| Column | Type | Length / value range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 4 to 1.02M |
| ext | string | 8 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 4 to 209 |
| max_stars_repo_name | string | length 5 to 121 |
| max_stars_repo_head_hexsha | string | length 40 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable (⌀) |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable (⌀) |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_path | string | length 4 to 209 |
| max_issues_repo_name | string | length 5 to 121 |
| max_issues_repo_head_hexsha | string | length 40 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable (⌀) |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable (⌀) |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_path | string | length 4 to 209 |
| max_forks_repo_name | string | length 5 to 121 |
| max_forks_repo_head_hexsha | string | length 40 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable (⌀) |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable (⌀) |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable (⌀) |
| content | string | length 4 to 1.02M |
| avg_line_length | float64 | 1.07 to 66.1k |
| max_line_length | int64 | 4 to 266k |
| alphanum_fraction | float64 | 0.01 to 1 |
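The schema above is the column layout of a source-code dataset: per-file Git metadata (hexsha, size, extension, language), one metadata group each for the most-starred, most-issue-active and most-forked repository containing the file, the raw file content, and simple text statistics. As a minimal sketch of how rows with this schema might be inspected, assuming the Hugging Face datasets library is available; the dataset identifier "org/python-source-dump" is a placeholder, not the real name of this dataset:

from itertools import islice
from datasets import load_dataset

# Stream rows so the large "content" column is not downloaded in full up front.
ds = load_dataset("org/python-source-dump", split="train", streaming=True)  # placeholder id

for row in islice(ds, 3):
    # Repository, file path, file size, and a short preview of the source file.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    print(row["content"][:200])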
hexsha: 37c6769a47c8627a965c14d167c164e068428adc | size: 495 | ext: py | lang: Python
repo path: tests/test_window.py | repo name: aspfohl/notepad | head hexsha: 30c5373d1745b388230a5fa1ccb1eb346b0e3798 | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from notepad import window, constants
def test_get_title():
assert window.get_title("foo") == f"foo - {constants.APP_NAME}"
assert window.get_title() == f"{constants.DEFAULT_UNNAMED_TITLE} - {constants.APP_NAME}"
def test_window_dimension():
w = window.WindowDimension(height=50, width=100)
assert w.left_alignment(screen_width=500) == 200.0
assert w.top_alignment(screen_height=100) == 25.0
assert w.get_geometry(screen_width=500, screen_height=100) == "100x50+200+25"
avg_line_length: 45 | max_line_length: 92 | alphanum_fraction: 0.737374

hexsha: d328e9ab5866c1725380fab56c204b45aa2621b4 | size: 900 | ext: py | lang: Python
repo path: problema34.py | repo name: bptfreitas/Project-Euler | head hexsha: 02b3ef8f8e3754b886b266fcd5eee7fd00d97dde | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
#!/usr/bin/python
#145 is a curious number, as 1! + 4! + 5! = 1 + 24 + 120 = 145.
#Find the sum of all numbers which are equal to the sum of the factorial of their digits.
#Note: as 1! = 1 and 2! = 2 are not sums they are not included.
from euler import fact
from itertools import permutations
from itertools import combinations_with_replacement
fats={}
for i in range(0,10):
print i
fats[i]=fact(i)
num_digits=1
num = 9
num_f9 = fats[9]
while num<num_f9:
num=10*num+9
num_f9+=fats[9]
num_digits+=1
print "maximum number of digits is: " + str(num_digits)
s = 0
nums = []
#num_digits=2
limite = pow(10,num_digits+1)
soma_numeros = 0
for i in range(1,limite):
#print i
#perm = combinations_with_replacement(fats,i)
num = i
num_f = 0
while num!=0:
digit = num%10
num_f += fats[digit]
num//=10
if num_f == i:
print "Adicionei : " + str(i)
nums.append(i)
print sum(nums)
avg_line_length: 18 | max_line_length: 89 | alphanum_fraction: 0.678889

hexsha: 2be69b30e39c778dc5bd643df1397ee5a5ddb331 | size: 2,765 | ext: py | lang: Python
repo path: perfkitbenchmarker/scripts/spark_sql_runner.py | repo name: justinuang/PerfKitBenchmarker | head hexsha: 0730a7a6ebcd9447c7667ff6a3902c203d85fadb | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
"""Runs a Spark SQL query with preloaded temp views.
Views can be BigQuery tables or HCFS directories containing Parquet.
This is useful for Storage formats not expressible as External Hive Tables.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import logging
from pyspark.sql import SparkSession
def parse_args():
"""Parse argv."""
def comma_separated_list(string):
return string.split(',')
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument(
'sql_script', help='The local path to the SQL file to run')
data_sources = parser.add_mutually_exclusive_group()
data_sources.add_argument(
'--bigquery_tables',
metavar='TABLES',
type=comma_separated_list,
default=[],
help='Comma separated list of fully qualified BigQuery '
'tables. Views will share the name of the tables.')
parser.add_argument(
'--bigquery-record-format',
metavar='FORMAT',
help='Data Format for reading from BigQuery Storage')
data_sources.add_argument(
'--hcfs_dirs',
metavar='DIRS',
type=comma_separated_list,
default=[],
help='Comma separated list of HCFS directories containing parquet '
'tables. Views will be named the basename of the directories.')
return parser.parse_args()
def register_views(
spark, bigquery_tables, hcfs_dirs, bigquery_record_format=None):
"""Pre-register BigQuery tables and Parquet directories as temporary views."""
temp_dfs = {}
for table in bigquery_tables:
name = table.split('.')[-1]
logging.info('Loading %s', table)
reader = spark.read.format('bigquery').option('table', table)
if bigquery_record_format:
reader.option('readDataFormat', bigquery_record_format)
temp_dfs[name] = reader.load()
for hcfs_dir in hcfs_dirs:
name = hcfs_dir.split('/')[-1]
logging.info('Loading %s', hcfs_dir)
temp_dfs[name] = spark.read.format('parquet').load(hcfs_dir)
for name, df in temp_dfs.items():
df.createTempView(name)
def main(args):
spark = (SparkSession.builder.appName('Spark SQL Query').getOrCreate())
register_views(
spark, args.bigquery_tables, args.hcfs_dirs, args.bigquery_record_format)
logging.info('Running %s', args.sql_script)
with open(args.sql_script) as f:
sql = f.read()
# spark-sql does not limit it's output. Replicate that here by setting limit
# to max Java Integer. Hopefully you limited the output in SQL or you are
# going to have a bad time.
# pylint: disable=protected-access
spark.sql(sql).show(spark._jvm.java.lang.Integer.MAX_VALUE)
# pylint: enable=protected-access
if __name__ == '__main__':
main(parse_args())
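Editorial aside (not part of the spark_sql_runner.py sample above): the script's docstring describes pre-registering BigQuery tables or HCFS Parquet directories as temporary views and then running a SQL file against them. A minimal sketch of the same register-then-query pattern in plain PySpark; the Parquet path and view name below are placeholders:

from pyspark.sql import SparkSession

spark = SparkSession.builder.appName('temp view sketch').getOrCreate()
# Register a Parquet directory as a temporary view, then query it with SQL,
# mirroring what register_views() does for the --hcfs_dirs argument.
events = spark.read.format('parquet').load('hdfs:///data/events')  # placeholder path
events.createTempView('events')
spark.sql('SELECT COUNT(*) AS n FROM events').show()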
avg_line_length: 31.781609 | max_line_length: 80 | alphanum_fraction: 0.717179

hexsha: f26ef0e25b7790d0a89227aeef7241ea30e47783 | size: 2,515 | ext: py | lang: Python
repo path: detectron2/utils/memory.py | repo name: sourcery-ai-bot/detectron2 | head hexsha: fd0c5c59afbdc43f7005fb1a8c0c39ac5dc44039 | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
from contextlib import contextmanager
from functools import wraps
import torch
__all__ = ["retry_if_cuda_oom"]
@contextmanager
def _ignore_torch_cuda_oom():
"""
A context which ignores CUDA OOM exception from pytorch.
"""
try:
yield
except RuntimeError as e:
# NOTE: the string may change?
if "CUDA out of memory. " not in str(e):
raise
def retry_if_cuda_oom(func):
"""
Makes a function retry itself after encountering
pytorch's CUDA OOM error.
It will first retry after calling `torch.cuda.empty_cache()`.
If that still fails, it will then retry by trying to convert inputs to CPUs.
In this case, it expects the function to dispatch to CPU implementation.
The return values may become CPU tensors as well and it's user's
responsibility to convert it back to CUDA tensor if needed.
Args:
func: a stateless callable that takes tensor-like objects as arguments
Returns:
a callable which retries `func` if OOM is encountered.
Examples:
::
output = retry_if_cuda_oom(some_torch_function)(input1, input2)
# output may be on CPU even if inputs are on GPU
Note:
1. When converting inputs to CPU, it will only look at each argument and check
if it has `.device` and `.to` for conversion. Nested structures of tensors
are not supported.
2. Since the function might be called more than once, it has to be
stateless.
"""
def maybe_to_cpu(x):
try:
like_gpu_tensor = x.device.type == "cuda" and hasattr(x, "to")
except AttributeError:
like_gpu_tensor = False
return x.to(device="cpu") if like_gpu_tensor else x
@wraps(func)
def wrapped(*args, **kwargs):
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Clear cache and retry
torch.cuda.empty_cache()
with _ignore_torch_cuda_oom():
return func(*args, **kwargs)
# Try on CPU. This slows down the code significantly, therefore print a notice.
logger = logging.getLogger(__name__)
logger.info("Attempting to copy inputs of {} to CPU due to CUDA OOM".format(str(func)))
new_args = (maybe_to_cpu(x) for x in args)
new_kwargs = {k: maybe_to_cpu(v) for k, v in kwargs.items()}
return func(*new_args, **new_kwargs)
return wrapped
avg_line_length: 31.4375 | max_line_length: 95 | alphanum_fraction: 0.65328

hexsha: 47a1ed53df52d600999b395aa7b4971ea9433275 | size: 1,544 | ext: py | lang: Python
repo path: yunibot/plugins/pcr/model.py | repo name: algobot76/yunibot | head hexsha: e3bc5b4ddc6f38d185e0e67b7f846ed4c1a56ef4 | licenses: ["BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: 8 (events 2021-01-06T19:38:47.000Z to 2021-01-09T19:58:57.000Z) | max_forks_count: null
content:
import sqlalchemy
metadata = sqlalchemy.MetaData()
clan = sqlalchemy.Table(
"clan",
metadata,
sqlalchemy.Column("group_id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("clan_name", sqlalchemy.String, nullable=False),
sqlalchemy.Column("server", sqlalchemy.String, nullable=False),
)
member = sqlalchemy.Table(
"member",
metadata,
sqlalchemy.Column(
"group_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey("clan.group_id"),
primary_key=True,
),
sqlalchemy.Column("user_id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("nickname", sqlalchemy.String, nullable=False),
)
challenge = sqlalchemy.Table(
"challenge",
metadata,
sqlalchemy.Column("id", sqlalchemy.Integer, primary_key=True),
sqlalchemy.Column("year", sqlalchemy.Integer, nullable=False),
sqlalchemy.Column("month", sqlalchemy.Integer, nullable=False),
sqlalchemy.Column(
"group_id",
sqlalchemy.Integer,
sqlalchemy.ForeignKey("clan.group_id"),
),
sqlalchemy.Column(
"user_id", sqlalchemy.Integer, sqlalchemy.ForeignKey("member.user_id")
),
sqlalchemy.Column("timestamp", sqlalchemy.TIMESTAMP, nullable=False),
sqlalchemy.Column("round", sqlalchemy.Integer, nullable=False),
sqlalchemy.Column("boss", sqlalchemy.Integer, nullable=False),
sqlalchemy.Column("damage", sqlalchemy.Integer, nullable=False),
sqlalchemy.Column("type", sqlalchemy.Integer, nullable=False),
sqlite_autoincrement=True,
)
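Editorial aside (not part of the yunibot model.py sample above): the module only declares SQLAlchemy Table objects on a shared MetaData instance; nothing in it creates the tables. A minimal sketch of materializing that schema, assuming the yunibot package from the sample is importable and using an in-memory SQLite engine purely for illustration:

import sqlalchemy
from yunibot.plugins.pcr.model import metadata  # the MetaData declared in the sample

# In-memory SQLite engine, for illustration only.
engine = sqlalchemy.create_engine("sqlite://")
# Emits CREATE TABLE for the clan, member and challenge tables.
metadata.create_all(engine)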
avg_line_length: 35.090909 | max_line_length: 78 | alphanum_fraction: 0.704016

hexsha: 7d8a213d174ff7485e92aae0c9e6dab935d27aed | size: 114 | ext: py | lang: Python
repo path: locations/models/__init__.py | repo name: Rafiatu/mozio | head hexsha: 7521587266bd93a3a2b759a2968a6d51c9837745 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: null | max_forks_count: null | all event datetimes: null
content:
from .polygon_model import Polygon
from .provider_model import Provider
from .coordinates_model import Coordinate
avg_line_length: 28.5 | max_line_length: 41 | alphanum_fraction: 0.868421

hexsha: 515cf1c2c24b719f6db7b53cb0220d857391821f | size: 5,803 | ext: py | lang: Python
repo path: tests/test_lowcost.py | repo name: lparth/reference_implementation | head hexsha: fa2aafc192ff1f42153301633c72fe6a9c63ed30 | licenses: ["Apache-2.0"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: 135 (events 2020-04-10T17:41:39.000Z to 2022-03-07T19:43:00.000Z) | max_issues_count: 50 (events 2020-06-27T08:34:13.000Z to 2021-04-20T10:18:25.000Z) | max_forks_count: 39 (events 2020-04-10T18:47:21.000Z to 2020-07-30T03:29:57.000Z)
content:
__copyright__ = """
Copyright 2020 EPFL
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__license__ = "Apache 2.0"
from datetime import datetime, timedelta, timezone
import pytest
import dp3t.config as config
from dp3t.protocols.lowcost import (
day_start_from_time,
next_day_key,
generate_ephids_for_day,
batch_start_from_time,
ContactTracer,
TracingDataBatch,
SECONDS_PER_BATCH,
)
START_TIME = datetime(2020, 4, 25, 15, 17, tzinfo=timezone.utc)
START_TIME_DAY_START_IN_EPOCHS = 1587772800
EPHID1 = bytes.fromhex("66687aadf862bd776c8fc18b8e9f8e20")
EPHID2 = bytes.fromhex("b7b1d06cd81686669aeea51e9f4723b5")
KEY0 = bytes.fromhex("0000000000000000000000000000000000000000000000000000000000000000")
KEY1 = bytes.fromhex("66687aadf862bd776c8fc18b8e9f8e20089714856ee233b3902a591d0d5f2925")
KEY2 = bytes.fromhex("2b32db6c2c0a6235fb1397e8225ea85e0f0e6e8c7b126d0016ccbde0e667151e")
EPHIDS_KEY1 = [
bytes.fromhex("04cab76af57ca373de1d52689fae06c1"),
bytes.fromhex("ab7747084efb743a6aa1b19bab2f0ca3"),
bytes.fromhex("f417c16279d7f718465f958e17466550"),
]
##############################
### TEST UTILITY FUNCTIONS ###
##############################
def test_day_start_from_time():
day_start = day_start_from_time(START_TIME)
assert day_start % config.SECONDS_PER_DAY == 0
assert day_start_from_time(START_TIME) == START_TIME_DAY_START_IN_EPOCHS
def test_batch_start_from_time():
batch_start = batch_start_from_time(START_TIME)
assert batch_start % SECONDS_PER_BATCH == 0
##########################################
### TEST BASIC CRYPTOGRAPHIC FUNCTIONS ###
##########################################
def test_next_day_key():
key0 = KEY0
key1 = next_day_key(key0)
assert key1 == KEY1
key2 = next_day_key(key1)
assert key2 == KEY2
def test_generate_ephids_for_day():
key = KEY1
# Test correct order when not shuffling
ephids = generate_ephids_for_day(key, shuffle=False)
for idx, ephid in enumerate(EPHIDS_KEY1):
assert ephids[idx] == ephid
# Test values are still present when shuffling
ephids = generate_ephids_for_day(key, shuffle=True)
for ephid in EPHIDS_KEY1:
assert ephid in ephids
##########################
### TEST TRACING BATCH ###
##########################
def test_tracing_batch_init():
batch = TracingDataBatch([])
# Approximate test for default release time
assert datetime.now().timestamp() - batch.release_time < SECONDS_PER_BATCH + 60
def test_tracing_batch_non_aligned_release_time():
# Don't accept release time that does not align to batch boundary
release_time = int(START_TIME.timestamp())
with pytest.raises(ValueError):
TracingDataBatch([], release_time=release_time)
def test_tracing_batch_aligned_release_time():
# With an aligned release time we shouldn't get an error
ts_start = int(START_TIME.timestamp())
release_time = (ts_start // SECONDS_PER_BATCH) * SECONDS_PER_BATCH
TracingDataBatch([], release_time=release_time)
####################################
### TEST INTERNAL DATASTRUCTURES ###
####################################
def test_deleting_old_keys():
ct = ContactTracer(start_time=START_TIME)
ct.next_day()
ct.next_day()
assert len(ct.past_keys) > 0
old_day_key = ct.current_day_key
old_ephids = set(ct.current_ephids)
# Get with side-effects: deleting old keys
ct.get_tracing_information(START_TIME)
# Should delete all old keys
assert len(ct.past_keys) == 0
# Should pick a new day key
assert ct.current_day_key != old_day_key
# And all EphIDs should have been regenerated
assert len(set(ct.current_ephids).intersection(old_ephids)) == 0
def test_contact_tracing_retention():
ct = ContactTracer(start_time=START_TIME)
t1 = START_TIME + timedelta(minutes=20)
t2 = START_TIME + timedelta(hours=6)
ct.add_observation(EPHID1, t1)
ct.add_observation(EPHID2, t2)
recorded_times = ct.observations.keys()
for _ in range(config.RETENTION_PERIOD + 1):
ct.next_day()
for time in recorded_times:
assert time not in ct.observations
def test_observation_granularity():
ct = ContactTracer(start_time=START_TIME)
t1 = START_TIME + timedelta(minutes=20)
t2 = START_TIME + timedelta(hours=6)
ct.add_observation(EPHID1, t1)
ct.add_observation(EPHID2, t2)
# Verify that internal representation has batch granularity
for time in ct.observations:
assert time % SECONDS_PER_BATCH == 0
def test_observation_granularity_after_update():
ct = ContactTracer(start_time=START_TIME)
t1 = START_TIME + timedelta(minutes=20)
t2 = START_TIME + timedelta(hours=6)
t3 = START_TIME + timedelta(days=1, hours=6)
ct.add_observation(EPHID1, t1)
ct.add_observation(EPHID2, t2)
ct.next_day()
ct.add_observation(EPHID2, t3)
t4 = int((START_TIME + timedelta(days=1, hours=10)).timestamp())
release_time = (t4 // SECONDS_PER_BATCH) * SECONDS_PER_BATCH
batch = TracingDataBatch([], release_time=release_time)
ct.housekeeping_after_batch(batch)
# All observations should now be at day granularity
for time in ct.observations:
assert time % config.SECONDS_PER_DAY == 0
avg_line_length: 30.223958 | max_line_length: 88 | alphanum_fraction: 0.702223

hexsha: 06b662886489c97eec43ca27a4675b8e4fdeacb2 | size: 9,285 | ext: py | lang: Python
repo path: pkgs/pickleshare-0.5-py27_0/lib/python2.7/site-packages/pickleshare.py | repo name: wangyum/anaconda | head hexsha: 6e5a0dbead3327661d73a61e85414cf92aa52be6 | licenses: ["Apache-2.0", "BSD-3-Clause"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (events 2020-11-01T20:40:01.000Z to 2020-11-01T20:40:01.000Z)
content:
#!/usr/bin/env python
""" PickleShare - a small 'shelve' like datastore with concurrency support
Like shelve, a PickleShareDB object acts like a normal dictionary. Unlike
shelve, many processes can access the database simultaneously. Changing a
value in database is immediately visible to other processes accessing the
same database.
Concurrency is possible because the values are stored in separate files. Hence
the "database" is a directory where *all* files are governed by PickleShare.
Example usage::
from pickleshare import *
db = PickleShareDB('~/testpickleshare')
db.clear()
print "Should be empty:",db.items()
db['hello'] = 15
db['aku ankka'] = [1,2,313]
db['paths/are/ok/key'] = [1,(5,46)]
print db.keys()
del db['aku ankka']
This module is certainly not ZODB, but can be used for low-load
(non-mission-critical) situations where tiny code size trumps the
advanced features of a "real" object database.
Installation guide: pip install path pickleshare
Author: Ville Vainio <vivainio@gmail.com>
License: MIT open source license.
"""
from __future__ import print_function
__version__ = "0.5"
from path import path as Path
# from IPython.external.path import path as Path
import os,stat,time
import collections
try:
import cPickle as pickle
except ImportError:
import pickle
import glob
import errno
def gethashfile(key):
return ("%02x" % abs(hash(key) % 256))[-2:]
_sentinel = object()
class PickleShareDB(collections.MutableMapping):
""" The main 'connection' object for PickleShare database """
def __init__(self,root):
""" Return a db object that will manage the specied directory"""
self.root = Path(root).expanduser().abspath()
if not self.root.isdir():
self.root.makedirs_p()
# cache has { 'key' : (obj, orig_mod_time) }
self.cache = {}
def __getitem__(self,key):
""" db['key'] reading """
fil = self.root / key
try:
mtime = (fil.stat()[stat.ST_MTIME])
except OSError:
raise KeyError(key)
if fil in self.cache and mtime == self.cache[fil][1]:
return self.cache[fil][0]
try:
# The cached item has expired, need to read
with fil.open("rb") as f:
obj = pickle.loads(f.read())
except:
raise KeyError(key)
self.cache[fil] = (obj,mtime)
return obj
def __setitem__(self,key,value):
""" db['key'] = 5 """
fil = self.root / key
parent = fil.parent
if parent and not parent.isdir():
parent.makedirs()
# We specify protocol 2, so that we can mostly go between Python 2
# and Python 3. We can upgrade to protocol 3 when Python 2 is obsolete.
with fil.open('wb') as f:
pickle.dump(value, f, protocol=2)
try:
self.cache[fil] = (value,fil.mtime)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def hset(self, hashroot, key, value):
""" hashed set """
hroot = self.root / hashroot
if not hroot.isdir():
hroot.makedirs()
hfile = hroot / gethashfile(key)
d = self.get(hfile, {})
d.update( {key : value})
self[hfile] = d
def hget(self, hashroot, key, default = _sentinel, fast_only = True):
""" hashed get """
hroot = self.root / hashroot
hfile = hroot / gethashfile(key)
d = self.get(hfile, _sentinel )
#print "got dict",d,"from",hfile
if d is _sentinel:
if fast_only:
if default is _sentinel:
raise KeyError(key)
return default
# slow mode ok, works even after hcompress()
d = self.hdict(hashroot)
return d.get(key, default)
def hdict(self, hashroot):
""" Get all data contained in hashed category 'hashroot' as dict """
hfiles = self.keys(hashroot + "/*")
hfiles.sort()
last = len(hfiles) and hfiles[-1] or ''
if last.endswith('xx'):
# print "using xx"
hfiles = [last] + hfiles[:-1]
all = {}
for f in hfiles:
# print "using",f
try:
all.update(self[f])
except KeyError:
print("Corrupt",f,"deleted - hset is not threadsafe!")
del self[f]
self.uncache(f)
return all
def hcompress(self, hashroot):
""" Compress category 'hashroot', so hset is fast again
hget will fail if fast_only is True for compressed items (that were
hset before hcompress).
"""
hfiles = self.keys(hashroot + "/*")
all = {}
for f in hfiles:
# print "using",f
all.update(self[f])
self.uncache(f)
self[hashroot + '/xx'] = all
for f in hfiles:
p = self.root / f
if p.basename() == 'xx':
continue
p.remove()
def __delitem__(self,key):
""" del db["key"] """
fil = self.root / key
self.cache.pop(fil,None)
try:
fil.remove()
except OSError:
# notfound and permission denied are ok - we
# lost, the other process wins the conflict
pass
def _normalized(self, p):
""" Make a key suitable for user's eyes """
return str(self.root.relpathto(p)).replace('\\','/')
def keys(self, globpat = None):
""" All keys in DB, or all keys matching a glob"""
if globpat is None:
files = self.root.walkfiles()
else:
files = [Path(p) for p in glob.glob(self.root/globpat)]
return [self._normalized(p) for p in files if p.isfile()]
def __iter__(self):
return iter(self.keys())
def __len__(self):
return len(self.keys())
def uncache(self,*items):
""" Removes all, or specified items from cache
Use this after reading a large amount of large objects
to free up memory, when you won't be needing the objects
for a while.
"""
if not items:
self.cache = {}
for it in items:
self.cache.pop(it,None)
def waitget(self,key, maxwaittime = 60 ):
""" Wait (poll) for a key to get a value
Will wait for `maxwaittime` seconds before raising a KeyError.
The call exits normally if the `key` field in db gets a value
within the timeout period.
Use this for synchronizing different processes or for ensuring
that an unfortunately timed "db['key'] = newvalue" operation
in another process (which causes all 'get' operation to cause a
KeyError for the duration of pickling) won't screw up your program
logic.
"""
wtimes = [0.2] * 3 + [0.5] * 2 + [1]
tries = 0
waited = 0
while 1:
try:
val = self[key]
return val
except KeyError:
pass
if waited > maxwaittime:
raise KeyError(key)
time.sleep(wtimes[tries])
waited+=wtimes[tries]
if tries < len(wtimes) -1:
tries+=1
def getlink(self,folder):
""" Get a convenient link for accessing items """
return PickleShareLink(self, folder)
def __repr__(self):
return "PickleShareDB('%s')" % self.root
class PickleShareLink:
""" A shortdand for accessing nested PickleShare data conveniently.
Created through PickleShareDB.getlink(), example::
lnk = db.getlink('myobjects/test')
lnk.foo = 2
lnk.bar = lnk.foo + 5
"""
def __init__(self, db, keydir ):
self.__dict__.update(locals())
def __getattr__(self,key):
return self.__dict__['db'][self.__dict__['keydir']+'/' + key]
def __setattr__(self,key,val):
self.db[self.keydir+'/' + key] = val
def __repr__(self):
db = self.__dict__['db']
keys = db.keys( self.__dict__['keydir'] +"/*")
return "<PickleShareLink '%s': %s>" % (
self.__dict__['keydir'],
";".join([Path(k).basename() for k in keys]))
def main():
import textwrap
usage = textwrap.dedent("""\
pickleshare - manage PickleShare databases
Usage:
pickleshare dump /path/to/db > dump.txt
pickleshare load /path/to/db < dump.txt
pickleshare test /path/to/db
""")
DB = PickleShareDB
import sys
if len(sys.argv) < 2:
print(usage)
return
cmd = sys.argv[1]
args = sys.argv[2:]
if cmd == 'dump':
if not args: args= ['.']
db = DB(args[0])
import pprint
pprint.pprint(db.items())
elif cmd == 'load':
cont = sys.stdin.read()
db = DB(args[0])
data = eval(cont)
db.clear()
for k,v in db.items():
db[k] = v
elif cmd == 'testwait':
db = DB(args[0])
db.clear()
print(db.waitget('250'))
elif cmd == 'test':
test()
stress()
if __name__== "__main__":
main()
avg_line_length: 27.966867 | max_line_length: 79 | alphanum_fraction: 0.562089

hexsha: f380ec6d4ec28c12552bb7c4cc31a51a27dcbed1 | size: 789 | ext: py | lang: Python
repo path: udacity course code/01-06-plottwohistograms.py | repo name: bluemurder/mlfl | head hexsha: b895b2f1d01b0f6418a5bcee2f204dd7916062f0 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: 1 (events 2021-03-22T22:25:54.000Z to 2021-03-22T22:25:54.000Z) | max_issues_count: 6 (events 2017-01-16T09:53:21.000Z to 2017-01-18T12:20:09.000Z) | max_forks_count: null
content:
"""Plot a couple of histogram."""
import pandas as pd
import matplotlib.pyplot as plt
from util import get_data, plot_data, compute_daily_returns
def test_run():
# Read data
dates = pd.date_range('2009-01-01', '2012-12-31') # date range as index
symbols = ['SPY','XOM']
df = get_data(symbols, dates) # get data for each symbol
#plot_data(df)
# Compute daily returns
daily_returns = compute_daily_returns(df)
#plot_data(daily_returns, title = "Daily returns", ylabel = "Daily returns")
# Compute and plot a couple of histograms on same chart
daily_returns['SPY'].hist(bins = 20, label = 'SPY')
daily_returns['XOM'].hist(bins = 20, label = 'XOM')
plt.legend(loc = 'upper right')
plt.show()
if __name__ == "__main__":
test_run()
avg_line_length: 30.346154 | max_line_length: 80 | alphanum_fraction: 0.671736

hexsha: 4bad0f105714002f3d49d1ee0045a31f93d6b87c | size: 287 | ext: py | lang: Python
repo path: xcffibaer_lib/__init__.py | repo name: whitelynx/xcffibaer | head hexsha: 9c504d0908820ea3e4163847490281cd779a2c9d | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: 1 (events 2020-05-07T04:26:15.000Z to 2020-05-07T04:26:15.000Z) | max_issues_count: null | max_forks_count: null
content:
'''A themeable status bar written in Python, using xcffib.
'''
from . import chunks
from .Bar import Bar
from .FSReader import FSReader
from .Store import Store
from .Window import Window
from .XSetup import XSetup
__all__ = ['chunks', 'Bar', 'FSReader', 'Store', 'Window', 'XSetup']
avg_line_length: 22.076923 | max_line_length: 68 | alphanum_fraction: 0.724739

hexsha: da32111c2830bf0aea92365bd37b4826981abda3 | size: 34,640 | ext: py | lang: Python
repo path: application/models.py | repo name: City-of-Helsinki/events-helsinki-cms | head hexsha: 64e4c1ce6cc058fb3783e417560dc244bd753d05 | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: 2 (events 2020-04-20T05:37:28.000Z to 2021-02-19T10:33:45.000Z) | max_issues_count: 6 (events 2020-02-12T12:55:37.000Z to 2021-03-30T12:56:28.000Z) | max_forks_count: 1 (events 2021-02-18T12:11:18.000Z to 2021-02-18T12:11:18.000Z)
content:
from uuid import uuid4
from django.conf import settings
from django.db import models
from wagtail.core import blocks
from wagtail.core.models import Page
from wagtail.core.fields import StreamField, RichTextField
from wagtail.images.models import AbstractImage, AbstractRendition
from wagtail.admin.edit_handlers import (
FieldPanel,
MultiFieldPanel,
ObjectList,
StreamFieldPanel,
TabbedInterface,
PageChooserPanel,
)
from application.wagtail_edit_handlers import (
CustomImageChooserPanel as ImageChooserPanel,
)
from application.wagtail_edit_handlers import CUSTOM_SETTINGS_PANELS
class CustomImage(AbstractImage):
photographer_credit_fi = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Kuvaajan tiedot"
)
photographer_credit_sv = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Fotografkredit"
)
photographer_credit_en = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Photographer credit"
)
admin_form_fields = (
"file",
"photographer_credit_fi",
"photographer_credit_sv",
"photographer_credit_en",
"title",
)
class CustomRendition(AbstractRendition):
image = models.ForeignKey(
CustomImage, on_delete=models.CASCADE, related_name="renditions"
)
class Meta:
unique_together = (("image", "filter_spec", "focal_point_key"),)
class HelsinkiActivities(Page):
parent_page_types = ["wagtailcore.Page"]
subpage_typed = [
"application.CollectionsFolder",
"application.LandingPagesFolder",
"application.StaticPagesFolder",
]
preview_modes = []
max_count = 1
class Meta:
verbose_name = "Helsinki Activities Service"
class CollectionsFolder(Page):
parent_page_types = ["application.HelsinkiActivities"]
subpage_typed = ["application.Collections"]
preview_modes = []
max_count = 1
class Meta:
verbose_name = "Collections Folder"
class LandingPagesFolder(Page):
parent_page_types = ["application.HelsinkiActivities"]
subpage_typed = ["application.LandingPages"]
preview_modes = []
max_count = 1
class Meta:
verbose_name = "Landing Pages Folder"
class BannerPagesFolder(Page):
parent_page_types = ["application.HelsinkiActivities"]
subpage_typed = ["application.BannerPages"]
preview_modes = []
max_count = 1
class Meta:
verbose_name = "Banner Pages Folder"
class StaticPagesFolder(Page):
parent_page_types = ["application.HelsinkiActivities"]
subpage_typed = ["application.AboutPage", "application.AccessibilityPage"]
preview_modes = []
max_count = 1
class Meta:
verbose_name = "Static Pages Folder"
class AboutPage(Page):
parent_page_types = ["application.StaticPagesFolder"]
subpage_typed = []
preview_modes = []
max_count = 1
limited_rich_text_field_features = [
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"bold",
"italic",
"ol",
"ul",
"hr",
"link",
]
heading_section_fi = RichTextField(
features=limited_rich_text_field_features, verbose_name="Ingressi FI"
)
heading_section_sv = RichTextField(
features=limited_rich_text_field_features, verbose_name="Ingressi SV"
)
heading_section_en = RichTextField(
features=limited_rich_text_field_features, verbose_name="Ingressi EN"
)
content_section_fi = RichTextField(
features=limited_rich_text_field_features, verbose_name="Sisältöäalue FI"
)
content_section_sv = RichTextField(
features=limited_rich_text_field_features, verbose_name="Sisältöäalue SV"
)
content_section_en = RichTextField(
features=limited_rich_text_field_features, verbose_name="Sisältöäalue EN"
)
keywords_fi = StreamField(
[
("keywords_fi", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords FI",
)
keywords_sv = StreamField(
[
("keywords_sv", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords SV",
)
keywords_en = StreamField(
[
("keywords_en", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords EN",
)
content_panels = [
MultiFieldPanel(
[
FieldPanel("heading_section_fi"),
FieldPanel("heading_section_sv"),
FieldPanel("heading_section_en"),
],
heading="Ingressi",
help_text="Tämä teksti tulee sivun ylälaitaan ja siihen on tarkoitus kirjoittaa lyhyesti sisällön ydin.",
),
MultiFieldPanel(
[
FieldPanel("content_section_fi"),
FieldPanel("content_section_sv"),
FieldPanel("content_section_en"),
],
heading="Sisältöäalue",
help_text="Tämä sisältö tulee ingressin jälkeen.",
),
MultiFieldPanel(
[
StreamFieldPanel("keywords_fi"),
StreamFieldPanel("keywords_sv"),
StreamFieldPanel("keywords_en"),
],
heading="Keywords",
help_text="",
),
]
edit_handler = TabbedInterface(
[
ObjectList(content_panels, heading="Sisältö"),
ObjectList(
CUSTOM_SETTINGS_PANELS, heading="Asetukset", classname="settings"
),
]
)
def clean(self):
self.title = "Tietoja palvelusta"
self.slug = str(uuid4())
super().clean()
def get_context(self, request):
context = super().get_context(request)
context["FRONTEND_BASE_URL"] = settings.FRONTEND_BASE_URL
return context
class Meta:
verbose_name = "About Page"
class AccessibilityPage(Page):
parent_page_types = ["application.StaticPagesFolder"]
subpage_typed = []
preview_modes = []
max_count = 1
limited_rich_text_field_features = [
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"bold",
"italic",
"ol",
"ul",
"hr",
"link",
]
heading_section_fi = RichTextField(
features=limited_rich_text_field_features, verbose_name="Ingressi FI"
)
heading_section_sv = RichTextField(
features=limited_rich_text_field_features, verbose_name="Ingressi SV"
)
heading_section_en = RichTextField(
features=limited_rich_text_field_features, verbose_name="Ingressi EN"
)
content_section_fi = RichTextField(
features=limited_rich_text_field_features, verbose_name="Sisältöäalue FI"
)
content_section_sv = RichTextField(
features=limited_rich_text_field_features, verbose_name="Sisältöäalue SV"
)
content_section_en = RichTextField(
features=limited_rich_text_field_features, verbose_name="Sisältöäalue EN"
)
keywords_fi = StreamField(
[
("keywords_fi", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords FI",
)
keywords_sv = StreamField(
[
("keywords_sv", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords SV",
)
keywords_en = StreamField(
[
("keywords_en", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords EN",
)
content_panels = [
MultiFieldPanel(
[
FieldPanel("heading_section_fi"),
FieldPanel("heading_section_sv"),
FieldPanel("heading_section_en"),
],
heading="Ingressi",
help_text="Tämä teksti tulee sivun ylälaitaan ja siihen on tarkoitus kirjoittaa lyhyesti sisällön ydin.",
),
MultiFieldPanel(
[
FieldPanel("content_section_fi"),
FieldPanel("content_section_sv"),
FieldPanel("content_section_en"),
],
heading="Sisältöäalue",
help_text="Tämä sisältö tulee ingressin jälkeen.",
),
MultiFieldPanel(
[
StreamFieldPanel("keywords_fi"),
StreamFieldPanel("keywords_sv"),
StreamFieldPanel("keywords_en"),
],
heading="Keywords",
help_text="",
),
]
edit_handler = TabbedInterface(
[
ObjectList(content_panels, heading="Sisältö"),
ObjectList(
CUSTOM_SETTINGS_PANELS, heading="Asetukset", classname="settings"
),
]
)
def clean(self):
self.title = "Saavutettavuus"
self.slug = str(uuid4())
super().clean()
def get_context(self, request):
context = super().get_context(request)
context["FRONTEND_BASE_URL"] = settings.FRONTEND_BASE_URL
return context
class Meta:
verbose_name = "Accessibility Page"
class BannerPages(Page):
parent_page_types = ["application.BannerPagesFolder"]
subpage_typed = []
preview_modes = []
hero_background_image_color_choices = [
("ENGEL", "Engel"),
("COPPER", "Kupari"),
("SUOMENLINNA", "Suomenlinna"),
]
title_and_description_color_choices = [
("BLACK", "Black"),
("WHITE", "White"),
]
hero_background_image_fi = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuva FI",
)
hero_background_image_sv = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuva SV",
)
hero_background_image_en = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuva EN",
)
hero_background_image_color_fi = models.CharField(
max_length=255,
choices=hero_background_image_color_choices,
null=True,
blank=True,
verbose_name="Pääkuvan taustaväri FI",
)
hero_background_image_color_sv = models.CharField(
max_length=255,
choices=hero_background_image_color_choices,
null=True,
blank=True,
verbose_name="Pääkuvan taustaväri SV",
)
hero_background_image_color_en = models.CharField(
max_length=255,
choices=hero_background_image_color_choices,
null=True,
blank=True,
verbose_name="Pääkuvan taustaväri EN",
)
hero_background_image_mobile_fi = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuva mobiililla FI",
)
hero_background_image_mobile_sv = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuva mobiililla SV",
)
hero_background_image_mobile_en = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuva mobiililla EN",
)
hero_top_layer_image_fi = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuvan päälle asettuva kuva FI",
)
hero_top_layer_image_sv = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuvan päälle asettuva kuva SV",
)
hero_top_layer_image_en = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Pääkuvan päälle asettuva kuva EN",
)
button_text_fi = models.CharField(
max_length=255, null=True, verbose_name="Napin teksti FI"
)
button_text_sv = models.CharField(
max_length=255, null=True, verbose_name="Napin teksti SV"
)
button_text_en = models.CharField(
max_length=255, null=True, verbose_name="Napin teksti EN"
)
button_url_fi = models.URLField(
max_length=500, null=True, verbose_name="Linkki suomenkieliselle sivulle"
)
button_url_sv = models.URLField(
max_length=500, null=True, verbose_name="Linkki ruotsinkieliselle sivulle"
)
button_url_en = models.URLField(
max_length=500, null=True, verbose_name="Linkki englanninkieliselle sivulle"
)
social_media_image_fi = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Some-postauksen kuva FI",
)
social_media_image_sv = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Some-postauksen kuva SV",
)
social_media_image_en = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Some-postauksen kuva EN",
)
title_fi = models.CharField(max_length=255, null=True, verbose_name="Otsikko FI")
title_sv = models.CharField(max_length=255, null=True, verbose_name="Otsikko SV")
title_en = models.CharField(max_length=255, null=True, verbose_name="Otsikko EN")
title_and_description_color_fi = models.CharField(
max_length=255,
choices=title_and_description_color_choices,
null=True,
blank=True,
verbose_name="Tekstin väri FI",
)
title_and_description_color_sv = models.CharField(
max_length=255,
choices=title_and_description_color_choices,
null=True,
blank=True,
verbose_name="Tekstin väri SV",
)
title_and_description_color_en = models.CharField(
max_length=255,
choices=title_and_description_color_choices,
null=True,
blank=True,
verbose_name="Tekstin väri EN",
)
description_fi = models.TextField(null=True, blank=True, verbose_name="Selite FI")
description_sv = models.TextField(null=True, blank=True, verbose_name="Selite SV")
description_en = models.TextField(null=True, blank=True, verbose_name="Selite EN")
content_panels = [
MultiFieldPanel(
[
FieldPanel("title"),
],
heading="Bannerin nimi",
help_text="Otsikon maksimimerkkimäärä on noin 60 merkkiä sanojen pituudesta riippuen. Tarkistatathan esikatselusta, että sisältö on kooltaan sopiva.",
),
MultiFieldPanel(
[
ImageChooserPanel("hero_background_image_fi"),
ImageChooserPanel("hero_background_image_sv"),
ImageChooserPanel("hero_background_image_en"),
],
heading="Pääkuva",
help_text="Pääkuvalla tarkoitetaan sivuston etusivulla olevaa koko sivun levyistä kuvaa.",
),
MultiFieldPanel(
[
FieldPanel("hero_background_image_color_fi"),
FieldPanel("hero_background_image_color_sv"),
FieldPanel("hero_background_image_color_en"),
],
heading="Pääkuvan taustaväri",
help_text="Pääkuvan taustalle tuleva väri.",
),
MultiFieldPanel(
[
FieldPanel("title_and_description_color_fi"),
FieldPanel("title_and_description_color_sv"),
FieldPanel("title_and_description_color_en"),
],
heading="Tekstin Väri",
help_text="",
),
MultiFieldPanel(
[
ImageChooserPanel("hero_background_image_mobile_fi"),
ImageChooserPanel("hero_background_image_mobile_sv"),
ImageChooserPanel("hero_background_image_mobile_en"),
],
heading="Pääkuva mobiililla",
help_text="Pääkuvalla tarkoitetaan sivuston etusivulla olevaa koko sivun levyistä kuvaa. Tämä kuva näkyy vain mobiilissa.",
),
MultiFieldPanel(
[
ImageChooserPanel("hero_top_layer_image_fi"),
ImageChooserPanel("hero_top_layer_image_sv"),
ImageChooserPanel("hero_top_layer_image_en"),
],
heading="Pääkuvan päälle asettuva kuva",
help_text="Kuva asettuu pääkuvan päälle. Tämä kuva ei näy mobiilissa. Ainoastaan taustakuva näkyy pienemmillä näytöillä.",
),
MultiFieldPanel(
[
FieldPanel("button_text_fi"),
FieldPanel("button_text_sv"),
FieldPanel("button_text_en"),
],
heading="NAPPI",
help_text="",
),
MultiFieldPanel(
[
FieldPanel("button_url_fi"),
FieldPanel("button_url_sv"),
FieldPanel("button_url_en"),
],
heading="NAPIN LINKKI",
help_text="",
),
MultiFieldPanel(
[
ImageChooserPanel("social_media_image_fi"),
ImageChooserPanel("social_media_image_sv"),
ImageChooserPanel("social_media_image_en"),
],
heading="Some-postauksen kuva",
help_text="Kun käyttäjä jakaa etusivun somessa, tämä kuva tulee näkyviin postauksessa.",
),
MultiFieldPanel(
[
FieldPanel("title_fi"),
FieldPanel("title_sv"),
FieldPanel("title_en"),
],
heading="OTSIKKO",
help_text="Otsikon maksimimerkkimäärä on noin 60 merkkiä sanojen pituudesta riippuen. Tarkistatathan esikatselusta, että sisältö on kooltaan sopiva.",
),
MultiFieldPanel(
[
FieldPanel("description_fi"),
FieldPanel("description_sv"),
FieldPanel("description_en"),
],
heading="SELITE",
help_text="Selite sijoittuu otsikon yläpuolelle. Voit jättää tämän kohdan myös tyhjäksi.",
),
]
edit_handler = TabbedInterface(
[
ObjectList(content_panels, heading="Sisältö"),
ObjectList(
CUSTOM_SETTINGS_PANELS, heading="Asetukset", classname="settings"
),
]
)
class Meta:
verbose_name = "Banner"
class LandingPages(Page):
parent_page_types = ["application.LandingPagesFolder"]
subpage_typed = []
top_banner = models.ForeignKey(
BannerPages,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Top banner",
)
bottom_banner = models.ForeignKey(
BannerPages,
null=True,
blank=True,
on_delete=models.SET_NULL,
related_name="+",
verbose_name="Bottom banner",
)
meta_information_fi = models.TextField(null=True, verbose_name="Meta tieto FI")
meta_information_sv = models.TextField(null=True, verbose_name="Meta tieto SV")
meta_information_en = models.TextField(null=True, verbose_name="Meta tieto EN")
page_title_fi = models.CharField(
max_length=255, null=True, verbose_name="Sivun otsikointi FI"
)
page_title_sv = models.CharField(
max_length=255, null=True, verbose_name="Sivun otsikointi SV"
)
page_title_en = models.CharField(
max_length=255, null=True, verbose_name="Sivun otsikointi EN"
)
keywords_fi = StreamField(
[
("keywords_fi", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords FI",
)
keywords_sv = StreamField(
[
("keywords_sv", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords SV",
)
keywords_en = StreamField(
[
("keywords_en", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords EN",
)
content_panels = [
MultiFieldPanel(
[PageChooserPanel("top_banner"), PageChooserPanel("bottom_banner")],
heading="Banner selection",
help_text="Pääkuvalla tarkoitetaan sivuston etusivulla olevaa koko sivun levyistä kuvaa.",
),
MultiFieldPanel(
[
FieldPanel("meta_information_fi"),
FieldPanel("meta_information_sv"),
FieldPanel("meta_information_en"),
],
heading="META TIETO",
help_text="Meta tieto avustaa hakukoneita tiedon etsimisessä.",
),
MultiFieldPanel(
[
FieldPanel("page_title_fi"),
FieldPanel("page_title_sv"),
FieldPanel("page_title_en"),
],
heading="SIVUN OTSIKOINTI",
help_text="",
),
MultiFieldPanel(
[
StreamFieldPanel("keywords_fi"),
StreamFieldPanel("keywords_sv"),
StreamFieldPanel("keywords_en"),
],
heading="Keywords",
help_text="",
),
]
def get_context(self, request):
context = super().get_context(request)
context["FRONTEND_BASE_URL"] = settings.FRONTEND_BASE_URL
return context
def clean(self):
"""
Unfortunately Wagtail doesn't support customizing which field it uses for Page titles.
At the moment, it uses "title" field, but this is not always desirable.
The extremely hacky trick below makes Wagtail explorer look like its default language is Finnish.
Taken from: https://stackoverflow.com/a/48632873/5208999
"""
self.title = (
self.top_banner and self.top_banner.title_fi
) or "Etusivu ilman suomenkielistä otsikkoa"
self.slug = str(uuid4())
super().clean()
edit_handler = TabbedInterface(
[
ObjectList(content_panels, heading="Sisältö"),
ObjectList(
CUSTOM_SETTINGS_PANELS, heading="Asetukset", classname="settings"
),
]
)
class Meta:
verbose_name = "Landing Page"
class Collections(Page):
parent_page_types = ["application.CollectionsFolder"]
subpage_typed = []
color_choices = [
("ENGEL", "Engel – keltainen"),
("COPPER", "Kupari – vihreä"),
("SUOMENLINNA", "Suomenlinna – vaaleanpunainen"),
]
visible_on_frontpage = models.BooleanField(
default=False, verbose_name="Näytä kokoelma etusivulla"
)
hero_image = models.ForeignKey(
settings.WAGTAILIMAGES_IMAGE_MODEL,
null=True,
blank=True,
on_delete=models.SET_NULL,
verbose_name="Kokoelman pääkuva",
)
box_color = models.CharField(
max_length=255,
choices=color_choices,
null=True,
verbose_name="Taustaväri ylätunnisteelle",
)
title_fi = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Otsikko FI"
)
title_sv = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Otsikko SV"
)
title_en = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Otsikko EN"
)
description_fi = models.TextField(
max_length=700, null=True, blank=True, verbose_name="Kuvaus FI"
)
description_sv = models.TextField(
max_length=700, null=True, blank=True, verbose_name="Kuvaus SV"
)
description_en = models.TextField(
max_length=700, null=True, blank=True, verbose_name="Kuvaus EN"
)
link_text_fi = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Linkki teksti FI"
)
link_text_sv = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Linkki teksti SV"
)
link_text_en = models.CharField(
max_length=255, null=True, blank=True, verbose_name="Linkki teksti EN"
)
link_url_fi = models.URLField(
max_length=500,
null=True,
blank=True,
verbose_name="Linkki suomenkieliselle sivulle",
)
link_url_sv = models.URLField(
max_length=500,
null=True,
blank=True,
verbose_name="Linkki ruotsinkieliselle sivulle",
)
link_url_en = models.URLField(
max_length=500,
null=True,
blank=True,
verbose_name="Linkki englanninkieliselle sivulle",
)
social_media_description_fi = models.TextField(
null=True, blank=True, verbose_name="Some-kuvaus FI"
)
social_media_description_sv = models.TextField(
null=True, blank=True, verbose_name="Some-kuvaus SV"
)
social_media_description_en = models.TextField(
null=True, blank=True, verbose_name="Some-kuvaus EN"
)
curated_events_title_fi = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="Tapahtumien otsikko FI",
default="Suositellut tapahtumat",
)
curated_events_title_sv = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="Tapahtumien otsikko SV",
default="Rekommenderade evenemang",
)
curated_events_title_en = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="Tapahtumien otsikko EN",
default="Recommended events",
)
curated_events = StreamField(
[
("event_link", blocks.URLBlock()),
],
null=True,
verbose_name="Suositeltavat tapahtumat",
)
event_list_title_fi = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="Tapahtumien otsikko FI",
default="Sinua voisi myös kiinnostaa",
)
event_list_title_sv = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="Tapahtumien otsikko SV",
default="Kolla även dessa",
)
event_list_title_en = models.CharField(
max_length=255,
null=True,
blank=True,
verbose_name="Tapahtumien otsikko EN",
default="Related events",
)
event_list_query_fi = models.URLField(
max_length=500,
null=True,
blank=True,
verbose_name="Hakutulossivun www-osoite FI",
)
event_list_query_sv = models.URLField(
max_length=500,
null=True,
blank=True,
verbose_name="Hakutulossivun www-osoite SV",
)
event_list_query_en = models.URLField(
max_length=500,
null=True,
blank=True,
verbose_name="Hakutulossivun www-osoite EN",
)
keywords_fi = StreamField(
[
("keywords_fi", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords FI",
)
keywords_sv = StreamField(
[
("keywords_sv", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords SV",
)
keywords_en = StreamField(
[
("keywords_en", blocks.CharBlock()),
],
null=True,
blank=True,
verbose_name="keywords EN",
)
content_panels = [
MultiFieldPanel(
[
FieldPanel("slug"),
],
heading="Lyhytnimi",
help_text="Lyhytnimi määrittelee sivun nimen esiintymisen URL:eissa esim. http://domain.com/blog/[polkutunnus]/",
),
MultiFieldPanel(
[
FieldPanel("visible_on_frontpage"),
],
heading="Näytä kokoelma etusivulla",
help_text='Valitessasi "Näytä kokoelma etusivulla" kokoelma tulee näkyviin kokoelmasivun lisäksi myös palvelun etusivulla.',
),
MultiFieldPanel(
[
ImageChooserPanel("hero_image"),
],
heading="Kokoelman pääkuva",
help_text="Kuvan maksimikoko on 200 KB. Kuvan tulisi olla vähintään 970 px leveä ja 650 px korkea.",
),
MultiFieldPanel(
[
FieldPanel("box_color"),
],
heading="Taustaväri ylätunnisteelle",
help_text="Valittu väri tulee näkyviin kokoelman yläosaan, joka sisältää kokoelman otsikon sekä kuvauksen.",
),
MultiFieldPanel(
[
FieldPanel("title_fi"),
FieldPanel("title_sv"),
FieldPanel("title_en"),
],
heading="OTSIKKO",
help_text="Kokoelma julkaistaan vain niillä kielillä, joilla on otsikko. Voit halutessasi jättää otsikkokentän tyhjäksi, jolloin kyseistä kieliversiota ei julkaista.",
),
MultiFieldPanel(
[
FieldPanel("description_fi"),
FieldPanel("description_sv"),
FieldPanel("description_en"),
],
heading="KOKOELMAN KUVAUS",
help_text="Pääotsikon alle tuleva teksti, joka kertoo lisää kokoelmasta ja houkuttelee käyttäjiä tutustumaan suosituksiin. Kuvauksen maksimimerkkimäärä on 700 merkkiä.",
),
MultiFieldPanel(
[
FieldPanel("link_text_fi"),
FieldPanel("link_text_sv"),
FieldPanel("link_text_en"),
],
heading="Linkkiteksti - valinnainen",
help_text="Vapaaehtoinen linkki, joka ohjaa lukijan pois kokoelmasta. Käytä vain harkitusti ja pidä linkkiteksti lyhyenä.",
),
MultiFieldPanel(
[
FieldPanel("link_url_fi"),
FieldPanel("link_url_sv"),
FieldPanel("link_url_en"),
],
heading="Linkin www-osoite - valinnainen",
help_text="Jos lisäsit aikaisempaan 'Linkkiteksti'-osioon linkin, lisää tähän kenttään www-osoite, johon käyttäjä ohjataan.",
),
MultiFieldPanel(
[
FieldPanel("curated_events_title_fi"),
FieldPanel("curated_events_title_sv"),
FieldPanel("curated_events_title_en"),
],
heading="Nostojen otsikko",
help_text="Kirjoita tähän otsikko, jonka haluat näyttää käsin poimittavien, suositeltavien tapahtumien yläpuolella.",
),
MultiFieldPanel(
[
StreamFieldPanel("curated_events"),
],
heading="SUOSITELTAVAT TAPAHTUMAT",
help_text="Lisää tähän ne tapahtumat, joita haluat suositella käyttäjälle. Mene haluamasi tapahtuman sivulle, kopioi sen www-osoite ja liitä osoite alla olevaan kenttään. Tähän lisäämäsi tapahtumat näkyvät tapahtumat.hel.fi-sivuston kokoelmassa automaattisesti aikajärjestyksessä, vaikka et syöttäisi niitä aikajärjestyksessä.",
),
MultiFieldPanel(
[
FieldPanel("event_list_title_fi"),
FieldPanel("event_list_title_sv"),
FieldPanel("event_list_title_en"),
],
heading="MUIDEN TAPAHTUMIEN OTSIKKO",
help_text='Käsin poimittujen tapahtumien voit tässä suositella muita samankaltaisia tai muuten kiinnostavia tapahtumia. Näille tapahtumille tarvitaan oma otsikko, esim. "Tutustu myös näihin".',
),
MultiFieldPanel(
[
FieldPanel("event_list_query_fi"),
FieldPanel("event_list_query_sv"),
FieldPanel("event_list_query_en"),
],
heading="TAPAHTUMALISTAUKSEN HAUN WWW-OSOITE",
help_text="Tee tapahtumahaku sopivilla hakukriteereillä tapahtumat.helsingissa. Kun hakutuloksessa on haluamasi tapahtumat, kopioi hakutuloksen www-osoite tähän kenttään.",
),
MultiFieldPanel(
[
FieldPanel("social_media_description_fi"),
FieldPanel("social_media_description_sv"),
FieldPanel("social_media_description_en"),
],
heading="KUVAUS SOSIAALISEEN MEDIAAN",
help_text="Tämä teksti näkyy, kun käyttäjä jakaa kokoelman sosiaalisessa mediassa. Max. 160 merkkiä pitkä teksti, joka houkuttelee avaamaan linkin.",
),
MultiFieldPanel(
[
StreamFieldPanel("keywords_fi"),
StreamFieldPanel("keywords_sv"),
StreamFieldPanel("keywords_en"),
],
heading="Keywords",
help_text="",
),
]
def get_context(self, request):
context = super().get_context(request)
context["FRONTEND_BASE_URL"] = settings.FRONTEND_BASE_URL
return context
def clean(self):
"""
Unfortunately Wagtail doesn't support customizing which field it uses for Page titles.
At the moment, it uses "title" field, but this is not always desirable.
The extremely hacky trick below makes Wagtail explorer look like its default language is Finnish.
Taken from: https://stackoverflow.com/a/48632873/5208999
"""
self.title = self.title_fi or "Kokoelma ilman suomenkielistä otsikkoa"
super().clean()
edit_handler = TabbedInterface(
[
ObjectList(content_panels, heading="Sisältö"),
ObjectList(
CUSTOM_SETTINGS_PANELS, heading="Asetukset", classname="settings"
),
]
)
class Meta:
verbose_name = "Collection"
avg_line_length: 31.348416 | max_line_length: 340 | alphanum_fraction: 0.605167

hexsha: bd32d5bfc11b53f31422abb852a2ca2b8b628b22 | size: 24,178 | ext: py | lang: Python
repo path: src/hip_prof_gen.py | repo name: ex-rzr/hipamd | head hexsha: d2d2cacfe210307ec10c77400e1dafdeafefbc0f | licenses: ["MIT"] (identical for the max_stars, max_issues and max_forks groups)
max_stars_count: 12 (events 2021-08-07T13:27:51.000Z to 2022-03-28T04:10:57.000Z) | max_issues_count: 11 (events 2021-08-06T08:29:59.000Z to 2022-03-20T11:47:46.000Z) | max_forks_count: 13 (events 2021-08-06T07:02:24.000Z to 2022-03-27T09:27:48.000Z)
content:
#!/usr/bin/python
# Copyright (c) 2019 - 2021 Advanced Micro Devices, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os, sys, re
import CppHeaderParser
import filecmp
PROF_HEADER = "hip_prof_str.h"
OUTPUT = PROF_HEADER
REC_MAX_LEN = 1024
# Recursive sources processing
recursive_mode = 0
# HIP_INIT_API macro patching
hip_patch_mode = 0
# API matching types check
types_check_mode = 0
# Private API check
private_check_mode = 0
# Messages and errors control
verbose = 0
errexit = 0
inp_file = 'none'
line_num = -1
# Verbose message
def message(msg):
if verbose: sys.stdout.write(msg + '\n')
# Fatal error termination
def error(msg):
if line_num != -1:
msg += ", file '" + inp_file + "', line (" + str(line_num) + ")"
if errexit:
msg = " Error: " + msg
else:
msg = " Warning: " + msg
sys.stdout.write(msg + '\n')
sys.stderr.write(sys.argv[0] + msg +'\n')
def fatal(msg):
error(msg)
sys.exit(1)
#############################################################
# Normalizing API name
def filtr_api_name(name):
name = re.sub(r'\s*$', r'', name);
return name
def filtr_api_decl(record):
record = re.sub("\s__dparm\([^\)]*\)", r'', record);
record = re.sub("\(void\*\)", r'', record);
return record
# Normalizing API arguments
def filtr_api_args(args_str):
args_str = re.sub(r'^\s*', r'', args_str);
args_str = re.sub(r'\s*$', r'', args_str);
args_str = re.sub(r'\s*,\s*', r',', args_str);
args_str = re.sub(r'\s+', r' ', args_str);
args_str = re.sub(r'\s*(\*+)\s*', r'\1 ', args_str);
args_str = re.sub(r'(enum|struct) ', '', args_str);
return args_str
# Normalizing types
def norm_api_types(type_str):
type_str = re.sub(r'uint32_t', r'unsigned int', type_str)
type_str = re.sub(r'^unsigned$', r'unsigned int', type_str)
return type_str
# Creating a list of arguments [(type, name), ...]
def list_api_args(args_str):
args_str = filtr_api_args(args_str)
args_list = []
if args_str != '':
for arg_pair in args_str.split(','):
if arg_pair == 'void': continue
arg_pair = re.sub(r'\s*=\s*\S+$','', arg_pair);
m = re.match("^(.*)\s(\S+)$", arg_pair);
if m:
arg_type = norm_api_types(m.group(1))
arg_name = m.group(2)
args_list.append((arg_type, arg_name))
else:
fatal("bad args: args_str: '" + args_str + "' arg_pair: '" + arg_pair + "'")
return args_list;
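# Editor's note: the example below is an illustration added for clarity and is not
# part of the original script. Given the normalization above, a declaration's
# argument string is split into (type, name) tuples, e.g.
#   list_api_args("const void* arg, size_t size, size_t offset")
#     -> [('const void*', 'arg'), ('size_t', 'size'), ('size_t', 'offset')]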
# Creating arguments string "type0, type1, ..."
def filtr_api_types(args_str):
args_list = list_api_args(args_str)
types_str = ''
for arg_tuple in args_list:
types_str += arg_tuple[0] + ', '
return types_str
# Creating options list [opt0, opt1, ...]
def filtr_api_opts(args_str):
args_list = list_api_args(args_str)
opts_list = []
for arg_tuple in args_list:
opts_list.append(arg_tuple[1])
return opts_list
# Checking for pointer non-void arg type
def pointer_ck(arg_type):
ptr_type = ''
m = re.match(r'(.*)\*$', arg_type)
if m:
ptr_type = m.group(1)
ptr_type = re.sub(r'const ', '', ptr_type)
if ptr_type == 'void': ptr_type = ''
return ptr_type
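# Editor's illustration (not part of the original script): pointer_ck() extracts the
# pointee type of a single non-void pointer and returns '' otherwise, e.g.
#   pointer_ck('hipStream_t*')  -> 'hipStream_t'
#   pointer_ck('const char*')   -> 'char'
#   pointer_ck('void*')         -> ''
#   pointer_ck('size_t')        -> ''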
#############################################################
# Parsing API header
# hipError_t hipSetupArgument(const void* arg, size_t size, size_t offset);
def parse_api(inp_file_p, out):
global inp_file
global line_num
inp_file = inp_file_p
beg_pattern = re.compile("^(hipError_t|const char\s*\*)\s+([^\(]+)\(");
api_pattern = re.compile("^(hipError_t|const char\s*\*)\s+([^\(]+)\(([^\)]*)\)");
end_pattern = re.compile("Texture");
hidden_pattern = re.compile(r'__attribute__\(\(visibility\("hidden"\)\)\)')
nms_open_pattern = re.compile(r'namespace hip_impl {')
nms_close_pattern = re.compile(r'}')
inp = open(inp_file, 'r')
found = 0
hidden = 0
nms_level = 0;
record = ""
line_num = -1
for line in inp.readlines():
record += re.sub(r'^\s+', r' ', line[:-1])
line_num += 1
if len(record) > REC_MAX_LEN:
fatal("bad record \"" + record + "\"")
m = beg_pattern.match(line)
if m:
name = m.group(2)
if hidden != 0:
message("api: " + name + " - hidden")
elif nms_level != 0:
message("api: " + name + " - hip_impl")
else:
message("api: " + name)
found = 1
if found != 0:
record = re.sub("\s__dparm\([^\)]*\)", '', record);
m = api_pattern.match(record)
if m:
found = 0
if end_pattern.search(record): continue
api_name = filtr_api_name(m.group(2))
api_args = m.group(3)
if not api_name in out:
out[api_name] = api_args
else: continue
hidden = 0
if hidden_pattern.match(line): hidden = 1
if nms_open_pattern.match(line): nms_level += 1
if (nms_level > 0) and nms_close_pattern.match(line): nms_level -= 1
if nms_level < 0:
fatal("nms level < 0")
record = ""
inp.close()
line_num = -1
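# Editor's sketch (illustrative, values taken from the example declaration above):
# after parse_api() the `out` map holds raw argument strings keyed by API name, e.g.
#   out['hipSetupArgument'] == 'const void* arg, size_t size, size_t offset'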
#############################################################
# Parsing API implementation
# hipError_t hipSetupArgument(const void* arg, size_t size, size_t offset) {
# HIP_INIT_API(hipSetupArgument, arg, size, offset);
# inp_file - input implementation source file
# api_map - input public API map [<api name>] => <api args>
# out - output map [<api name>] => [opt0, opt1, ...]
def parse_content(inp_file_p, api_map, out):
global hip_patch_mode
global types_check_mode
global private_check_mode
global inp_file
global line_num
inp_file = inp_file_p
# API method begin pattern
beg_pattern = re.compile("^(hipError_t|const char\s*\*)\s+[^\(]+\(");
# API declaration pattern
decl_pattern = re.compile("^(hipError_t|const char\s*\*)\s+([^\(]+)\(([^\)]*)\)\s*;");
# API definition pattern
api_pattern = re.compile("^(hipError_t|const char\s*\*)\s+([^\(]+)\(([^\)]*)\)\s*{");
# API init macro pattern
init_pattern = re.compile("(^\s*HIP_INIT_API\s*)\((([^,]+)(,.*|)|)(\);|,)\s*$");
# Open input file
inp = open(inp_file, 'r')
# API name
api_name = ""
# Valid public API found flag
api_valid = 0
# API overload (parameters mismatch)
api_overload = 0
# Input file patched content
content = ''
  # Sub content for found API definition
sub_content = ''
# Current record, accumulating several API definition related lines
record = ''
# Current input file line number
line_num = -1
# API beginning found flag
found = 0
# Reading input file
for line in inp.readlines():
# Accumulating record
record += re.sub(r'^\s+', r' ', line[:-1])
line_num += 1
if len(record) > REC_MAX_LEN:
fatal("bad record \"" + record + "\"")
break;
# Looking for API begin
if found == 0:
record = re.sub(r'\s*extern\s+"C"\s+', r'', record);
if beg_pattern.match(record):
found = 1
record = filtr_api_decl(record)
# Matching API declaration
if found == 1:
if decl_pattern.match(record):
found = 0
# Matching API definition
if found == 1:
m = api_pattern.match(record)
# Checking if complete API matched
if m:
found = 2
api_valid = 0
api_overload = 0
api_name = filtr_api_name(m.group(2))
# Checking if API name is in the API map
if (private_check_mode == 0) or (api_name in api_map):
if not api_name in api_map: api_map[api_name] = ''
# Getting API arguments
api_args = m.group(3)
# Getting etalon arguments from the API map
eta_args = api_map[api_name]
if eta_args == '':
eta_args = api_args
api_map[api_name] = eta_args
# Normalizing API arguments
api_types = filtr_api_types(api_args)
# Normalizing etalon arguments
eta_types = filtr_api_types(eta_args)
if (api_types == eta_types) or ((types_check_mode == 0) and (not api_name in out)):
          # API is already found and is not mismatched
if (api_name in out):
fatal("API redefined \"" + api_name + "\", record \"" + record + "\"")
# Set valid public API found flag
api_valid = 1
# Set output API map with API arguments list
out[api_name] = filtr_api_opts(api_args)
        # Register mismatched API methods
else:
api_overload = 1
          # Warning about mismatched API, possibly a non-public overloaded version
api_diff = '\t\t' + inp_file + " line(" + str(line_num) + ")\n\t\tapi: " + api_types + "\n\t\teta: " + eta_types
message("\t" + api_name + ' args mismatch:\n' + api_diff + '\n')
# API found action
if found == 2:
if hip_patch_mode != 0:
# Looking for INIT macro
m = init_pattern.match(line)
if m:
init_name = api_name
if api_overload == 1: init_name = 'NONE'
init_args = m.group(4)
line = m.group(1) + '(' + init_name + init_args + m.group(5) + '\n'
m = init_pattern.match(line)
if m:
found = 0
if api_valid == 1: message("\t" + api_name)
# Ignore if it is initialized as NONE
init_name = m.group(3)
if init_name != 'NONE':
# Check if init name matching API name
if init_name != api_name:
fatal("init name mismatch: '" + init_name + "' <> '" + api_name + "'")
# Registering dummy API for non public API if the name in INIT is not NONE
if api_valid == 0:
# If init name is not in public API map then it is private API
# else it was not identified and will be checked on finish
if not init_name in api_map:
if init_name in out:
fatal("API reinit \"" + api_name + "\", record \"" + record + "\"")
out[init_name] = []
elif re.search('}', line):
found = 0
# Expect INIT macro for valid public API
# Removing and registering non-conformant APIs with missing HIP_INIT macro
if api_valid == 1:
if api_name in out:
del out[api_name]
del api_map[api_name]
# Registering non-conformant APIs
out['.' + api_name] = 1
else:
fatal("API is not in out \"" + api_name + "\", record \"" + record + "\"")
if found != 1: record = ""
content += line
inp.close()
line_num = -1
if len(out) != 0:
return content
else:
return ''
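# Editor's sketch (illustrative): parse_content() fills `out` with the parameter
# names of each intercepted API definition, so for the declaration shown above one
# would expect something like
#   out['hipSetupArgument'] == ['arg', 'size', 'offset']
# while non-conformant APIs (no HIP_INIT macro found) are recorded under a
# '.'-prefixed key, as handled near the end of the main section below.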
# src path walk
def parse_src(api_map, src_path, src_patt, out):
global recursive_mode
pattern = re.compile(src_patt)
src_path = re.sub(r'\s', '', src_path)
for src_dir in src_path.split(':'):
message("Parsing " + src_dir + " for '" + src_patt + "'")
for root, dirs, files in os.walk(src_dir):
for fnm in files:
if pattern.search(fnm):
file = root + '/' + fnm
message(file)
content = parse_content(file, api_map, out);
if (hip_patch_mode != 0) and (content != ''):
f = open(file, 'w')
f.write(content)
f.close()
if recursive_mode == 0: break
#############################################################
# Generating profiling primitives header
# api_map - public API map [<api name>] => [(type, name), ...]
# callback_ids - public API callback IDs list (name, callback_id)
# opts_map - opts map [<api name>] => [opt0, opt1, ...]
def generate_prof_header(f, api_map, callback_ids, opts_map):
# Private API list
priv_lst = []
f.write('// Generated file. DO NOT EDIT.\n')
f.write('//\n')
f.write('// This file is automatically generated by the ' + os.path.basename(__file__) + ' script.\n')
f.write('// If changes are required, run the script and commit the updated file.\n\n')
f.write('#ifndef _HIP_PROF_STR_H\n');
f.write('#define _HIP_PROF_STR_H\n');
f.write('#define HIP_PROF_VER 1\n')
# Check for non-public API
for name in sorted(opts_map.keys()):
if not name in api_map:
opts_lst = opts_map[name]
if len(opts_lst) != 0:
fatal("bad dummy API \"" + name + "\", args: " + str(opts_lst))
priv_lst.append(name)
message("Private: " + name)
  # Generating the callbacks ID enumeration
f.write('\n// HIP API callbacks ID enumeration\n')
f.write('enum hip_api_id_t {\n')
f.write(' HIP_API_ID_NONE = 0,\n')
f.write(' HIP_API_ID_FIRST = 1,\n')
cb_id_map = {}
last_cb_id = 0
for name, cb_id in callback_ids:
if not name in api_map:
f.write(' HIP_API_ID_RESERVED_' + str(cb_id) + ' = ' + str(cb_id) + ',\n')
else:
f.write(' HIP_API_ID_' + name + ' = ' + str(cb_id) + ',\n')
cb_id_map[name] = cb_id
if cb_id > last_cb_id: last_cb_id = cb_id
for name in sorted(api_map.keys()):
if not name in cb_id_map:
last_cb_id += 1
f.write(' HIP_API_ID_' + name + ' = ' + str(last_cb_id) + ',\n')
f.write(' HIP_API_ID_LAST = ' + str(last_cb_id) + ',\n')
f.write('\n')
for name in sorted(priv_lst):
f.write(' HIP_API_ID_' + name + ' = HIP_API_ID_NONE,\n')
f.write('};\n')
# Generating the method to return API name by ID
f.write('\n// Return the HIP API string for a given callback ID\n')
f.write('static inline const char* hip_api_name(const uint32_t id) {\n')
f.write(' switch(id) {\n')
for name in sorted(api_map.keys()):
f.write(' case HIP_API_ID_' + name + ': return "' + name + '";\n')
f.write(' };\n')
f.write(' return "unknown";\n')
f.write('};\n')
# Generating the method for querying API ID by name
f.write('\n')
f.write('#include <string.h>\n');
f.write('// Return the HIP API callback ID for a given name\n')
f.write('static inline uint32_t hipApiIdByName(const char* name) {\n')
for name in sorted(api_map.keys()):
f.write(' if (strcmp("' + name + '", name) == 0) return HIP_API_ID_' + name + ';\n')
f.write(' return HIP_API_ID_NONE;\n')
f.write('}\n')
# Generating the callbacks data structure
f.write('\n// HIP API callbacks data structures\n')
f.write(
'typedef struct hip_api_data_s {\n' +
' uint64_t correlation_id;\n' +
' uint32_t phase;\n' +
' union {\n'
)
for name in sorted(api_map.keys()):
args = api_map[name]
if len(args) != 0:
f.write(' struct {\n')
for arg_tuple in args:
arg_type = arg_tuple[0]
ptr_type = pointer_ck(arg_type)
arg_name = arg_tuple[1]
# Checking for enum type
if arg_type == "hipLimit_t": arg_type = 'enum ' + arg_type
        # Structure field code
f.write(' ' + arg_type + ' ' + arg_name + ';\n')
if ptr_type != '':
f.write(' ' + ptr_type + ' ' + arg_name + '__val;\n')
f.write(' } ' + name + ';\n')
f.write(
' } args;\n' +
'} hip_api_data_t;\n'
)
# Generating the callbacks args data filling macros
f.write('\n// HIP API callbacks args data filling macros\n')
for name in sorted(api_map.keys()):
args = api_map[name]
f.write('// ' + name + str(args) + '\n')
f.write('#define INIT_' + name + '_CB_ARGS_DATA(cb_data) { \\\n')
if name in opts_map:
opts_list = opts_map[name]
if len(args) != len(opts_list):
fatal("\"" + name + "\" API args and opts mismatch, args: " + str(args) + ", opts: " + str(opts_list))
# API args iterating:
# type is args[<ind>][0]
# name is args[<ind>][1]
for ind in range(0, len(args)):
arg_tuple = args[ind]
arg_type = arg_tuple[0]
ptr_type = pointer_ck(arg_type)
fld_name = arg_tuple[1]
opt_name = opts_list[ind]
if arg_type == "const char*":
f.write(' cb_data.args.' + name + '.' + fld_name + ' = (' + opt_name + ') ? strdup(' + opt_name + ') : NULL; \\\n')
else:
f.write(' cb_data.args.' + name + '.' + fld_name + ' = (' + arg_type + ')' + opt_name + '; \\\n')
f.write('};\n')
f.write('#define INIT_CB_ARGS_DATA(cb_id, cb_data) INIT_##cb_id##_CB_ARGS_DATA(cb_data)\n')
# Generating macro for non-public API
f.write('\n// Macros for non-public API primitives\n')
for name in sorted(priv_lst):
f.write('// ' + name + '()\n')
f.write('#define INIT_'+ name + '_CB_ARGS_DATA(cb_data) {};\n')
f.write('\n#define INIT_NONE_CB_ARGS_DATA(cb_data) {};\n')
f.write('\n#if HIP_PROF_HIP_API_STRING\n')
# Generating the method for the API args filling
f.write('// HIP API args filling helper\n')
f.write('static inline void hipApiArgsInit(hip_api_id_t id, hip_api_data_t* data) {\n')
f.write(' switch (id) {\n')
for name in sorted(api_map.keys()):
args = api_map[name]
f.write('// ' + name + str(args) + '\n')
f.write(' case HIP_API_ID_' + name + ':\n')
for ind in range(0, len(args)):
arg_tuple = args[ind]
arg_type = arg_tuple[0]
ptr_type = pointer_ck(arg_type)
fld_name = arg_tuple[1]
var_name = 'data->args.' + name + '.' + fld_name
if arg_type == "char*":
f.write(' ' + var_name + ' = (' + var_name + ') ? strdup(' + var_name + ') : NULL;\n')
else:
if ptr_type != '':
f.write(' if (' + var_name + ') ' + var_name + '__val = *(' + var_name + ');\n')
f.write(' break;\n')
f.write(' default: break;\n')
f.write(' };\n')
f.write('}\n')
# Generating the method for the API string, name and parameters
f.write('\n')
f.write('#include <sstream>\n');
f.write('#include <string>\n');
f.write('// HIP API string method, method name and parameters\n')
f.write('static inline const char* hipApiString(hip_api_id_t id, const hip_api_data_t* data) {\n')
f.write(' std::ostringstream oss;\n')
f.write(' switch (id) {\n')
for name in sorted(api_map.keys()):
args = api_map[name]
f.write(' case HIP_API_ID_' + name + ':\n')
f.write(' oss << "' + name + '(";\n')
for ind in range(0, len(args)):
arg_tuple = args[ind]
arg_type = arg_tuple[0]
ptr_type = pointer_ck(arg_type)
arg_name = arg_tuple[1]
var_name = 'data->args.' + name + '.' + arg_name
delim = '' if ind == 0 else ', ';
oss_stream = 'oss << "' + delim + arg_name + '='
line_shift = ' '
f.write(line_shift)
if ptr_type != '':
f.write('if (' + var_name + ' == NULL) ' + oss_stream + 'NULL";\n' + line_shift + 'else ')
if pointer_ck(ptr_type) != '':
f.write(oss_stream + '" << (void*)' + var_name + '__val' + ';\n')
else:
f.write(oss_stream + '" << ' + var_name + '__val' + ';\n')
else:
f.write(oss_stream + '" << ' + var_name + ';\n')
f.write(' oss << ")";\n')
f.write(' break;\n')
f.write(' default: oss << "unknown";\n')
f.write(' };\n')
f.write(' return strdup(oss.str().c_str());\n')
f.write('}\n')
f.write('#endif // HIP_PROF_HIP_API_STRING\n')
f.write('#endif // _HIP_PROF_STR_H\n');
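# Editor's sketch of the generated header shape (illustrative only; the API name
# 'hipMalloc' is a placeholder, not taken from this script):
#   enum hip_api_id_t { HIP_API_ID_NONE = 0, HIP_API_ID_FIRST = 1,
#                       HIP_API_ID_hipMalloc = <id>, ..., HIP_API_ID_LAST = <id> };
#   static inline const char* hip_api_name(const uint32_t id) { ... }
#   #define INIT_hipMalloc_CB_ARGS_DATA(cb_data) { ... };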
#############################################################
# main
while len(sys.argv) > 1:
if not re.match(r'-', sys.argv[1]): break
if (sys.argv[1] == '-v'):
verbose = 1
sys.argv.pop(1)
if (sys.argv[1] == '-r'):
recursive_mode = 1
sys.argv.pop(1)
if (sys.argv[1] == '-t'):
types_check_mode = 1
sys.argv.pop(1)
if (sys.argv[1] == '--priv'):
private_check_mode = 1
sys.argv.pop(1)
if (sys.argv[1] == '-e'):
errexit = 1
sys.argv.pop(1)
if (sys.argv[1] == '-p'):
hip_patch_mode = 1
sys.argv.pop(1)
# Usage
if (len(sys.argv) < 4):
fatal ("Usage: " + sys.argv[0] + " [-v] <input HIP API .h file> <patched srcs path> <previous output> [<output>]\n" +
" -v - verbose messages\n" +
" -r - process source directory recursively\n" +
" -t - API types matching check\n" +
" --priv - private API check\n" +
" -e - on error exit mode\n" +
" -p - HIP_INIT_API macro patching mode\n" +
"\n" +
" Example:\n" +
" $ " + sys.argv[0] + " -v -p -t --priv ./include/hip/amd_detail/hip_runtime_api.h" +
" ./src ./include/hip/amd_detail/hip_prof_str.h ./include/hip/amd_detail/hip_prof_str.h.new");
# API header file given as an argument
src_pat = "\.cpp$"
api_hfile = sys.argv[1]
if not os.path.isfile(api_hfile):
fatal("input file '" + api_hfile + "' not found")
# Srcs directory given as an argument
src_dir = sys.argv[2]
if not os.path.isdir(src_dir):
fatal("src directory " + src_dir + "' not found")
# Current hip_prof_str include
INPUT = sys.argv[3]
if not os.path.isfile(INPUT):
fatal("input file '" + INPUT + "' not found")
if len(sys.argv) > 4: OUTPUT = sys.argv[4]
# API declaration map
api_map = {
'hipSetupArgument': '',
'hipMalloc3DArray': '',
'hipFuncGetAttribute': '',
'hipMemset3DAsync': '',
'hipKernelNameRef': '',
'hipStreamGetPriority': '',
'hipLaunchByPtr': '',
'hipFreeHost': '',
'hipGetErrorName': '',
'hipMemcpy3DAsync': '',
'hipMemcpyParam2DAsync': '',
'hipArray3DCreate': '',
'hipOccupancyMaxActiveBlocksPerMultiprocessorWithFlags': '',
'hipOccupancyMaxPotentialBlockSize': '',
'hipMallocManaged': '',
'hipOccupancyMaxActiveBlocksPerMultiprocessor': '',
'hipGetErrorString': '',
'hipMallocHost': '',
'hipModuleLoadDataEx': '',
'hipGetDeviceProperties': '',
'hipConfigureCall': '',
'hipHccModuleLaunchKernel': '',
'hipExtModuleLaunchKernel': '',
}
# API options map
opts_map = {}
# Parsing API header
parse_api(api_hfile, api_map)
# Parsing sources
parse_src(api_map, src_dir, src_pat, opts_map)
try:
cppHeader = CppHeaderParser.CppHeader(INPUT)
except CppHeaderParser.CppParseError as e:
print(e)
sys.exit(1)
# Callback IDs
api_callback_ids = []
for enum in cppHeader.enums:
if enum['name'] == 'hip_api_id_t':
for value in enum['values']:
if value['name'] == 'HIP_API_ID_NONE' or value['name'] == 'HIP_API_ID_FIRST':
continue
if value['name'] == 'HIP_API_ID_LAST':
break
m = re.match(r'HIP_API_ID_(\S*)', value['name'])
if m:
api_callback_ids.append((m.group(1), value['value']))
break
# Checking for non-conformant APIs with missing HIP_INIT macro
for name in list(opts_map.keys()):
m = re.match(r'\.(\S*)', name)
if m:
message("Init missing: " + m.group(1))
del opts_map[name]
# Converting api map to map of lists
# Checking for not found APIs
not_found = 0
if len(opts_map) != 0:
for name in api_map.keys():
args_str = api_map[name];
api_map[name] = list_api_args(args_str)
if not name in opts_map:
error("implementation not found: " + name)
not_found += 1
if not_found != 0:
error(str(not_found) + " API calls missing in interception layer")
# The output subdirectory seems to exist or not depending on the
# version of cmake.
output_dir = os.path.dirname(OUTPUT)
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# Generating output header file
with open(OUTPUT, 'w') as f:
generate_prof_header(f, api_map, api_callback_ids, opts_map)
if not filecmp.cmp(INPUT, OUTPUT):
fatal("\"" + INPUT + "\" needs to be re-generated and checked-in with the current changes")
# Successful exit
sys.exit(0)
| 33.211538
| 126
| 0.596741
|
c0c4296d64a097beff2f928105b872c76bc01c72
| 20,072
|
py
|
Python
|
discord/ui/view.py
|
Ryomen-Sukuna/discord.py
|
0bcb0d0e3ce395d42a5b1dae61b0090791ee018d
|
[
"MIT"
] | 1
|
2021-09-11T09:24:38.000Z
|
2021-09-11T09:24:38.000Z
|
discord/ui/view.py
|
Ryomen-Sukuna/discord.py
|
0bcb0d0e3ce395d42a5b1dae61b0090791ee018d
|
[
"MIT"
] | null | null | null |
discord/ui/view.py
|
Ryomen-Sukuna/discord.py
|
0bcb0d0e3ce395d42a5b1dae61b0090791ee018d
|
[
"MIT"
] | null | null | null |
"""
The MIT License (MIT)
Copyright (c) 2015-present Rapptz
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
"""
from __future__ import annotations
from typing import Any, Callable, ClassVar, Coroutine, Dict, Iterator, List, Optional, Sequence, TYPE_CHECKING, Tuple
from functools import partial
from itertools import groupby
import traceback
import asyncio
import logging
import sys
import time
import os
from .item import Item, ItemCallbackType
from ..components import (
Component,
ActionRow as ActionRowComponent,
_component_factory,
Button as ButtonComponent,
SelectMenu as SelectComponent,
)
# fmt: off
__all__ = (
'View',
)
# fmt: on
if TYPE_CHECKING:
from typing_extensions import Self
from ..interactions import Interaction
from ..message import Message
from ..types.components import Component as ComponentPayload
from ..types.interactions import ModalSubmitComponentInteractionData as ModalSubmitComponentInteractionDataPayload
from ..state import ConnectionState
from .modal import Modal
_log = logging.getLogger(__name__)
def _walk_all_components(components: List[Component]) -> Iterator[Component]:
for item in components:
if isinstance(item, ActionRowComponent):
yield from item.children
else:
yield item
def _component_to_item(component: Component) -> Item:
if isinstance(component, ButtonComponent):
from .button import Button
return Button.from_component(component)
if isinstance(component, SelectComponent):
from .select import Select
return Select.from_component(component)
return Item.from_component(component)
class _ViewWeights:
# fmt: off
__slots__ = (
'weights',
)
# fmt: on
def __init__(self, children: List[Item]):
self.weights: List[int] = [0, 0, 0, 0, 0]
key = lambda i: sys.maxsize if i.row is None else i.row
children = sorted(children, key=key)
for row, group in groupby(children, key=key):
for item in group:
self.add_item(item)
def find_open_space(self, item: Item) -> int:
for index, weight in enumerate(self.weights):
if weight + item.width <= 5:
return index
raise ValueError('could not find open space for item')
def add_item(self, item: Item) -> None:
if item.row is not None:
total = self.weights[item.row] + item.width
if total > 5:
raise ValueError(f'item would not fit at row {item.row} ({total} > 5 width)')
self.weights[item.row] = total
item._rendered_row = item.row
else:
index = self.find_open_space(item)
self.weights[index] += item.width
item._rendered_row = index
def remove_item(self, item: Item) -> None:
if item._rendered_row is not None:
self.weights[item._rendered_row] -= item.width
item._rendered_row = None
def clear(self) -> None:
self.weights = [0, 0, 0, 0, 0]
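# Editor's note (illustration, not part of the library source): _ViewWeights models
# the five action rows a Discord message may carry; each row has a capacity of 5
# width units, so e.g. five single-width buttons fill a row, while a full-width
# select menu (width 5 -- an assumption about component widths) occupies a row alone.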
class _ViewCallback:
__slots__ = ('view', 'callback', 'item')
def __init__(self, callback: ItemCallbackType[Any, Any], view: View, item: Item[View]) -> None:
self.callback: ItemCallbackType[Any, Any] = callback
self.view: View = view
self.item: Item[View] = item
def __call__(self, interaction: Interaction) -> Coroutine[Any, Any, Any]:
return self.callback(self.view, interaction, self.item)
class View:
"""Represents a UI view.
This object must be inherited to create a UI within Discord.
.. versionadded:: 2.0
Parameters
-----------
timeout: Optional[:class:`float`]
Timeout in seconds from last interaction with the UI before no longer accepting input.
If ``None`` then there is no timeout.
Attributes
------------
timeout: Optional[:class:`float`]
Timeout from last interaction with the UI before no longer accepting input.
If ``None`` then there is no timeout.
children: List[:class:`Item`]
The list of children attached to this view.
"""
__discord_ui_view__: ClassVar[bool] = True
__discord_ui_modal__: ClassVar[bool] = False
__view_children_items__: ClassVar[List[ItemCallbackType[Any, Any]]] = []
def __init_subclass__(cls) -> None:
children: List[ItemCallbackType[Any, Any]] = []
for base in reversed(cls.__mro__):
for member in base.__dict__.values():
if hasattr(member, '__discord_ui_model_type__'):
children.append(member)
if len(children) > 25:
raise TypeError('View cannot have more than 25 children')
cls.__view_children_items__ = children
def _init_children(self) -> List[Item[Self]]:
children = []
for func in self.__view_children_items__:
item: Item = func.__discord_ui_model_type__(**func.__discord_ui_model_kwargs__)
item.callback = _ViewCallback(func, self, item)
item._view = self
setattr(self, func.__name__, item)
children.append(item)
return children
def __init__(self, *, timeout: Optional[float] = 180.0):
self.timeout = timeout
self.children: List[Item[Self]] = self._init_children()
self.__weights = _ViewWeights(self.children)
self.id: str = os.urandom(16).hex()
self.__cancel_callback: Optional[Callable[[View], None]] = None
self.__timeout_expiry: Optional[float] = None
self.__timeout_task: Optional[asyncio.Task[None]] = None
self.__stopped: asyncio.Future[bool] = asyncio.get_running_loop().create_future()
def __repr__(self) -> str:
return f'<{self.__class__.__name__} timeout={self.timeout} children={len(self.children)}>'
async def __timeout_task_impl(self) -> None:
while True:
# Guard just in case someone changes the value of the timeout at runtime
if self.timeout is None:
return
if self.__timeout_expiry is None:
return self._dispatch_timeout()
# Check if we've elapsed our currently set timeout
now = time.monotonic()
if now >= self.__timeout_expiry:
return self._dispatch_timeout()
# Wait N seconds to see if timeout data has been refreshed
await asyncio.sleep(self.__timeout_expiry - now)
def to_components(self) -> List[Dict[str, Any]]:
def key(item: Item) -> int:
return item._rendered_row or 0
children = sorted(self.children, key=key)
components: List[Dict[str, Any]] = []
for _, group in groupby(children, key=key):
children = [item.to_component_dict() for item in group]
if not children:
continue
components.append(
{
'type': 1,
'components': children,
}
)
return components
@classmethod
def from_message(cls, message: Message, /, *, timeout: Optional[float] = 180.0) -> View:
"""Converts a message's components into a :class:`View`.
The :attr:`.Message.components` of a message are read-only
and separate types from those in the ``discord.ui`` namespace.
In order to modify and edit message components they must be
converted into a :class:`View` first.
Parameters
-----------
message: :class:`discord.Message`
The message with components to convert into a view.
timeout: Optional[:class:`float`]
The timeout of the converted view.
Returns
--------
:class:`View`
The converted view. This always returns a :class:`View` and not
one of its subclasses.
"""
view = View(timeout=timeout)
for component in _walk_all_components(message.components):
view.add_item(_component_to_item(component))
return view
def add_item(self, item: Item[Any]) -> Self:
"""Adds an item to the view.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
item: :class:`Item`
The item to add to the view.
Raises
--------
TypeError
An :class:`Item` was not passed.
ValueError
Maximum number of children has been exceeded (25)
or the row the item is trying to be added to is full.
"""
if len(self.children) > 25:
raise ValueError('maximum number of children exceeded')
if not isinstance(item, Item):
raise TypeError(f'expected Item not {item.__class__!r}')
self.__weights.add_item(item)
item._view = self
self.children.append(item)
return self
def remove_item(self, item: Item[Any]) -> Self:
"""Removes an item from the view.
This function returns the class instance to allow for fluent-style
chaining.
Parameters
-----------
item: :class:`Item`
The item to remove from the view.
"""
try:
self.children.remove(item)
except ValueError:
pass
else:
self.__weights.remove_item(item)
return self
def clear_items(self) -> Self:
"""Removes all items from the view.
This function returns the class instance to allow for fluent-style
chaining.
"""
self.children.clear()
self.__weights.clear()
return self
async def interaction_check(self, interaction: Interaction) -> bool:
"""|coro|
A callback that is called when an interaction happens within the view
that checks whether the view should process item callbacks for the interaction.
This is useful to override if, for example, you want to ensure that the
interaction author is a given user.
The default implementation of this returns ``True``.
.. note::
If an exception occurs within the body then the check
is considered a failure and :meth:`on_error` is called.
Parameters
-----------
interaction: :class:`~discord.Interaction`
The interaction that occurred.
Returns
---------
:class:`bool`
Whether the view children's callbacks should be called.
"""
return True
async def on_timeout(self) -> None:
"""|coro|
A callback that is called when a view's timeout elapses without being explicitly stopped.
"""
pass
async def on_error(self, error: Exception, item: Item[Any], interaction: Interaction) -> None:
"""|coro|
A callback that is called when an item's callback or :meth:`interaction_check`
fails with an error.
The default implementation prints the traceback to stderr.
Parameters
-----------
error: :class:`Exception`
The exception that was raised.
item: :class:`Item`
The item that failed the dispatch.
interaction: :class:`~discord.Interaction`
The interaction that led to the failure.
"""
print(f'Ignoring exception in view {self} for item {item}:', file=sys.stderr)
traceback.print_exception(error.__class__, error, error.__traceback__, file=sys.stderr)
async def _scheduled_task(self, item: Item, interaction: Interaction):
try:
if self.timeout:
self.__timeout_expiry = time.monotonic() + self.timeout
allow = await self.interaction_check(interaction)
if not allow:
return
await item.callback(interaction)
except Exception as e:
return await self.on_error(e, item, interaction)
def _start_listening_from_store(self, store: ViewStore) -> None:
self.__cancel_callback = partial(store.remove_view)
if self.timeout:
if self.__timeout_task is not None:
self.__timeout_task.cancel()
self.__timeout_expiry = time.monotonic() + self.timeout
self.__timeout_task = asyncio.create_task(self.__timeout_task_impl())
def _dispatch_timeout(self):
if self.__stopped.done():
return
if self.__cancel_callback:
self.__cancel_callback(self)
self.__cancel_callback = None
self.__stopped.set_result(True)
asyncio.create_task(self.on_timeout(), name=f'discord-ui-view-timeout-{self.id}')
def _dispatch_item(self, item: Item, interaction: Interaction):
if self.__stopped.done():
return
asyncio.create_task(self._scheduled_task(item, interaction), name=f'discord-ui-view-dispatch-{self.id}')
def _refresh(self, components: List[Component]) -> None:
# This is pretty hacky at the moment
# fmt: off
old_state: Dict[Tuple[int, str], Item[Any]] = {
(item.type.value, item.custom_id): item # type: ignore
for item in self.children
if item.is_dispatchable()
}
# fmt: on
children: List[Item[Any]] = []
for component in _walk_all_components(components):
try:
older = old_state[(component.type.value, component.custom_id)] # type: ignore
except (KeyError, AttributeError):
children.append(_component_to_item(component))
else:
older._refresh_component(component)
children.append(older)
self.children = children
def stop(self) -> None:
"""Stops listening to interaction events from this view.
This operation cannot be undone.
"""
if not self.__stopped.done():
self.__stopped.set_result(False)
self.__timeout_expiry = None
if self.__timeout_task is not None:
self.__timeout_task.cancel()
self.__timeout_task = None
if self.__cancel_callback:
self.__cancel_callback(self)
self.__cancel_callback = None
def is_finished(self) -> bool:
""":class:`bool`: Whether the view has finished interacting."""
return self.__stopped.done()
def is_dispatching(self) -> bool:
""":class:`bool`: Whether the view has been added for dispatching purposes."""
return self.__cancel_callback is not None
def is_persistent(self) -> bool:
""":class:`bool`: Whether the view is set up as persistent.
A persistent view has all their components with a set ``custom_id`` and
a :attr:`timeout` set to ``None``.
"""
return self.timeout is None and all(item.is_persistent() for item in self.children)
async def wait(self) -> bool:
"""Waits until the view has finished interacting.
A view is considered finished when :meth:`stop` is called
or it times out.
Returns
--------
:class:`bool`
If ``True``, then the view timed out. If ``False`` then
the view finished normally.
"""
return await self.__stopped
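# Editor's usage sketch (illustrative, not part of this module). A typical subclass
# pairs View with the item decorators from discord.ui; the callback signature below
# follows _ViewCallback above, i.e. (view, interaction, item):
#
#   class Confirm(View):
#       @discord.ui.button(label='Confirm')
#       async def confirm(self, interaction, button):
#           await interaction.response.send_message('Confirmed')
#           self.stop()
#
#   # await channel.send('Proceed?', view=Confirm())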
class ViewStore:
def __init__(self, state: ConnectionState):
# (component_type, message_id, custom_id): (View, Item)
self._views: Dict[Tuple[int, Optional[int], str], Tuple[View, Item]] = {}
# message_id: View
self._synced_message_views: Dict[int, View] = {}
# custom_id: Modal
self._modals: Dict[str, Modal] = {}
self._state: ConnectionState = state
@property
def persistent_views(self) -> Sequence[View]:
# fmt: off
views = {
view.id: view
for (_, (view, _)) in self._views.items()
if view.is_persistent()
}
# fmt: on
return list(views.values())
def __verify_integrity(self):
to_remove: List[Tuple[int, Optional[int], str]] = []
for (k, (view, _)) in self._views.items():
if view.is_finished():
to_remove.append(k)
for k in to_remove:
del self._views[k]
def add_view(self, view: View, message_id: Optional[int] = None) -> None:
view._start_listening_from_store(self)
if view.__discord_ui_modal__:
self._modals[view.custom_id] = view # type: ignore
return
self.__verify_integrity()
for item in view.children:
if item.is_dispatchable():
self._views[(item.type.value, message_id, item.custom_id)] = (view, item) # type: ignore
if message_id is not None:
self._synced_message_views[message_id] = view
def remove_view(self, view: View) -> None:
if view.__discord_ui_modal__:
self._modals.pop(view.custom_id, None) # type: ignore
return
for item in view.children:
if item.is_dispatchable():
self._views.pop((item.type.value, item.custom_id), None) # type: ignore
for key, value in self._synced_message_views.items():
if value.id == view.id:
del self._synced_message_views[key]
break
def dispatch_view(self, component_type: int, custom_id: str, interaction: Interaction) -> None:
self.__verify_integrity()
message_id: Optional[int] = interaction.message and interaction.message.id
key = (component_type, message_id, custom_id)
# Fallback to None message_id searches in case a persistent view
# was added without an associated message_id
value = self._views.get(key) or self._views.get((component_type, None, custom_id))
if value is None:
return
view, item = value
item._refresh_state(interaction.data) # type: ignore
view._dispatch_item(item, interaction)
def dispatch_modal(
self,
custom_id: str,
interaction: Interaction,
components: List[ModalSubmitComponentInteractionDataPayload],
) -> None:
modal = self._modals.get(custom_id)
if modal is None:
_log.debug("Modal interaction referencing unknown custom_id %s. Discarding", custom_id)
return
modal._refresh(components)
modal._dispatch_submit(interaction)
def is_message_tracked(self, message_id: int) -> bool:
return message_id in self._synced_message_views
def remove_message_tracking(self, message_id: int) -> Optional[View]:
return self._synced_message_views.pop(message_id, None)
def update_from_message(self, message_id: int, components: List[ComponentPayload]) -> None:
# pre-req: is_message_tracked == true
view = self._synced_message_views[message_id]
view._refresh([_component_factory(d) for d in components])
| 34.078098
| 118
| 0.6251
|
63f92e16793d137549c31e3a1cd9362a2d4c035d
| 1,109
|
py
|
Python
|
sauer/6.1.py
|
odderikf/matfys_math
|
c0600faa59ac5becf0614ac83065216f1a1520f2
|
[
"MIT"
] | null | null | null |
sauer/6.1.py
|
odderikf/matfys_math
|
c0600faa59ac5becf0614ac83065216f1a1520f2
|
[
"MIT"
] | null | null | null |
sauer/6.1.py
|
odderikf/matfys_math
|
c0600faa59ac5becf0614ac83065216f1a1520f2
|
[
"MIT"
] | null | null | null |
#%% setup
import numpy as np
from matplotlib import pyplot as plt
def euler(f, y_0, start, stop, step):
w_i = y_0
w = [w_i]
t = np.arange(start, stop+step, step) # include endpoint, therefore add step
for t_i in t[:-1]:
w_i += step*f(t_i, w_i)
w.append(w_i)
return t, w
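# Editor's worked example (added for clarity, not in the original): for y' = t,
# y(0) = 1 and h = 0.1, the first Euler steps are
#   w1 = 1 + 0.1*0.0 = 1.000   (exact y(0.1) = 1.005)
#   w2 = 1 + 0.1*0.1 = 1.010   (exact y(0.2) = 1.020)
# consistent with the O(h) global error visible in the error columns printed below.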
def y_a(t):
return 0.5*t*t + 1
def y_b(t):
return np.e ** (t*t*t/3)
def dydt_a(t, y):
return t
def dydt_b(t, y):
return t*t*y
#%% run
h = 0.1
a, b = 0., 1.
y_0 = 1.
t, w_a = euler(dydt_a, y_0, a, b, h)
_, w_b = euler(dydt_b, y_0, a, b, h)
#%% print a
print("t: ", '|', "w: ", '|', "error: ")
for i in range(len(t)):
print(f'{t[i]:+6f} | {w_a[i]:+6f} | {y_a(t[i]) - w_a[i]:+6f}')
#%% print b (exercise)
print("t: ", '|', "w: ", '|', "error: ")
for i in range(len(t)):
print(f'{t[i]:+6f} | {w_b[i]:+6f} | {y_b(t[i]) - w_b[i]:+6f}')
#%% plot a
plt.plot(t, w_a, 'r')
plt.scatter(t, w_a, c='r', s=15)
plt.plot(t, y_a(t), 'b')
plt.show()
#%% plot b
plt.plot(t, w_b, 'r')
plt.scatter(t, w_b, c='r', s=15)
plt.plot(t, y_b(t), 'b')
plt.show()
| 18.79661
| 81
| 0.499549
|
cb798fe66483f9ecc8abd12ce66737f0d6c3eb33
| 24,478
|
py
|
Python
|
mlp/views.py
|
cocoemily/paleocore2
|
34b9ba30358963a0a1c8ae7252ed7c5ef178a758
|
[
"MIT"
] | null | null | null |
mlp/views.py
|
cocoemily/paleocore2
|
34b9ba30358963a0a1c8ae7252ed7c5ef178a758
|
[
"MIT"
] | null | null | null |
mlp/views.py
|
cocoemily/paleocore2
|
34b9ba30358963a0a1c8ae7252ed7c5ef178a758
|
[
"MIT"
] | null | null | null |
# External Libraries
import os
from fastkml import kml, Placemark, Folder, Document
from lxml import etree
from datetime import datetime
from django.contrib.gis.geos import GEOSGeometry, Point
from pygeoif import geometry
from zipfile import ZipFile
# Django Libraries
from django.conf import settings
from django.views import generic
from django.http import HttpResponse
from django.shortcuts import render_to_response, redirect
from django.template import RequestContext
from django.contrib import messages
from dateutil.parser import parse
from django.core.files.base import ContentFile
# App Libraries
from .models import Occurrence, Biology, Archaeology, Geology, Taxon, IdentificationQualifier
from .forms import UploadKMLForm, DownloadKMLForm, ChangeXYForm, Occurrence2Biology, DeleteAllForm
from .utilities import html_escape, get_finds
from .ontologies import * # import vocabularies and choice lists
class Confirmation(generic.ListView):
template_name = 'projects/confirmation.html'
model = Occurrence
class ImportKMZ(generic.FormView):
template_name = 'admin/projects/import_kmz.html'
form_class = UploadKMLForm
context_object_name = 'upload'
success_url = '../?last_import__exact=1'
# Define a routine for importing Placemarks from a list of placemark elements
def get_import_file(self):
return self.request.FILES['kmlfileUpload'] # get a handle on the file
def get_import_file_extension(self):
import_file = self.get_import_file()
import_file_name = import_file.name
return import_file_name[import_file_name.rfind('.') + 1:] # get the file extension
def get_kmz_file(self):
import_file = self.get_import_file()
return ZipFile(import_file, 'r')
def get_kml_file(self):
"""
read the form and fetch the kml or kmz file
:return: return a kml.KML() object
"""
# TODO parse the kml file more smartly to locate the first placemark and work from there.
import_file = self.get_import_file()
#kml_file_upload_name = kml_file_upload.name # get the file name
# kml_file_name = kml_file_upload_name[:kml_file_upload_name.rfind('.')] # get the file name no extension
kml_file_extension = self.get_import_file_extension() # get the file extension
kml_file_path = os.path.join(settings.MEDIA_ROOT)
kml_file = kml.KML()
if kml_file_extension == "kmz":
kmz_file = self.get_kmz_file()
kml_document = kmz_file.open('doc.kml', 'r').read()
else:
# read() loads entire file as one string
kml_document = open(kml_file_path + "/" + import_file.name, 'r').read()
kml_file.from_string(kml_document) # pass contents of kml string to kml document instance for parsing
return kml_file
def import_placemarks(self, kml_placemark_list):
"""
A procedure that reads a KML placemark list and saves the data into the django database
:param kml_placemark_list:
:return:
"""
message_string = ''
occurrence_count, archaeology_count, biology_count, geology_count = [0, 0, 0, 0]
Occurrence.objects.all().update(last_import=False) # Toggle off all last imports
for o in kml_placemark_list:
# Check to make sure that the object is a Placemark, filter out folder objects
if type(o) is Placemark:
# Step 1 - parse the xml and copy placemark attributes to a dictionary
escaped_description = html_escape(o.description) # escape &
                table = etree.fromstring(escaped_description)  # get the table element containing the data from the xml.
attributes = table.xpath("//text()|//img") # get all text values and image tags from xml string
# Create a dictionary from the attribute list. The list has key value pairs as alternating
# elements in the list, the line below takes the first and every other elements and adds them
# as keys, then the second and every other element and adds them as values.
# e.g.
# attributes[0::2] = ["Basis of Record", "Time", "Item Type" ...]
# attributes[1::2] = ["Collection", "May 27, 2017, 10:12 AM", "Faunal" ...]
# zip creates a list of tuples = [("Basis of Record", "Collection), ...]
# which is converted to a dictionary.
if len(attributes) % 2 == 0: # attributes list should be even length
attributes_dict = dict(zip(attributes[0::2], attributes[1::2]))
else:
raise KeyError
# Step 2 - Create a new Occurrence object (or subtype)
lgrp_occ = None
# Determine the appropriate subtype and initialize
item_type = attributes_dict.get("Item Type")
occurrence_count += 1
# variables imported from .ontologies
if item_type in (ontologies.artifactual, "Artifactual", "Archeology", "Archaeological"):
lgrp_occ = Archaeology()
archaeology_count += 1
elif item_type in (ontologies.faunal, "Fauna", "Floral", "Flora"):
lgrp_occ = Biology()
biology_count += 1
elif item_type in (ontologies.geological, "Geology"):
lgrp_occ = Geology()
geology_count += 1
# Step 3 - Copy attributes from dictionary to Occurrence object, validate as we go.
# Improve by checking each field to see if it has a choice list. If so validate against choice
# list.
# Verbatim Data - save a verbatim copy of the original kml placemark coordinates and attributes.
if o.geometry.wkt:
geom = ['geom', o.geometry.wkt]
else:
geom = ['geom', 'No coordinates']
lgrp_occ.verbatim_kml_data = attributes + geom
# Validate Basis of Record
if attributes_dict.get("Basis Of Record") in (ontologies.fossil_specimen, "Fossil", "Collection"):
# TODO update basis_of_record vocab, change Fossil Specimen to Collection
lgrp_occ.basis_of_record = ontologies.fossil_specimen # from .ontologies
elif attributes_dict.get("Basis Of Record") in (ontologies.human_observation, "Observation"):
lgrp_occ.basis_of_record = ontologies.human_observation # from .ontologies
# Validate Item Type
item_type = attributes_dict.get("Item Type")
if item_type in (ontologies.artifactual, "Artifact", "Archeology", "Archaeological"):
lgrp_occ.item_type = ontologies.artifactual
elif item_type in (ontologies.faunal, "Fauna"):
lgrp_occ.item_type = ontologies.faunal
elif item_type in (ontologies.floral, "Flora"):
lgrp_occ.item_type = ontologies.floral
elif item_type in (ontologies.geological, "Geology"):
lgrp_occ.item_type = ontologies.geological
# Date Recorded
error_string = ''
try:
# parse the time
lgrp_occ.date_recorded = parse(attributes_dict.get("Time"))
# set the year collected form field number
lgrp_occ.year_collected = lgrp_occ.date_recorded.year
except ValueError:
# If there's a problem getting the fieldnumber, use the current date time and set the
# problem flag to True.
lgrp_occ.date_recorded = datetime.now()
lgrp_occ.problem = True
try:
error_string = "Upload error, missing field number, using current date and time instead."
lgrp_occ.problem_comment = lgrp_occ.problem_comment + " " + error_string
except TypeError:
lgrp_occ.problem_comment = error_string
# Process point, comes in as well known text string
# Assuming point is in GCS WGS84 datum = SRID 4326
pnt = GEOSGeometry("POINT (" + str(o.geometry.x) + " " + str(o.geometry.y) + ")", 4326) # WKT
lgrp_occ.geom = pnt
scientific_name_string = attributes_dict.get("Scientific Name")
lgrp_occ.item_scientific_name = scientific_name_string
# Next step only applies to Biology objects
if lgrp_occ.item_scientific_name and lgrp_occ.__class__ is Biology:
match, match_count, match_list = lgrp_occ.match_taxon()
if match and match_count == 1:
lgrp_occ.taxon = match_list[0]
lgrp_occ.item_description = attributes_dict.get("Description")
# if lgrp_occ.item_description:
# match, match_count, match_list = match_element(lgrp_occ)
# if match and match_count ==1:
# lgrp_occ.element = lgrp_occ.item_description.lower()
#######################
# NON-REQUIRED FIELDS #
#######################
lgrp_occ.barcode = attributes_dict.get("Barcode")
lgrp_occ.item_number = lgrp_occ.barcode
lgrp_occ.collection_remarks = attributes_dict.get("Collecting Remarks")
lgrp_occ.geology_remarks = attributes_dict.get("Geology Remarks")
lgrp_occ.collecting_method = attributes_dict.get("Collection Method")
finder_string = attributes_dict.get("Finder")
lgrp_occ.finder = finder_string
# import person object, validated against look up data in Person table
# lgrp_occ.finder_person, created = Person.objects.get_or_create(name=finder_string)
collector_string = attributes_dict.get("Collector")
lgrp_occ.collector = collector_string
# import person object, validated against look up data in Person table
# lgrp_occ.collector_person, created = Person.objects.get_or_create(name=collector_string)
lgrp_occ.individual_count = attributes_dict.get("Count")
if attributes_dict.get("In Situ") in ('No', "NO", 'no'):
lgrp_occ.in_situ = False
elif attributes_dict.get("In Situ") in ('Yes', "YES", 'yes'):
lgrp_occ.in_situ = True
if attributes_dict.get("Ranked Unit") in ('No', "NO", 'no'):
lgrp_occ.ranked = False
elif attributes_dict.get("Ranked Unit") in ('Yes', "YES", 'yes'):
lgrp_occ.ranked = True
unit_found_string = attributes_dict.get("Unit Found")
unit_likely_string = attributes_dict.get("Unit Likely")
lgrp_occ.analytical_unit_found = unit_found_string
lgrp_occ.analytical_unit_likely = unit_likely_string
lgrp_occ.analytical_unit_1 = attributes_dict.get("Unit 1")
lgrp_occ.analytical_unit_2 = attributes_dict.get("Unit 2")
lgrp_occ.analytical_unit_3 = attributes_dict.get("Unit 3")
                # import stratigraphy object, validate against look up data in Stratigraphy table
# lgrp_occ.unit_found, created = StratigraphicUnit.objects.get_or_create(name=unit_found_string)
# lgrp_occ.unit_likly, created = StratigraphicUnit.objects.get_or_create(name=unit_likely_string)
# Save Occurrence before saving media. Need id to rename media files
lgrp_occ.last_import = True
lgrp_occ.save()
# Save image
kmz_file = self.get_kmz_file()
if self.get_import_file_extension().lower() == "kmz":
# grab image names from XML
image_names = table.xpath("//img/@src")
# grab the name of the first image
# Future: add functionality to import multiple images
if image_names and len(image_names) == 1: # This will break if image_names is None
image_name = image_names[0]
# Check that the image name is in the kmz file list
kmz_file.filenames = [f.orig_filename for f in kmz_file.filelist]
if image_name in kmz_file.filenames:
                            # fetch the kmz image file object, this is a ZipInfo object not a File object
image_file_obj = next(f for f in kmz_file.filelist if f.orig_filename == image_name)
# fetch the upload directory from the model definition
upload_dir = Biology._meta.get_field('image').upload_to
# update image name to include upload path and occurrence id
# e.g. /uploads/images/lgrp/14775_188.jpg
new_image_name = os.path.join(upload_dir, str(lgrp_occ.id) + '_' + image_name)
# Save the image
lgrp_occ.image.save(new_image_name, ContentFile(kmz_file.read(image_file_obj)))
elif type(o) is not Placemark:
raise IOError("KML File is badly formatted")
if occurrence_count == 1:
message_string = '1 occurrence'
if occurrence_count > 1:
message_string = '{} occurrences'.format(occurrence_count)
messages.add_message(self.request, messages.INFO,
'Successfully imported {} occurrences'.format(message_string))
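    # Editor's sketch of the expected placemark payload (illustrative; the barcode
    # value is made up). import_placemarks() above reads alternating label/value
    # text nodes from an HTML table embedded in the placemark description, e.g.
    #   <table>
    #     <tr><td>Basis Of Record</td><td>Collection</td></tr>
    #     <tr><td>Item Type</td><td>Faunal</td></tr>
    #     <tr><td>Time</td><td>May 27, 2017, 10:12 AM</td></tr>
    #     <tr><td>Barcode</td><td>14775</td></tr>
    #   </table>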
def form_valid(self, form):
# This method is called when valid form data has been POSTed.
# It should return an HttpResponse.
kml_file = self.get_kml_file()
# get the top level features object (this is essentially the layers list)
level1_elements = list(kml_file.features())
# Check that the kml file is well-formed with a single document element.
if len(level1_elements) == 1 and type(level1_elements[0]) == Document:
document = level1_elements[0]
# If well-formed document, check if the file has folders, which correspond to layers
level2_elements = list(document.features())
if len(level2_elements) == 1 and type(level2_elements[0]) == Folder:
folder = level2_elements[0]
# If a single folder is present import placemarks from that folder
# Get features from the folder
level3_elements = list(folder.features())
# Check that the features are Placemarks. If they are, import them
if len(level3_elements) >= 1 and type(level3_elements[0]) == Placemark:
placemark_list = level3_elements
self.import_placemarks(placemark_list)
elif len(level2_elements) >= 1 and type(level2_elements[0]) == Placemark:
placemark_list = level2_elements
self.import_placemarks(placemark_list)
return super(ImportKMZ, self).form_valid(form)
class DeleteAll(generic.FormView):
template_name = 'admin/mlp/occurrence/delete_confirmation.html'
success_url = '../'
form_class = DeleteAllForm
def form_valid(self, form):
for o in Occurrence.objects.all():
o.delete()
return super(DeleteAll, self).form_valid(form)
class Summary(generic.ListView):
template_name = 'admin/mlp/occurrence/summary.html'
model = Occurrence
context_object_name = 'occurrences'
@staticmethod
def create_occurrence_count_table():
"""
Creates a table of occurrence counts by subclass
:return:
"""
html_table = """
<table>
<tr>
<th>Instance</th>
<th>Collected</th>
<th>Observed</th>
<th>Total Count</th>
</tr>
<tr>
<td>No Fossils</td>
<td>--</td>
<td>{no_fossil_count}</td>
<td>{no_fossil_count}</td>
</tr>
<tr>
<td>Archaeology</td>
<td>{collected_archaeology_count}</td>
<td>{observed_archaeology_count}</td>
<td>{total_archaeology_count}</td>
</tr>
<tr>
<td>Biology</td>
<td>{collected_biology_count}</td>
<td>{observed_biology_count}</td>
<td>{total_biology_count}</td>
</tr>
<tr>
<td>Geology</td>
<td>{collected_geology_count}</td>
<td>{observed_geology_count}</td>
<td>{total_geology_count}</td>
</tr>
<tr>
<td>Totals</td>
<td>{collected_occurrence_count}</td>
<td>{observed_occurrence_count}</td>
<td>{total_occurrence_count}</td>
</tr>
</table>
""".format(
no_fossil_count=get_finds().count(),
total_archaeology_count=Archaeology.objects.all().count(),
collected_archaeology_count=Archaeology.objects.filter(basis_of_record='FossilSpecimen').count(),
observed_archaeology_count=Archaeology.objects.filter(basis_of_record='HumanObservation').count(),
total_biology_count=Biology.objects.all().count(),
collected_biology_count=Biology.objects.filter(basis_of_record='FossilSpecimen').count(),
observed_biology_count=Biology.objects.filter(basis_of_record='HumanObservation').count(),
total_geology_count=Geology.objects.all().count(),
collected_geology_count=Geology.objects.filter(basis_of_record='FossilSpecimen').count(),
observed_geology_count=Geology.objects.filter(basis_of_record='HumanObservation').count(),
total_occurrence_count=Occurrence.objects.all().count(),
collected_occurrence_count=Occurrence.objects.filter(basis_of_record='FossilSpecimen').count(),
observed_occurrence_count=Occurrence.objects.filter(basis_of_record='HumanObservation').count(),
)
return html_table
def warnings(self):
result = {'warning_flag': False}
if Occurrence.get_duplicate_barcodes():
result['warning_flag'] = True
result['duplicate_barcodes'] = Occurrence.get_duplicate_barcodes()
result['duplicate_barcode_objects'] = Occurrence.get_duplicate_barcode_objects()
if Occurrence.get_missing_barcode_objects():
result['warning_flag'] = True
result['missing_barcodes'] = Occurrence.get_missing_barcode_objects()
return result
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context['mlp_occurrence_count'] = Occurrence.objects.all().count()
context['mlp_archaeology_count'] = Archaeology.objects.all().count()
context['mlp_biology_count'] = Biology.objects.all().count()
context['mlp_geology_count'] = Geology.objects.all().count()
context['occurrence_count_table'] = self.create_occurrence_count_table()
context['warnings'] = self.warnings()
return context
def change_coordinates_view(request):
if request.method == "POST":
form = ChangeXYForm(request.POST)
if form.is_valid():
obs = Occurrence.objects.get(pk=request.POST["DB_id"])
# coordinates = utm.to_latlon(float(request.POST["new_easting"]),
# float(request.POST["new_northing"]), 37, "N")
# pnt = GEOSGeometry("POINT (" + str(coordinates[1]) + " " + str(coordinates[0]) + ")", 4326) # WKT
pnt = GEOSGeometry("POINT (" + request.POST["new_easting"] + " " + request.POST["new_northing"] + ")",
32637)
obs.geom = pnt
obs.save()
messages.add_message(request, messages.INFO,
'Successfully Updated Coordinates For %s.' % obs.catalog_number)
return redirect("/admin/mlp/occurrence")
else:
selected = list(request.GET.get("ids", "").split(","))
if len(selected) > 1:
messages.error(request, "You can't change the coordinates of multiple points at once.")
return redirect("/admin/mlp/occurrence")
selected_object = Occurrence.objects.get(pk=int(selected[0]))
initial_data = {"DB_id": selected_object.id,
"barcode": selected_object.barcode,
"old_easting": selected_object.easting,
"old_northing": selected_object.northing,
"item_scientific_name": selected_object.item_scientific_name,
"item_description": selected_object.item_description
}
the_form = ChangeXYForm(initial=initial_data)
return render_to_response('projects/changeXY.html', {"theForm": the_form}, RequestContext(request))
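# Editor's note (illustrative, coordinate values made up): the view above builds the
# point from UTM values as WKT, e.g.
#   GEOSGeometry("POINT (539000 1175000)", 32637)
# i.e. easting then northing, with SRID 32637 identifying WGS 84 / UTM zone 37N.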
def occurrence2biology_view(request):
if request.method == "POST":
form = Occurrence2Biology(request.POST)
if form.is_valid():
occurrence_object = Occurrence.objects.get(barcode__exact=request.POST["barcode"])
if occurrence_object.item_type in ('Faunal', 'Floral'):
taxon = Taxon.objects.get(pk=request.POST["taxon"])
id_qual = IdentificationQualifier.objects.get(pk=request.POST["identification_qualifier"])
new_biology = Biology(barcode=occurrence_object.barcode,
item_type=occurrence_object.item_type,
basis_of_record=occurrence_object.basis_of_record,
collecting_method=occurrence_object.collecting_method,
field_number=occurrence_object.field_number,
taxon=taxon,
identification_qualifier=id_qual,
geom=occurrence_object.geom
)
for key in list(occurrence_object.__dict__.keys()):
new_biology.__dict__[key] = occurrence_object.__dict__[key]
occurrence_object.delete()
new_biology.save()
messages.add_message(request, messages.INFO,
'Successfully converted occurrence to biology.')
else:
messages.error(request, "Can only convert items of type Faunal or Floral")
return redirect("/admin/mlp/occurrence")
else:
selected = list(request.GET.get("ids", "").split(","))
if len(selected) > 1:
messages.add_message(request, messages.INFO, "Do you wish to update all the following occurrences?")
return redirect("/admin/mlp/occurrence")
selected_object = Occurrence.objects.get(pk=int(selected[0]))
initial_data = {
"barcode": selected_object.barcode,
"catalog_number": selected_object.catalog_number,
"basis_of_record": selected_object.basis_of_record,
"item_type": selected_object.item_type,
"collector": selected_object.collector,
"collecting_method": selected_object.collecting_method,
"field_number": selected_object.field_number,
"year_collected": selected_object.year_collected,
"item_scientific_name": selected_object.item_scientific_name,
"item_description": selected_object.item_description
}
the_form = Occurrence2Biology(initial=initial_data)
return render_to_response('projects/occurrence2biology.html',
{"theForm": the_form, "initial_data": initial_data}, RequestContext(request))
| 50.889813
| 114
| 0.601234
|
90ea92d791287aa76140d8110c343e54466acee9
| 9,591
|
py
|
Python
|
tests/__init__.py
|
leucosticte/speedypywikibot
|
f03d2070aa5d30a1f82540b0eda6fa603eb341f9
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
leucosticte/speedypywikibot
|
f03d2070aa5d30a1f82540b0eda6fa603eb341f9
|
[
"MIT"
] | null | null | null |
tests/__init__.py
|
leucosticte/speedypywikibot
|
f03d2070aa5d30a1f82540b0eda6fa603eb341f9
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Package tests."""
#
# (C) Pywikibot team, 2007-2018
#
# Distributed under the terms of the MIT license.
#
from __future__ import absolute_import, print_function, unicode_literals
__all__ = ('requests', 'unittest', 'TestRequest',
'patch_request', 'unpatch_request', 'mock', 'Mock', 'patch')
import functools
import os
import warnings
# Verify that the unit tests have a base working environment:
# - requests is mandatory
# however if unavailable this will fail on use; see pywikibot/tools.py
# - unittest2; see below
# - mwparserfromhell is optional, so is only imported in textlib_tests
import requests
from pywikibot import config
import pywikibot.data.api
from pywikibot.data.api import CachedRequest
from pywikibot.data.api import Request as _original_Request
from pywikibot import i18n
from pywikibot.tools import PYTHON_VERSION
if PYTHON_VERSION == (2, 7, 2):
# Use unittest2 for python 2.7.2 (T106512)
import unittest2 as unittest
else:
import unittest
try:
import unittest.mock as mock
from unittest.mock import Mock, patch
except ImportError:
import mock
from mock import Mock, patch
_root_dir = os.path.split(os.path.split(__file__)[0])[0]
def join_root_path(*names):
"""Return a path relative to the root directory."""
return os.path.join(_root_dir, *names)
def create_path_func(base_func, subpath):
"""Return a function returning a path relative to the given directory."""
func = functools.partial(base_func, subpath)
func.path = base_func.path + '/' + subpath
func.__doc__ = 'Return a path relative to `{0}/`.'.format(func.path)
return func
join_root_path.path = 'root'
join_tests_path = create_path_func(join_root_path, 'tests')
join_cache_path = create_path_func(join_tests_path,
'apicache-py%d' % PYTHON_VERSION[0])
join_data_path = create_path_func(join_tests_path, 'data')
join_pages_path = create_path_func(join_tests_path, 'pages')
join_images_path = create_path_func(join_data_path, 'images')
join_xml_data_path = create_path_func(join_data_path, 'xml')
join_html_data_path = create_path_func(join_data_path, 'html')
# Find the root directory of the checkout
_pwb_py = join_root_path('pwb.py')
library_test_modules = [
'python',
'plural',
'deprecation',
'ui',
'ui_options',
'thread',
'tests',
'date',
'timestamp',
'mediawikiversion',
'tools',
'tools_chars',
'tools_ip',
'xmlreader',
'textlib',
'diff',
'http',
'namespace',
'dry_api',
'dry_site',
'api',
'exceptions',
'oauth',
'family',
'site',
'link',
'interwiki_link',
'interwiki_graph',
'basepage',
'page',
'category',
'file',
'djvu',
'proofreadpage',
'edit_failure',
'edit',
'logentry',
'timestripper',
'pagegenerators',
'cosmetic_changes',
'wikistats',
'weblib',
'i18n',
'tk',
'wikibase',
'wikibase_edit',
'flow',
'flow_edit',
'upload',
'site_detect',
'bot',
]
script_test_modules = [
'pwb',
'script',
'l10n',
'add_text',
'archivebot',
'category_bot',
'checkimages',
'data_ingestion',
'deletionbot',
'disambredir',
'isbn',
'protectbot',
'reflinks',
'template_bot',
'replacebot',
'uploadbot',
'weblinkchecker',
'cache',
]
disabled_test_modules = [
'tests', # tests of the tests package
'l10n',
]
if not i18n.messages_available():
disabled_test_modules.append('l10n')
disabled_tests = {
'textlib': [
'test_interwiki_format', # example; very slow test
],
'weblib': [
'testWebCiteOlder', # fails. T110640
],
}
def _unknown_test_modules():
"""List tests which are to be executed."""
dir_list = os.listdir(join_tests_path())
all_test_list = [name[0:-9] for name in dir_list # strip '_tests.py'
if name.endswith('_tests.py')
and not name.startswith('_')] # skip __init__.py and _*
unknown_test_modules = [name
for name in all_test_list
if name not in library_test_modules
and name not in script_test_modules]
return unknown_test_modules
extra_test_modules = sorted(_unknown_test_modules())
test_modules = library_test_modules + extra_test_modules + script_test_modules
if 'PYWIKIBOT_TEST_MODULES' in os.environ:
_enabled_test_modules = os.environ['PYWIKIBOT_TEST_MODULES'].split(',')
disabled_test_modules = set(test_modules) - set(_enabled_test_modules)
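# Illustrative use of the override handled above: a run can be limited to a
# couple of modules from the shell, e.g.
#
#   PYWIKIBOT_TEST_MODULES=site,page python -m unittest tests
#
# Every module not listed ends up in disabled_test_modules and is skipped by
# collector() below.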
def unittest_print(*args, **kwargs):
"""Print information in test log."""
print(*args, **kwargs) # noqa: T001
def collector(loader=unittest.loader.defaultTestLoader):
"""Load the default modules.
    This is the entry point specified in setup.py.
"""
# Note: Raising SkipTest during load_tests will
# cause the loader to fallback to its own
# discover() ordering of unit tests.
if disabled_test_modules:
unittest_print(
'Disabled test modules (to run: python -m unittest ...):\n %s'
% ', '.join(disabled_test_modules))
if extra_test_modules:
unittest_print(
'Extra test modules (run after library, before scripts):\n %s'
% ', '.join(extra_test_modules))
if disabled_tests:
unittest_print(
'Skipping tests (to run: python -m unittest ...):\n %r'
% disabled_tests)
modules = [module
for module in test_modules
if module not in disabled_test_modules]
test_list = []
for module in modules:
module_class_name = 'tests.' + module + '_tests'
if module in disabled_tests:
discovered = loader.loadTestsFromName(module_class_name)
enabled_tests = []
for cls in discovered:
for test_func in cls:
if test_func._testMethodName not in disabled_tests[module]:
enabled_tests.append(
module_class_name + '.' +
test_func.__class__.__name__ + '.' +
test_func._testMethodName)
test_list.extend(enabled_tests)
else:
test_list.append(module_class_name)
tests = loader.loadTestsFromNames(test_list)
suite = unittest.TestSuite()
suite.addTests(tests)
return suite
def load_tests(loader=unittest.loader.defaultTestLoader,
tests=None, pattern=None):
"""Load the default modules."""
return collector(loader)
CachedRequest._get_cache_dir = classmethod(
lambda cls, *args: cls._make_dir(join_cache_path()))
# Travis-CI builds are set to retry twice, which aims to reduce the number
# of 'red' builds caused by intermittent server problems, while also avoiding
# the builds taking a long time due to retries.
# The following allows builds to retry twice, but higher default values are
# overridden here to restrict retries to only 1, so developer builds fail more
# frequently in code paths resulting from mishandled server problems.
if config.max_retries > 2:
if 'PYWIKIBOT_TEST_QUIET' not in os.environ:
unittest_print(
'tests: max_retries reduced from %d to 1' % config.max_retries)
config.max_retries = 1
# Raise CaptchaError if a test requires solving a captcha
config.solve_captcha = False
cache_misses = 0
cache_hits = 0
warnings.filterwarnings("always")
class TestRequest(CachedRequest):
"""Add caching to every Request except logins."""
def __init__(self, *args, **kwargs):
"""Constructor."""
super(TestRequest, self).__init__(0, *args, **kwargs)
@classmethod
def create_simple(cls, **kwargs):
"""Circumvent CachedRequest implementation."""
site = kwargs.pop('site')
return cls(site=site, parameters=kwargs)
def _expired(self, dt):
"""Never invalidate cached data."""
return False
def _load_cache(self):
"""Return whether the cache can be used."""
if not super(TestRequest, self)._load_cache():
global cache_misses
cache_misses += 1
return False
# tokens need careful management in the cache
        # and can't be aggressively cached.
# FIXME: remove once 'badtoken' is reliably handled in api.py
if 'intoken' in self._uniquedescriptionstr():
self._data = None
return False
if 'lgpassword' in self._uniquedescriptionstr():
self._data = None
return False
global cache_hits
cache_hits += 1
return True
def _write_cache(self, data):
"""Write data except login details."""
if 'intoken' in self._uniquedescriptionstr():
return
if 'lgpassword' in self._uniquedescriptionstr():
return
return super(TestRequest, self)._write_cache(data)
original_expired = None
def patch_request():
"""Patch Request classes with TestRequest."""
global original_expired
pywikibot.data.api.Request = TestRequest
original_expired = pywikibot.data.api.CachedRequest._expired
pywikibot.data.api.CachedRequest._expired = lambda *args, **kwargs: False
def unpatch_request():
"""Un-patch Request classes with TestRequest."""
pywikibot.data.api.Request = _original_Request
pywikibot.data.api.CachedRequest._expired = original_expired
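# A minimal usage sketch for the two helpers above (hypothetical test module,
# not part of this package): patch the API layer so requests go through the
# caching TestRequest for the duration of the module, then restore it.
#
#     from tests import patch_request, unpatch_request
#
#     def setUpModule():
#         patch_request()
#
#     def tearDownModule():
#         unpatch_request()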
| 27.880814
| 79
| 0.650193
|
b337311a3787ee379906a86083bb86439d147419
| 28,754
|
py
|
Python
|
synapse/rest/admin/users.py
|
kleph/synapse
|
c97da1e45dd0b1f7f31a7e204a304ec2fba17687
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/admin/users.py
|
kleph/synapse
|
c97da1e45dd0b1f7f31a7e204a304ec2fba17687
|
[
"Apache-2.0"
] | null | null | null |
synapse/rest/admin/users.py
|
kleph/synapse
|
c97da1e45dd0b1f7f31a7e204a304ec2fba17687
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import hashlib
import hmac
import logging
from http import HTTPStatus
from typing import Tuple
from synapse.api.constants import UserTypes
from synapse.api.errors import Codes, NotFoundError, SynapseError
from synapse.http.servlet import (
RestServlet,
assert_params_in_dict,
parse_boolean,
parse_integer,
parse_json_object_from_request,
parse_string,
)
from synapse.http.site import SynapseRequest
from synapse.rest.admin._base import (
admin_patterns,
assert_requester_is_admin,
assert_user_is_admin,
historical_admin_path_patterns,
)
from synapse.types import JsonDict, UserID
logger = logging.getLogger(__name__)
_GET_PUSHERS_ALLOWED_KEYS = {
"app_display_name",
"app_id",
"data",
"device_display_name",
"kind",
"lang",
"profile_tag",
"pushkey",
}
class UsersRestServlet(RestServlet):
PATTERNS = historical_admin_path_patterns("/users/(?P<user_id>[^/]*)$")
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
async def on_GET(self, request, user_id):
target_user = UserID.from_string(user_id)
await assert_requester_is_admin(self.auth, request)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only users a local user")
ret = await self.store.get_users()
return 200, ret
class UsersRestServletV2(RestServlet):
PATTERNS = admin_patterns("/users$", "v2")
"""Get request to list all local users.
This needs user to have administrator access in Synapse.
GET /_synapse/admin/v2/users?from=0&limit=10&guests=false
returns:
200 OK with list of users if success otherwise an error.
The parameters `from` and `limit` are required only for pagination.
By default, a `limit` of 100 is used.
The parameter `user_id` can be used to filter by user id.
The parameter `name` can be used to filter by user id or display name.
The parameter `guests` can be used to exclude guest users.
The parameter `deactivated` can be used to include deactivated users.
"""
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
async def on_GET(self, request):
await assert_requester_is_admin(self.auth, request)
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
user_id = parse_string(request, "user_id", default=None)
name = parse_string(request, "name", default=None)
guests = parse_boolean(request, "guests", default=True)
deactivated = parse_boolean(request, "deactivated", default=False)
users, total = await self.store.get_users_paginate(
start, limit, user_id, name, guests, deactivated
)
ret = {"users": users, "total": total}
if len(users) >= limit:
ret["next_token"] = str(start + len(users))
return 200, ret
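# Client-side consumption of the endpoint above is a loop over `next_token`.
# Illustrative sketch only -- it assumes the third-party `requests` package
# and an admin access token, neither of which this module provides:
#
#     import requests
#
#     def fetch_all_users(base_url, access_token, limit=100):
#         users, start = [], 0
#         while True:
#             resp = requests.get(
#                 base_url + "/_synapse/admin/v2/users",
#                 params={"from": start, "limit": limit},
#                 headers={"Authorization": "Bearer " + access_token},
#             ).json()
#             users.extend(resp["users"])
#             if "next_token" not in resp:
#                 return users
#             start = int(resp["next_token"])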
class UserRestServletV2(RestServlet):
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)$", "v2")
"""Get request to list user details.
This needs user to have administrator access in Synapse.
GET /_synapse/admin/v2/users/<user_id>
returns:
200 OK with user details if success otherwise an error.
Put request to allow an administrator to add or modify a user.
This needs user to have administrator access in Synapse.
We use PUT instead of POST since we already know the id of the user
object to create. POST could be used to create guests.
PUT /_synapse/admin/v2/users/<user_id>
{
"password": "secret",
"displayname": "User"
}
returns:
201 OK with new user object if user was created or
200 OK with modified user object if user was modified
otherwise an error.
"""
def __init__(self, hs):
self.hs = hs
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
self.store = hs.get_datastore()
self.auth_handler = hs.get_auth_handler()
self.profile_handler = hs.get_profile_handler()
self.set_password_handler = hs.get_set_password_handler()
self.deactivate_account_handler = hs.get_deactivate_account_handler()
self.registration_handler = hs.get_registration_handler()
self.pusher_pool = hs.get_pusherpool()
async def on_GET(self, request, user_id):
await assert_requester_is_admin(self.auth, request)
target_user = UserID.from_string(user_id)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only lookup local users")
ret = await self.admin_handler.get_user(target_user)
if not ret:
raise NotFoundError("User not found")
return 200, ret
async def on_PUT(self, request, user_id):
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
target_user = UserID.from_string(user_id)
body = parse_json_object_from_request(request)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "This endpoint can only be used with local users")
user = await self.admin_handler.get_user(target_user)
user_id = target_user.to_string()
if user: # modify user
if "displayname" in body:
await self.profile_handler.set_displayname(
target_user, requester, body["displayname"], True
)
if "threepids" in body:
# check for required parameters for each threepid
for threepid in body["threepids"]:
assert_params_in_dict(threepid, ["medium", "address"])
# remove old threepids from user
threepids = await self.store.user_get_threepids(user_id)
for threepid in threepids:
try:
await self.auth_handler.delete_threepid(
user_id, threepid["medium"], threepid["address"], None
)
except Exception:
logger.exception("Failed to remove threepids")
raise SynapseError(500, "Failed to remove threepids")
# add new threepids to user
current_time = self.hs.get_clock().time_msec()
for threepid in body["threepids"]:
await self.auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], current_time
)
if "avatar_url" in body and type(body["avatar_url"]) == str:
await self.profile_handler.set_avatar_url(
target_user, requester, body["avatar_url"], True
)
if "admin" in body:
set_admin_to = bool(body["admin"])
if set_admin_to != user["admin"]:
auth_user = requester.user
if target_user == auth_user and not set_admin_to:
raise SynapseError(400, "You may not demote yourself.")
await self.store.set_server_admin(target_user, set_admin_to)
if "password" in body:
if not isinstance(body["password"], str) or len(body["password"]) > 512:
raise SynapseError(400, "Invalid password")
else:
new_password = body["password"]
logout_devices = True
new_password_hash = await self.auth_handler.hash(new_password)
await self.set_password_handler.set_password(
target_user.to_string(),
new_password_hash,
logout_devices,
requester,
)
if "deactivated" in body:
deactivate = body["deactivated"]
if not isinstance(deactivate, bool):
raise SynapseError(
400, "'deactivated' parameter is not of type boolean"
)
if deactivate and not user["deactivated"]:
await self.deactivate_account_handler.deactivate_account(
target_user.to_string(), False
)
elif not deactivate and user["deactivated"]:
if "password" not in body:
raise SynapseError(
400, "Must provide a password to re-activate an account."
)
await self.deactivate_account_handler.activate_account(
target_user.to_string()
)
user = await self.admin_handler.get_user(target_user)
return 200, user
else: # create user
password = body.get("password")
password_hash = None
if password is not None:
if not isinstance(password, str) or len(password) > 512:
raise SynapseError(400, "Invalid password")
password_hash = await self.auth_handler.hash(password)
admin = body.get("admin", None)
user_type = body.get("user_type", None)
displayname = body.get("displayname", None)
if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
raise SynapseError(400, "Invalid user type")
user_id = await self.registration_handler.register_user(
localpart=target_user.localpart,
password_hash=password_hash,
admin=bool(admin),
default_display_name=displayname,
user_type=user_type,
by_admin=True,
)
if "threepids" in body:
# check for required parameters for each threepid
for threepid in body["threepids"]:
assert_params_in_dict(threepid, ["medium", "address"])
current_time = self.hs.get_clock().time_msec()
for threepid in body["threepids"]:
await self.auth_handler.add_threepid(
user_id, threepid["medium"], threepid["address"], current_time
)
if (
self.hs.config.email_enable_notifs
and self.hs.config.email_notif_for_new_users
):
await self.pusher_pool.add_pusher(
user_id=user_id,
access_token=None,
kind="email",
app_id="m.email",
app_display_name="Email Notifications",
device_display_name=threepid["address"],
pushkey=threepid["address"],
lang=None, # We don't know a user's language here
data={},
)
if "avatar_url" in body and type(body["avatar_url"]) == str:
await self.profile_handler.set_avatar_url(
user_id, requester, body["avatar_url"], True
)
ret = await self.admin_handler.get_user(target_user)
return 201, ret
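# A fuller request body accepted by on_PUT above (illustrative values only).
# Every key is optional and is handled independently by the branches in
# on_PUT:
#
#     PUT /_synapse/admin/v2/users/@alice:example.com
#     {
#         "password": "secret",
#         "displayname": "Alice",
#         "avatar_url": "mxc://example.com/abc123",
#         "admin": false,
#         "deactivated": false,
#         "threepids": [
#             {"medium": "email", "address": "alice@example.com"}
#         ]
#     }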
class UserRegisterServlet(RestServlet):
"""
Attributes:
NONCE_TIMEOUT (int): Seconds until a generated nonce won't be accepted
nonces (dict[str, int]): The nonces that we will accept. A dict of
nonce to the time it was generated, in int seconds.
"""
PATTERNS = historical_admin_path_patterns("/register")
NONCE_TIMEOUT = 60
def __init__(self, hs):
self.auth_handler = hs.get_auth_handler()
self.reactor = hs.get_reactor()
self.nonces = {}
self.hs = hs
def _clear_old_nonces(self):
"""
Clear out old nonces that are older than NONCE_TIMEOUT.
"""
now = int(self.reactor.seconds())
for k, v in list(self.nonces.items()):
if now - v > self.NONCE_TIMEOUT:
del self.nonces[k]
def on_GET(self, request):
"""
Generate a new nonce.
"""
self._clear_old_nonces()
nonce = self.hs.get_secrets().token_hex(64)
self.nonces[nonce] = int(self.reactor.seconds())
return 200, {"nonce": nonce}
async def on_POST(self, request):
self._clear_old_nonces()
if not self.hs.config.registration_shared_secret:
raise SynapseError(400, "Shared secret registration is not enabled")
body = parse_json_object_from_request(request)
if "nonce" not in body:
raise SynapseError(400, "nonce must be specified", errcode=Codes.BAD_JSON)
nonce = body["nonce"]
if nonce not in self.nonces:
raise SynapseError(400, "unrecognised nonce")
# Delete the nonce, so it can't be reused, even if it's invalid
del self.nonces[nonce]
if "username" not in body:
raise SynapseError(
400, "username must be specified", errcode=Codes.BAD_JSON
)
else:
if not isinstance(body["username"], str) or len(body["username"]) > 512:
raise SynapseError(400, "Invalid username")
username = body["username"].encode("utf-8")
if b"\x00" in username:
raise SynapseError(400, "Invalid username")
if "password" not in body:
raise SynapseError(
400, "password must be specified", errcode=Codes.BAD_JSON
)
else:
password = body["password"]
if not isinstance(password, str) or len(password) > 512:
raise SynapseError(400, "Invalid password")
password_bytes = password.encode("utf-8")
if b"\x00" in password_bytes:
raise SynapseError(400, "Invalid password")
password_hash = await self.auth_handler.hash(password)
admin = body.get("admin", None)
user_type = body.get("user_type", None)
if user_type is not None and user_type not in UserTypes.ALL_USER_TYPES:
raise SynapseError(400, "Invalid user type")
got_mac = body["mac"]
want_mac_builder = hmac.new(
key=self.hs.config.registration_shared_secret.encode(),
digestmod=hashlib.sha1,
)
want_mac_builder.update(nonce.encode("utf8"))
want_mac_builder.update(b"\x00")
want_mac_builder.update(username)
want_mac_builder.update(b"\x00")
want_mac_builder.update(password_bytes)
want_mac_builder.update(b"\x00")
want_mac_builder.update(b"admin" if admin else b"notadmin")
if user_type:
want_mac_builder.update(b"\x00")
want_mac_builder.update(user_type.encode("utf8"))
want_mac = want_mac_builder.hexdigest()
if not hmac.compare_digest(want_mac.encode("ascii"), got_mac.encode("ascii")):
raise SynapseError(403, "HMAC incorrect")
# Reuse the parts of RegisterRestServlet to reduce code duplication
from synapse.rest.client.v2_alpha.register import RegisterRestServlet
register = RegisterRestServlet(self.hs)
user_id = await register.registration_handler.register_user(
localpart=body["username"].lower(),
password_hash=password_hash,
admin=bool(admin),
user_type=user_type,
by_admin=True,
)
result = await register._create_registration_details(user_id, body)
return 200, result
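# The MAC verified in on_POST above can be reproduced client-side by hashing
# the same fields in the same order (nonce, username, password, admin flag,
# optional user type, separated by NUL bytes) with the shared secret as the
# HMAC-SHA1 key. The helper below is an illustrative sketch and is not used
# by Synapse itself.
def build_registration_mac(shared_secret, nonce, username, password,
                           admin=False, user_type=None):
    """Build the `mac` field expected by UserRegisterServlet.on_POST."""
    mac = hmac.new(key=shared_secret.encode("utf8"), digestmod=hashlib.sha1)
    mac.update(nonce.encode("utf8"))
    mac.update(b"\x00")
    mac.update(username.encode("utf8"))
    mac.update(b"\x00")
    mac.update(password.encode("utf8"))
    mac.update(b"\x00")
    mac.update(b"admin" if admin else b"notadmin")
    if user_type:
        mac.update(b"\x00")
        mac.update(user_type.encode("utf8"))
    return mac.hexdigest()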
class WhoisRestServlet(RestServlet):
PATTERNS = historical_admin_path_patterns("/whois/(?P<user_id>[^/]*)")
def __init__(self, hs):
self.hs = hs
self.auth = hs.get_auth()
self.admin_handler = hs.get_admin_handler()
async def on_GET(self, request, user_id):
target_user = UserID.from_string(user_id)
requester = await self.auth.get_user_by_req(request)
auth_user = requester.user
if target_user != auth_user:
await assert_user_is_admin(self.auth, auth_user)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only whois a local user")
ret = await self.admin_handler.get_whois(target_user)
return 200, ret
class DeactivateAccountRestServlet(RestServlet):
PATTERNS = historical_admin_path_patterns("/deactivate/(?P<target_user_id>[^/]*)")
def __init__(self, hs):
self._deactivate_account_handler = hs.get_deactivate_account_handler()
self.auth = hs.get_auth()
async def on_POST(self, request, target_user_id):
await assert_requester_is_admin(self.auth, request)
body = parse_json_object_from_request(request, allow_empty_body=True)
erase = body.get("erase", False)
if not isinstance(erase, bool):
raise SynapseError(
HTTPStatus.BAD_REQUEST,
"Param 'erase' must be a boolean, if given",
Codes.BAD_JSON,
)
UserID.from_string(target_user_id)
result = await self._deactivate_account_handler.deactivate_account(
target_user_id, erase
)
if result:
id_server_unbind_result = "success"
else:
id_server_unbind_result = "no-support"
return 200, {"id_server_unbind_result": id_server_unbind_result}
class AccountValidityRenewServlet(RestServlet):
PATTERNS = historical_admin_path_patterns("/account_validity/validity$")
def __init__(self, hs):
"""
Args:
hs (synapse.server.HomeServer): server
"""
self.hs = hs
self.account_activity_handler = hs.get_account_validity_handler()
self.auth = hs.get_auth()
async def on_POST(self, request):
await assert_requester_is_admin(self.auth, request)
body = parse_json_object_from_request(request)
if "user_id" not in body:
raise SynapseError(400, "Missing property 'user_id' in the request body")
expiration_ts = await self.account_activity_handler.renew_account_for_user(
body["user_id"],
body.get("expiration_ts"),
not body.get("enable_renewal_emails", True),
)
res = {"expiration_ts": expiration_ts}
return 200, res
class ResetPasswordRestServlet(RestServlet):
"""Post request to allow an administrator reset password for a user.
This needs user to have administrator access in Synapse.
Example:
http://localhost:8008/_synapse/admin/v1/reset_password/
@user:to_reset_password?access_token=admin_access_token
JsonBodyToSend:
{
"new_password": "secret"
}
Returns:
200 OK with empty object if success otherwise an error.
"""
PATTERNS = historical_admin_path_patterns(
"/reset_password/(?P<target_user_id>[^/]*)"
)
def __init__(self, hs):
self.store = hs.get_datastore()
self.hs = hs
self.auth = hs.get_auth()
self.auth_handler = hs.get_auth_handler()
self._set_password_handler = hs.get_set_password_handler()
async def on_POST(self, request, target_user_id):
"""Post request to allow an administrator reset password for a user.
This needs user to have administrator access in Synapse.
"""
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
UserID.from_string(target_user_id)
params = parse_json_object_from_request(request)
assert_params_in_dict(params, ["new_password"])
new_password = params["new_password"]
logout_devices = params.get("logout_devices", True)
new_password_hash = await self.auth_handler.hash(new_password)
await self._set_password_handler.set_password(
target_user_id, new_password_hash, logout_devices, requester
)
return 200, {}
class SearchUsersRestServlet(RestServlet):
"""Get request to search user table for specific users according to
search term.
This needs user to have administrator access in Synapse.
Example:
http://localhost:8008/_synapse/admin/v1/search_users/
@admin:user?access_token=admin_access_token&term=alice
Returns:
200 OK with json object {list[dict[str, Any]], count} or empty object.
"""
PATTERNS = historical_admin_path_patterns("/search_users/(?P<target_user_id>[^/]*)")
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.auth = hs.get_auth()
async def on_GET(self, request, target_user_id):
"""Get request to search user table for specific users according to
search term.
        This needs the user to have administrator access in Synapse.
"""
await assert_requester_is_admin(self.auth, request)
target_user = UserID.from_string(target_user_id)
# To allow all users to get the users list
# if not is_admin and target_user != auth_user:
# raise AuthError(403, "You are not a server admin")
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Can only users a local user")
term = parse_string(request, "term", required=True)
logger.info("term: %s ", term)
ret = await self.store.search_users(term)
return 200, ret
class UserAdminServlet(RestServlet):
"""
Get or set whether or not a user is a server administrator.
Note that only local users can be server administrators, and that an
administrator may not demote themselves.
Only server administrators can use this API.
Examples:
* Get
GET /_synapse/admin/v1/users/@nonadmin:example.com/admin
response on success:
{
"admin": false
}
* Set
PUT /_synapse/admin/v1/users/@reivilibre:librepush.net/admin
request body:
{
"admin": true
}
response on success:
{}
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/admin$")
def __init__(self, hs):
self.hs = hs
self.store = hs.get_datastore()
self.auth = hs.get_auth()
async def on_GET(self, request, user_id):
await assert_requester_is_admin(self.auth, request)
target_user = UserID.from_string(user_id)
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Only local users can be admins of this homeserver")
is_admin = await self.store.is_server_admin(target_user)
return 200, {"admin": is_admin}
async def on_PUT(self, request, user_id):
requester = await self.auth.get_user_by_req(request)
await assert_user_is_admin(self.auth, requester.user)
auth_user = requester.user
target_user = UserID.from_string(user_id)
body = parse_json_object_from_request(request)
assert_params_in_dict(body, ["admin"])
if not self.hs.is_mine(target_user):
raise SynapseError(400, "Only local users can be admins of this homeserver")
set_admin_to = bool(body["admin"])
if target_user == auth_user and not set_admin_to:
raise SynapseError(400, "You may not demote yourself.")
await self.store.set_server_admin(target_user, set_admin_to)
return 200, {}
class UserMembershipRestServlet(RestServlet):
"""
Get room list of an user.
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/joined_rooms$")
def __init__(self, hs):
self.is_mine = hs.is_mine
self.auth = hs.get_auth()
self.store = hs.get_datastore()
async def on_GET(self, request, user_id):
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only lookup local users")
user = await self.store.get_user_by_id(user_id)
if user is None:
raise NotFoundError("Unknown user")
room_ids = await self.store.get_rooms_for_user(user_id)
ret = {"joined_rooms": list(room_ids), "total": len(room_ids)}
return 200, ret
class PushersRestServlet(RestServlet):
"""
Gets information about all pushers for a specific `user_id`.
Example:
http://localhost:8008/_synapse/admin/v1/users/
@user:server/pushers
Returns:
pushers: Dictionary containing pushers information.
        total: Number of pushers in dictionary `pushers`.
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]*)/pushers$")
def __init__(self, hs):
self.is_mine = hs.is_mine
self.store = hs.get_datastore()
self.auth = hs.get_auth()
async def on_GET(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only lookup local users")
if not await self.store.get_user_by_id(user_id):
raise NotFoundError("User not found")
pushers = await self.store.get_pushers_by_user_id(user_id)
filtered_pushers = [
{k: v for k, v in p.items() if k in _GET_PUSHERS_ALLOWED_KEYS}
for p in pushers
]
return 200, {"pushers": filtered_pushers, "total": len(filtered_pushers)}
class UserMediaRestServlet(RestServlet):
"""
Gets information about all uploaded local media for a specific `user_id`.
Example:
http://localhost:8008/_synapse/admin/v1/users/
@user:server/media
Args:
The parameters `from` and `limit` are required for pagination.
By default, a `limit` of 100 is used.
Returns:
        A list of media and an integer representing the total number of
        media that exist for this user
"""
PATTERNS = admin_patterns("/users/(?P<user_id>[^/]+)/media$")
def __init__(self, hs):
self.is_mine = hs.is_mine
self.auth = hs.get_auth()
self.store = hs.get_datastore()
async def on_GET(
self, request: SynapseRequest, user_id: str
) -> Tuple[int, JsonDict]:
await assert_requester_is_admin(self.auth, request)
if not self.is_mine(UserID.from_string(user_id)):
raise SynapseError(400, "Can only lookup local users")
user = await self.store.get_user_by_id(user_id)
if user is None:
raise NotFoundError("Unknown user")
start = parse_integer(request, "from", default=0)
limit = parse_integer(request, "limit", default=100)
if start < 0:
raise SynapseError(
400,
"Query parameter from must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
if limit < 0:
raise SynapseError(
400,
"Query parameter limit must be a string representing a positive integer.",
errcode=Codes.INVALID_PARAM,
)
media, total = await self.store.get_local_media_by_user_paginate(
start, limit, user_id
)
ret = {"media": media, "total": total}
if (start + limit) < total:
ret["next_token"] = start + len(media)
return 200, ret
| 34.685163
| 90
| 0.611741
|
f08be6fa9b035dd0b44f72e86e08a3ad3688bdcd
| 4,331
|
py
|
Python
|
pynodegl-utils/pynodegl_utils/ui/seekbar.py
|
jerry-belaston/gopro-lib-node.gl
|
623031489ddc82ed980c15bad349391c5b6bab5c
|
[
"Apache-2.0"
] | 45
|
2017-02-07T13:13:52.000Z
|
2022-03-18T07:12:39.000Z
|
pynodegl-utils/pynodegl_utils/ui/seekbar.py
|
mrobertseidowsky-gpsw/gopro-lib-node.gl
|
fbe427e4ea108468a63cde5920cf6f6ce03478bc
|
[
"Apache-2.0"
] | 148
|
2017-02-02T18:35:32.000Z
|
2022-03-28T13:53:22.000Z
|
pynodegl-utils/pynodegl_utils/ui/seekbar.py
|
mrobertseidowsky-gpsw/gopro-lib-node.gl
|
fbe427e4ea108468a63cde5920cf6f6ce03478bc
|
[
"Apache-2.0"
] | 28
|
2017-02-01T10:06:47.000Z
|
2022-03-18T07:12:26.000Z
|
#!/usr/bin/env python
#
# Copyright 2018 GoPro Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import math
from fractions import Fraction
from PySide2 import QtCore, QtGui, QtWidgets
class Seekbar(QtWidgets.QWidget):
seek = QtCore.Signal(float)
step = QtCore.Signal(int)
SLIDER_TIMEBASE = 1000
SLIDER_TIMESCALE = 1. / SLIDER_TIMEBASE
def __init__(self, config):
super().__init__()
self._slider = QtWidgets.QSlider(QtCore.Qt.Horizontal)
self._time_lbl = QtWidgets.QLabel()
self._time_lbl.setFont(QtGui.QFontDatabase.systemFont(QtGui.QFontDatabase.FixedFont))
fw_btn = QtWidgets.QToolButton()
fw_btn.setText('>')
bw_btn = QtWidgets.QToolButton()
bw_btn.setText('<')
layout = QtWidgets.QHBoxLayout(self)
layout.setContentsMargins(0, 0, 0, 0)
layout.addWidget(bw_btn)
layout.addWidget(fw_btn)
layout.addWidget(self._slider)
layout.addWidget(self._time_lbl)
self._frame_index = 0
self._scene_duration = 0
self._framerate = Fraction(*config.get('framerate'))
self._slider.sliderMoved.connect(self._slider_moved)
self._slider.sliderPressed.connect(self._slider_pressed)
self._slider.sliderReleased.connect(self._slider_released)
self._slider_dragged = False
fw_btn.clicked.connect(self._step_fw)
bw_btn.clicked.connect(self._step_bw)
@QtCore.Slot(int)
def _slider_moved(self, value): # only user move
if not self._scene_duration:
return
self.seek.emit(value * self.SLIDER_TIMESCALE)
@QtCore.Slot()
def _slider_pressed(self):
self._slider_dragged = True
@QtCore.Slot()
def _slider_released(self):
self._slider_dragged = False
self._refresh()
@QtCore.Slot()
def _step_fw(self):
self.step.emit(1)
@QtCore.Slot()
def _step_bw(self):
self.step.emit(-1)
def _get_time_lbl_text(self, frame_index, frame_time):
cur_time = '%02d:%02d' % divmod(frame_time, 60)
duration = '%02d:%02d' % divmod(self._scene_duration, 60)
return '%s / %s (%d @ %.4gHz)' % (cur_time, duration, frame_index, self._framerate)
def _adjust_time_label_size(self):
# Make the time label flexible again
self._time_lbl.setMinimumSize(0, 0)
self._time_lbl.setMaximumSize(0xffffff, 0xffffff)
# Set the label to its largest possible content (last frame)
last_frame_index = int(math.ceil(self._scene_duration * self._framerate))
text = self._get_time_lbl_text(last_frame_index, self._scene_duration)
self._time_lbl.setText(text)
# Probe the occupied size and make it fixed for the current scene
hint = self._time_lbl.sizeHint()
self._time_lbl.setFixedSize(hint)
@QtCore.Slot(dict)
def set_scene_metadata(self, cfg):
self._scene_duration = cfg['duration']
self._framerate = Fraction(*cfg['framerate'])
self._slider.setRange(0, self._scene_duration * self.SLIDER_TIMEBASE)
self._adjust_time_label_size()
self._refresh()
@QtCore.Slot(int, float)
def set_frame_time(self, frame_index, frame_time):
self._frame_index = frame_index
self._refresh()
def _refresh(self):
t = self._frame_index / self._framerate
text = self._get_time_lbl_text(self._frame_index, t)
self._time_lbl.setText(text)
if not self._slider_dragged:
self._slider.setValue(int(t * self.SLIDER_TIMEBASE))
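# A minimal wiring sketch for the widget above (illustrative only; it assumes
# a `player` object exposing seek()/step_frame() and a config mapping with a
# "framerate" entry, none of which are defined in this module):
#
#     seekbar = Seekbar({'framerate': (60, 1)})
#     seekbar.seek.connect(player.seek)        # absolute time in seconds
#     seekbar.step.connect(player.step_frame)  # +1 / -1 frame
#     seekbar.set_scene_metadata({'duration': 30.0, 'framerate': (60, 1)})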
| 33.835938
| 93
| 0.684138
|
287fa01be037648aa80ebc8b6afbf29035919428
| 4,001
|
py
|
Python
|
clients_1.2/test/test_verify_installation.py
|
MetaAnalyticsAdmin/meta-analytics
|
9a8408d3e414e837d84939ad711d0ae9f83c46ed
|
[
"Apache-2.0"
] | 5
|
2020-05-04T17:02:35.000Z
|
2022-01-13T17:41:09.000Z
|
clients_1.2/test/test_verify_installation.py
|
MetaAnalytics/meta-analytics
|
9a8408d3e414e837d84939ad711d0ae9f83c46ed
|
[
"Apache-2.0"
] | null | null | null |
clients_1.2/test/test_verify_installation.py
|
MetaAnalytics/meta-analytics
|
9a8408d3e414e837d84939ad711d0ae9f83c46ed
|
[
"Apache-2.0"
] | null | null | null |
import sys
import alphaqubo_client as aq
from alphaqubo_client.rest import ApiException
import dimod
from dimod.core.sampler import Sampler
from dimod.sampleset import SampleSet
from six import iteritems
import pathlib
import os.path
def read_alphaqubo_bqm(filename:str):
lines = []
with open(filename, "rt") as myfile:
for line in myfile:
lines.append(line.rstrip('\n'))
Q = {}
for line in lines:
p = line.split()
if len(p) > 0:
if p[0] == 'c':
continue
elif p[0] == 'p':
nVars = int(p[1])
else:
if len(p) == 3:
i = int(p[0]) - 1
j = int(p[1]) - 1
w = float(p[2])
if i != j:
w *= 2
Q[(i, j)] = w
bqm = dimod.BinaryQuadraticModel.from_qubo(Q)
return bqm
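# read_alphaqubo_bqm expects a whitespace-separated QUBO file: 'c' lines are
# comments, a 'p' line gives the variable count, and every remaining line is
# an "i j w" triple with 1-based indices (off-diagonal weights are doubled
# when building the dict). A tiny illustrative input and call:
#
#     c example QUBO over two variables
#     p 2
#     1 1 -1.0
#     1 2  2.0
#     2 2 -1.0
#
#     bqm = read_alphaqubo_bqm("example_qubo.txt")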
class AlphaQuboSampler(Sampler):
properties = None
parameters = None
def __init__(self):
self.properties = {}
self.parameters = {'time_limit': [],
'accuracy_min': [],
'greediness': [],
'maximize': [] }
def sample(self, bqm, api_instance, time_limit=60, accuracy_min=5, greediness=0.0, maximize=False):
n = len(bqm.variables)
if n == 0:
return SampleSet.from_samples([], bqm.vartype, energy=[])
linear = bqm.linear
inverse_mapping = dict(enumerate(linear))
mapping = {v: i for i, v in iteritems(inverse_mapping)}
bqm2 = bqm.relabel_variables(mapping, inplace=False)
body = aq.SolverAPI()
body.num_vars = n
if maximize:
body.min_max = 1
else:
body.min_max = 0
body.timeout = time_limit
body.inputs = []
variables = sorted(bqm2.iter_variables())
for idx, u in enumerate(variables):
for v in variables[idx:]:
if u == v and bqm2.linear[u]:
body.inputs.append(aq.Points(x=u + 1, y=u + 1, z=bqm2.linear[u]))
for idx, u in enumerate(variables):
for v in variables[idx:]:
if u in bqm2.adj[v]:
body.inputs.append(aq.Points(x=u + 1, y=v + 1, z=bqm2.adj[u][v] / 2))
body.non_zero = len(body.inputs)
body.parameters = "-am " + str(accuracy_min)
try:
# Use the inputs to define a QUBO and solve it synchronously.
api_response = api_instance.api_qubo_solve_qubo_post(body=body)
        except ApiException as e:
            print("Exception when calling QuboApi->api_qubo_solve_qubo_post: %s\n" % e)
            raise  # re-raise: api_response below would otherwise be undefined
samples = []
energies = []
samples.append( api_response.solved_result )
energies.append( api_response.solved_value )
response = SampleSet.from_samples_bqm((samples, list(bqm.variables)), bqm)
return response
def main():
try:
solver_host = "http://localhost:44336"
n = len(sys.argv)
if n > 1:
solver_host = sys.argv[1]
test_dir = pathlib.Path(__file__).parent.resolve()
test_data_file = test_dir.parent.as_posix() + "/data/bqp2500_1.txt"
if not os.path.isfile(test_data_file):
print("Unable to find test data file : " + test_data_file)
return
bqm = read_alphaqubo_bqm(test_data_file)
print("Size of BQM: ", len(bqm))
print("running AlphaQUBO solver")
configuration = aq.Configuration()
configuration.debug = False
configuration.host = solver_host
api_instance = aq.QuboApi(aq.ApiClient(configuration))
        response = AlphaQuboSampler().sample(bqm, api_instance, maximize=True, time_limit=15)
except Exception as e:
print(e)
if __name__ == "__main__":
main()
| 32.528455
| 103
| 0.543864
|
236866f23333e6e8092bfd5f34ce65e4c7e57ee1
| 5,413
|
py
|
Python
|
src/api/dataflow/shared/api/modules/test/test_call_stream.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 84
|
2021-06-30T06:20:23.000Z
|
2022-03-22T03:05:49.000Z
|
src/api/dataflow/shared/api/modules/test/test_call_stream.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 7
|
2021-06-30T06:21:16.000Z
|
2022-03-29T07:36:13.000Z
|
src/api/dataflow/shared/api/modules/test/test_call_stream.py
|
Chromico/bk-base
|
be822d9bbee544a958bed4831348185a75604791
|
[
"MIT"
] | 40
|
2021-06-30T06:21:26.000Z
|
2022-03-29T12:42:26.000Z
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making BK-BASE 蓝鲸基础平台 available.
Copyright (C) 2021 THL A29 Limited, a Tencent company. All rights reserved.
BK-BASE 蓝鲸基础平台 is licensed under the MIT License.
License for BK-BASE 蓝鲸基础平台:
--------------------------------------------------------------------
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
documentation files (the "Software"), to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial
portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT
LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN
NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import random
from .base import BaseTest
class TestStream(BaseTest):
def jobs(self):
data = {
"job_id": "default_job_id",
"default_job_id": random.choice(["ACTIVE", None]),
"jar_name": "master-0.0.1.jar",
}
return self.success_response(data)
def jobs_submit(self):
data = {"operate_info": {"operate": "start"}}
return self.success_response(data)
def jobs_cancel(self):
data = {"operate_info": {"operate": "stop"}}
return self.success_response(data)
def processings(self):
data = {
"processing_id": "591_stream_test_abc",
"result_table_ids": ["591_stream_test_abc"],
"heads": ["591_stream_test_abc"],
"tails": ["591_stream_test_abc"],
}
return self.success_response(data)
def debugs(self):
data = {"debug_id": 1200}
return self.success_response(data)
def get_debug_basic_info(self):
data = {
"debug_id": 1200,
"result_tables": {
"123_parser": {"output_total_count": 2190, "warning_count": 0},
"123_filter": {"output_total_count": 200, "warning_count": 1},
},
"debug_error": {
# "error_result_table_id": "123_filter"
},
}
return self.success_response(data)
def get_debug_node_info(self):
data = {
"debug_id": 1200,
"debug_errcode": {
"error_code": 101,
"error_message": "在【123_test】中的字段【aaa】转换失败",
},
"debug_metric": {
"input_total_count": 45210,
"output_total_count": 2,
"filter_discard_count": 0,
"filter_discard_rate": 0,
"transformer_discard_count": 45210,
"transformer_discard_rate": 1,
"aggregator_discard_count": 0,
"aggregator_discard_rate": 0,
"metric_info": ["数据过滤丢弃率0%", "数据转换丢弃率100%", "数据聚合丢弃率0%"],
"warning_info": ["【警告】数据转换丢失率为100%"],
},
"debug_data": {
"result_data": [
{
"dtEventTime": "2017-10-18 20:36:04",
"ip": "x.x.x.x",
"cc_set": "test",
"cc_module": "test",
},
{
"dtEventTime": "2017-10-18 20:36:04",
"ip": "x.x.x.x",
"cc_set": "test",
"cc_module": "test",
},
],
"discard_data": {
"filter": [
{
"dtEventTime": "2017-10-18 20:36:04",
"ip": "1x.x.x.x",
"cc_set": "test",
"cc_module": "test",
},
{
"dtEventTime": "2017-10-18 20:36:04",
"ip": "x.x.x.x",
"cc_set": "test",
"cc_module": "test",
},
],
"transformer": [
{
"dtEventTime": "2017-10-18 20:36:04",
"ip": "x.x.x.x",
"cc_set": "test",
"cc_module": "test",
}
],
"aggregator": [
{
"dtEventTime": "2017-10-18 20:36:04",
"ip": "x.x.x.x",
"cc_set": "test",
"cc_module": "test",
}
],
},
},
}
return self.success_response(data)
| 38.119718
| 111
| 0.477185
|
41612c58b2d0788a239aae6a1703c4040a566d85
| 11,858
|
py
|
Python
|
mainwindow.py
|
nsw42/pijuui
|
fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac
|
[
"BSD-2-Clause"
] | 1
|
2021-12-20T15:02:54.000Z
|
2021-12-20T15:02:54.000Z
|
mainwindow.py
|
nsw42/pijuui
|
fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac
|
[
"BSD-2-Clause"
] | null | null | null |
mainwindow.py
|
nsw42/pijuui
|
fee9b96ceaab6b4f5db7ea41cb86da9a1a3b9eac
|
[
"BSD-2-Clause"
] | null | null | null |
import logging
import os.path
import gi
gi.require_version('Gtk', '3.0')
from gi.repository import Gtk # noqa: E402 # need to call require_version before we can call this
from gi.repository import Gdk # noqa: E402 # need to call require_version before we can call this
from gi.repository import GdkPixbuf # noqa: E402 # need to call require_version before we can call this
gi.require_version('Pango', '1.0')
from gi.repository import Pango # noqa: E402 # need to call require_version before we can call this
from jsonrpc import JsonRPC # noqa: E402 # libraries before local imports
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 480
MAX_IMAGE_SIZE = 300
def load_local_image(icon_name, icon_size):
leafname = icon_name
if icon_size:
leafname += '_%u' % icon_size
leafname += '.png'
icon_filename = os.path.join(os.path.dirname(__file__), leafname)
assert os.path.exists(icon_filename)
return Gtk.Image.new_from_file(icon_filename)
def set_font(label, weight, font_size, colour):
context = label.create_pango_context()
font_desc = context.get_font_description()
font_desc.set_family('sans')
font_desc.set_weight(weight)
font_desc.set_size(font_size * Pango.SCALE)
label.override_font(font_desc)
label.modify_fg(Gtk.StateType.NORMAL, colour)
class MainWindow(Gtk.ApplicationWindow):
"""
Main application window
"""
def __init__(self,
jsonrpc: JsonRPC,
full_screen: bool,
fixed_layout: bool,
show_close_button: bool,
hide_mouse_pointer: bool):
Gtk.Window.__init__(self, title="PiJu")
self.connect("destroy", self.on_quit)
self.jsonrpc = jsonrpc
if full_screen:
self.fullscreen()
else:
self.set_size_request(SCREEN_WIDTH, SCREEN_HEIGHT)
self.play_icon = None
self.pause_icon = None
self.current_image_uri = None
def mk_label(justification=Gtk.Justification.LEFT,
large=True):
label = Gtk.Label()
label.set_hexpand(True)
label.set_vexpand(True)
label.set_line_wrap(True)
label.set_justify(justification)
set_font(label,
Pango.Weight.BOLD if large else Pango.Weight.NORMAL,
32 if large else 24,
Gdk.Color.from_floats(0.0, 0.0, 0.0) if large else Gdk.Color.from_floats(0.3, 0.3, 0.3))
return label
self.artwork = Gtk.Image()
self.artwork.set_hexpand(False)
self.artwork.set_vexpand(False)
self.artwork.set_size_request(MAX_IMAGE_SIZE, MAX_IMAGE_SIZE)
self.track_name_label = mk_label(large=True)
self.artist_label = mk_label(large=False)
self.prev_button = Gtk.Button()
self.prev_button.connect('clicked', self.on_previous)
self.play_pause_button = Gtk.Button()
self.next_button = Gtk.Button()
self.next_button.connect('clicked', self.on_next)
self.prev_button.set_halign(Gtk.Align.START)
self.play_pause_button.set_halign(Gtk.Align.CENTER)
self.next_button.set_halign(Gtk.Align.END)
for button in (self.prev_button, self.play_pause_button, self.next_button):
button.set_valign(Gtk.Align.CENTER)
self.play_pause_button.connect('clicked', self.on_play_pause)
self.play_pause_action = None
close_icon = load_local_image('window-close-solid', 0)
close = Gtk.Button()
close.set_image(close_icon)
close.connect('clicked', self.on_quit)
# image track
# .. artist
# prev play/pause next
if fixed_layout:
self.no_track_label = mk_label(justification=Gtk.Justification.CENTER, large=False)
fixed_container = Gtk.Fixed.new()
x_padding = 10
y0_padding = 10
label_h = MAX_IMAGE_SIZE / 2
fixed_container.put(self.artwork, x_padding, y0_padding)
track_artist_x0 = x_padding + MAX_IMAGE_SIZE + x_padding
fixed_container.put(self.track_name_label, track_artist_x0, y0_padding)
artist_y0 = y0_padding + label_h + y0_padding
fixed_container.put(self.artist_label, track_artist_x0, artist_y0)
for label in (self.track_name_label, self.artist_label):
label.set_size_request(SCREEN_WIDTH - track_artist_x0 - x_padding,
label_h)
no_track_label_w = 200
fixed_container.put(self.no_track_label,
(SCREEN_WIDTH - no_track_label_w) / 2,
150)
self.no_track_label.set_size_request(no_track_label_w, 32)
# buttons
# image is 100x100; button padding takes it to 112x110
# (on macOS, at least)
# SPC IMG 2xSPC IMG 2xSPC IMG SPC
# 6xSPC + 3xIMG = SCREEN_WIDTH
# => SPC = (SCREEN_WIDTH - 3*IMG) / 6
img_button_w = 112
img_button_h = 110
y1_padding = 20
button_y0 = SCREEN_HEIGHT - y1_padding - img_button_h
button_x_padding = (SCREEN_WIDTH - 3 * img_button_w) / 6
fixed_container.put(self.prev_button, button_x_padding, button_y0)
fixed_container.put(self.play_pause_button, (SCREEN_WIDTH - img_button_w) / 2, button_y0)
fixed_container.put(self.next_button, SCREEN_WIDTH - button_x_padding - img_button_w, button_y0)
self.add(fixed_container)
else:
self.no_track_label = self.artist_label
track_artist_container = Gtk.Box.new(Gtk.Orientation.VERTICAL, 10)
track_artist_container.pack_start(self.track_name_label, expand=True, fill=True, padding=10)
track_artist_container.pack_start(self.artist_label, expand=True, fill=True, padding=10)
top_row_container = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 10)
top_row_container.pack_start(self.artwork, expand=False, fill=False, padding=10)
top_row_container.pack_start(track_artist_container, expand=True, fill=True, padding=10)
top_row_container.set_valign(Gtk.Align.START)
bottom_row_container = Gtk.Box.new(Gtk.Orientation.HORIZONTAL, 10)
bottom_row_container.pack_start(self.prev_button, expand=True, fill=False, padding=10)
bottom_row_container.pack_start(self.play_pause_button, expand=True, fill=False, padding=10)
bottom_row_container.pack_start(self.next_button, expand=True, fill=False, padding=10)
bottom_row_container.set_valign(Gtk.Align.START)
child_container = Gtk.Box.new(Gtk.Orientation.VERTICAL, 10)
child_container.pack_start(top_row_container, expand=True, fill=True, padding=10)
child_container.pack_end(bottom_row_container, expand=True, fill=False, padding=10)
if show_close_button:
overlay = Gtk.Overlay()
overlay.add(child_container)
top_right = Gtk.Alignment.new(1, 0, 0, 0)
top_right.add(close)
overlay.add_overlay(top_right)
overlay.set_overlay_pass_through(top_right, True)
self.add(overlay)
else:
self.add(child_container)
self.hide_mouse_pointer = hide_mouse_pointer
self.connect('realize', self.on_realized)
def on_next(self, *args):
self.jsonrpc.request('core.playback.next')
def on_play_pause(self, *args):
if self.play_pause_action:
self.jsonrpc.request(self.play_pause_action)
def on_previous(self, *args):
self.jsonrpc.request('core.playback.previous')
def on_quit(self, *args):
Gtk.main_quit()
def on_realized(self, *args):
if self.hide_mouse_pointer:
self.get_window().set_cursor(Gdk.Cursor(Gdk.CursorType.BLANK_CURSOR))
logging.debug("Main window realized: allocated size %ux%u",
self.get_allocated_width(), self.get_allocated_height())
icon_size = 200 if (self.get_allocated_width() > 1000) else 100
self.pause_icon = load_local_image('pause-solid', icon_size)
self.play_icon = load_local_image('play-solid', icon_size)
prev_icon = load_local_image('backward-solid', icon_size)
self.prev_button.set_image(prev_icon)
next_icon = load_local_image('forward-solid', icon_size)
self.next_button.set_image(next_icon)
def show_now_playing(self, connection_error, now_playing):
if connection_error:
self.artist_label.hide()
self.track_name_label.hide()
self.artwork.hide()
self.no_track_label.show()
self.no_track_label.set_label("Connection error")
self.play_pause_button.set_image(self.play_icon)
self.prev_button.set_sensitive(False)
self.play_pause_button.set_sensitive(False)
self.next_button.set_sensitive(False)
else:
if now_playing.is_track:
self.no_track_label.hide()
self.artist_label.set_label(now_playing.artist_name or 'Unknown artist')
self.track_name_label.set_label(now_playing.track_name or 'Unknown track')
self.artist_label.show()
self.track_name_label.show()
else:
self.artist_label.hide()
self.track_name_label.hide()
self.no_track_label.set_label('No track')
self.no_track_label.show()
if now_playing.image_uri != self.current_image_uri:
logging.debug("Updating image display")
if now_playing.image:
loader = GdkPixbuf.PixbufLoader()
loader.write(now_playing.image)
pixbuf = loader.get_pixbuf()
loader.close()
if (now_playing.image_width > MAX_IMAGE_SIZE) or (now_playing.image_height > MAX_IMAGE_SIZE):
if now_playing.image_width > now_playing.image_height:
dest_width = MAX_IMAGE_SIZE
dest_height = now_playing.image_height * dest_width / now_playing.image_width
else:
dest_height = MAX_IMAGE_SIZE
dest_width = now_playing.image_width * dest_height / now_playing.image_height
pixbuf = pixbuf.scale_simple(dest_width, dest_height, GdkPixbuf.InterpType.BILINEAR)
self.artwork.set_from_pixbuf(pixbuf)
self.artwork.show()
else:
self.artwork.hide()
self.current_image_uri = now_playing.image_uri
if now_playing.current_state == 'playing':
self.play_pause_button.set_image(self.pause_icon)
self.play_pause_action = 'core.playback.pause'
else:
self.play_pause_button.set_image(self.play_icon)
self.play_pause_action = 'core.playback.play'
if now_playing.track_number:
self.prev_button.set_sensitive(now_playing.track_number > 1)
self.play_pause_button.set_sensitive(True)
self.next_button.set_sensitive((now_playing.album_tracks is not None)
and (now_playing.track_number < now_playing.album_tracks))
else:
self.prev_button.set_sensitive(False)
self.play_pause_button.set_sensitive(False)
self.next_button.set_sensitive(False)
| 44.246269
| 113
| 0.629195
|
6a0b832897810bcf59c5e5969c37d2fe8171560f
| 554
|
py
|
Python
|
mysite/polls/urls.py
|
liuhy-xly/test_dev05
|
a987eab25763e1fdd641fed5494d6b5b7c0f13fa
|
[
"Apache-2.0"
] | null | null | null |
mysite/polls/urls.py
|
liuhy-xly/test_dev05
|
a987eab25763e1fdd641fed5494d6b5b7c0f13fa
|
[
"Apache-2.0"
] | null | null | null |
mysite/polls/urls.py
|
liuhy-xly/test_dev05
|
a987eab25763e1fdd641fed5494d6b5b7c0f13fa
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# @Time : 2021/7/12 10:04
# @Author : alvin
# @File : urls.py
# @Software: PyCharm
from django.urls import path
from . import views
# A real Django project may contain several applications. To let Django tell
# their URL names apart, add a namespace to this URLconf: set the application
# namespace with app_name, and reference it in templates as well.
app_name = 'polls'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:question_id>/', views.detail, name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path('<int:question_id>/vote/', views.vote, name='vote'),
]
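# With the application namespace set above, URLs are reversed with the
# "namespace:name" form (illustrative values; the final prefix depends on how
# the project URLconf includes 'polls.urls'):
#
#     from django.urls import reverse
#     reverse('polls:detail', args=[42])   # e.g. '/polls/42/'
#
# and in templates: {% url 'polls:vote' question.id %}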
| 34.625
| 75
| 0.6787
|
87aca9c8dc7084a2f8f848b6d939f29d7c1ba0df
| 673
|
py
|
Python
|
Exercicios em Python/ex070.py
|
Raphael-Azevedo/Exercicios_Python
|
dece138f38edd02b0731aed78e44acccb021b3cb
|
[
"MIT"
] | null | null | null |
Exercicios em Python/ex070.py
|
Raphael-Azevedo/Exercicios_Python
|
dece138f38edd02b0731aed78e44acccb021b3cb
|
[
"MIT"
] | null | null | null |
Exercicios em Python/ex070.py
|
Raphael-Azevedo/Exercicios_Python
|
dece138f38edd02b0731aed78e44acccb021b3cb
|
[
"MIT"
] | null | null | null |
soma = count = p = 0
while True:
print('-' * 30)
print(' CAIXA ')
print('-' * 30)
produto = str(input('Produto: '))
preco = float(input('Preço: RS$ '))
soma += preco
if preco >= 1000:
count += 1
if p == 0 or preco < p:
p = preco
prod = produto
s = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]
while s not in 'SN':
s = str(input('Deseja continuar? [S/N] ')).strip().upper()[0]
if s == 'N':
break
print(f'O total gasto na compra é RS${soma:.2f}\n'
      f'Foram {count} produtos com preço acima de RS$1000,00\n'
      f'O produto mais barato é {prod} que custa RS${p:.2f}')
| 30.590909
| 69
| 0.523031
|
8b97f171deb58d4364d6822032e3124e1940edd9
| 279
|
py
|
Python
|
conftest.py
|
ljnath/PySnakeAndLadder
|
d817b7520bc7e4ce0fc65b66a4a412f2951afb49
|
[
"MIT"
] | null | null | null |
conftest.py
|
ljnath/PySnakeAndLadder
|
d817b7520bc7e4ce0fc65b66a4a412f2951afb49
|
[
"MIT"
] | null | null | null |
conftest.py
|
ljnath/PySnakeAndLadder
|
d817b7520bc7e4ce0fc65b66a4a412f2951afb49
|
[
"MIT"
] | null | null | null |
"""
Empty python file to fix pytest relative module import issue
ref: https://stackoverflow.com/questions/10253826/path-issue-with-pytest-importerror-no-module-named-yadayadayada
     https://stackoverflow.com/questions/34466027/in-pytest-what-is-the-use-of-conftest-py-files
"""
| 27.9
| 74
| 0.795699
|
048640e59974850b15625849cce8bb9c8a8e3e2b
| 2,816
|
py
|
Python
|
Rock Paper Scissors Python Game/rock_paper_scissors_python_game.py
|
umangtank/ML-CaPsule
|
e0a72c7f9f0e5733ec05f28ba1444d30276a731a
|
[
"MIT"
] | null | null | null |
Rock Paper Scissors Python Game/rock_paper_scissors_python_game.py
|
umangtank/ML-CaPsule
|
e0a72c7f9f0e5733ec05f28ba1444d30276a731a
|
[
"MIT"
] | null | null | null |
Rock Paper Scissors Python Game/rock_paper_scissors_python_game.py
|
umangtank/ML-CaPsule
|
e0a72c7f9f0e5733ec05f28ba1444d30276a731a
|
[
"MIT"
] | null | null | null |
#import randint from random module
from random import randint
#moves of the game
moves = ["r", "p", "s"]
player_score = 0
computer_score = 0
#start the game
x = int(input("(1)Start. (2)End the game. \n"))
while x != 1 and x != 2:
    x = int(input("Invalid input! Please enter '1' or '2': \n"))
if x == 1:
while True:
#randomly selects option for computer
computer_moves = moves[randint(0, 2)]
#ask for player move
player_moves = input("(r)Rock. (p)Paper. (s)Scissors. (e)End the game. \n")
#if player and computer moves are equal it's a tie
if player_moves == computer_moves:
print("Computer chose the same move. It's a tie! Please play again!")
continue
elif player_moves == "r":
if computer_moves == "p":
computer_score+=1
print("Computer chose paper. You lose! your score:",player_score, "computer score:",computer_score)
continue
elif computer_moves == "s":
player_score+=1
print("Computer chose scissors. You win! your score:",player_score, "computer score:",computer_score)
continue
elif player_moves == "p":
if computer_moves == "s":
computer_score+=1
print("Computer chose scissors. You lose! your score:",player_score, "computer score:",computer_score)
continue
elif computer_moves == "r":
player_score+=1
print("Computer chose rock. You win! your score:",player_score, "computer score:",computer_score)
continue
elif player_moves == "s":
if computer_moves == "r":
computer_score+=1
print("Computer chose rock. You lose! your score:",player_score, "computer score:",computer_score)
continue
elif computer_moves == "p":
player_score+=1
print("Computer chose paper. You win! your score:",player_score, "computer score:",computer_score)
continue
#quit the game
elif player_moves == "e":
exit()
#if the player entered a wrong letter raise an error
else:
print("Invalid input! Please enter 'r','p','s' or 'e': ")
continue
#quit the game
elif x == 2:
exit()
| 41.411765
| 127
| 0.480114
|
9c6ef71dc3cf0f2eed548fccf4bed75923334249
| 4,398
|
py
|
Python
|
electrumsv/gui/qt/qrcodewidget.py
|
electrumsv/electrumsv
|
a2d9027ccec338cadfca778888e6ef7f077b1651
|
[
"MIT"
] | 136
|
2019-01-10T15:49:09.000Z
|
2022-02-20T04:46:39.000Z
|
electrumsv/gui/qt/qrcodewidget.py
|
electrumsv/electrumsv
|
a2d9027ccec338cadfca778888e6ef7f077b1651
|
[
"MIT"
] | 790
|
2019-01-07T01:53:35.000Z
|
2022-03-30T23:04:28.000Z
|
electrumsv/gui/qt/qrcodewidget.py
|
electrumsv/electrumsv
|
a2d9027ccec338cadfca778888e6ef7f077b1651
|
[
"MIT"
] | 65
|
2019-01-10T23:55:30.000Z
|
2021-12-19T06:47:13.000Z
|
import os
from typing import Optional
from PyQt5.QtCore import pyqtSignal, QEvent, Qt
from PyQt5.QtGui import QColor, QCursor, QMouseEvent, QPainter, QPaintEvent
from PyQt5.QtWidgets import (
QApplication, QVBoxLayout, QTextEdit, QHBoxLayout, QPushButton, QWidget)
import qrcode
from electrumsv.i18n import _
from electrumsv.app_state import app_state, get_app_state_qt
from .util import WindowModalDialog
class QRCodeWidget(QWidget):
mouse_release_signal = pyqtSignal()
def __init__(self, data: Optional[str]=None, fixedSize: int=0) -> None:
QWidget.__init__(self)
self.data: Optional[str] = None
self.qr: Optional[qrcode.QRCode] = None
self.fixedSize=fixedSize
if fixedSize:
self.setFixedSize(fixedSize, fixedSize)
self.setData(data)
def enterEvent(self, event: QEvent) -> None:
get_app_state_qt().app_qt.setOverrideCursor(QCursor(Qt.CursorShape.PointingHandCursor))
super().enterEvent(event)
def leaveEvent(self, event: QEvent) -> None:
get_app_state_qt().app_qt.setOverrideCursor(QCursor(Qt.CursorShape.ArrowCursor))
super().leaveEvent(event)
def mouseReleaseEvent(self, event: QMouseEvent) -> None:
self.mouse_release_signal.emit()
super().mouseReleaseEvent(event)
def setData(self, data: Optional[str]) -> None:
if self.data != data:
self.data = data
if self.data:
self.qr = qrcode.QRCode()
self.qr.add_data(self.data)
if not self.fixedSize:
k = len(self.qr.get_matrix())
self.setMinimumSize(k*5,k*5)
else:
self.qr = None
self.update()
def paintEvent(self, event: QPaintEvent) -> None:
if not self.data:
return
black = QColor(0, 0, 0, 255)
white = QColor(255, 255, 255, 255)
if not self.qr:
qp = QPainter()
qp.begin(self)
qp.setBrush(white)
qp.setPen(white)
rect = qp.viewport()
qp.drawRect(0, 0, rect.width(), rect.height())
qp.end()
return
matrix = self.qr.get_matrix()
k = len(matrix)
qp = QPainter()
qp.begin(self)
rect = qp.viewport()
margin = 10
framesize = min(rect.width(), rect.height())
boxsize = int( (framesize - 2*margin)/k )
size = k*boxsize
left = (rect.width() - size)//2
top = (rect.height() - size)//2
# Make a white margin around the QR in case of dark theme use
qp.setBrush(white)
qp.setPen(white)
qp.drawRect(left-margin, top-margin, size+(margin*2), size+(margin*2))
qp.setBrush(black)
qp.setPen(black)
for rv in range(k):
for c in range(k):
if matrix[rv][c]:
qp.drawRect(left+c*boxsize, top+rv*boxsize, boxsize - 1, boxsize - 1)
qp.end()
class QRDialog(WindowModalDialog):
def __init__(self, data: str, parent: Optional[QWidget]=None, title: str = "",
show_text: bool=False) -> None:
WindowModalDialog.__init__(self, parent, title)
vbox = QVBoxLayout()
qrw = QRCodeWidget(data)
qscreen = QApplication.primaryScreen()
vbox.addWidget(qrw, 1)
if show_text:
text = QTextEdit()
text.setText(data)
text.setReadOnly(True)
vbox.addWidget(text)
hbox = QHBoxLayout()
hbox.addStretch(1)
filename = os.path.join(app_state.config.path, "qrcode.png")
def print_qr() -> None:
pixmap = qrw.grab()
pixmap.save(filename, 'png')
self.show_message(_("QR code saved to file") + " " + filename)
def copy_to_clipboard() -> None:
pixmap = qrw.grab()
QApplication.clipboard().setPixmap(pixmap)
self.show_message(_("QR code copied to clipboard"))
b = QPushButton(_("Copy"))
hbox.addWidget(b)
b.clicked.connect(copy_to_clipboard)
b = QPushButton(_("Save"))
hbox.addWidget(b)
b.clicked.connect(print_qr)
b = QPushButton(_("Close"))
hbox.addWidget(b)
b.clicked.connect(self.accept)
b.setDefault(True)
vbox.addLayout(hbox)
self.setLayout(vbox)
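# Usage sketch (editor's addition, not part of the original module). It assumes
# a running Qt application and that WindowModalDialog behaves like a regular
# QDialog; the data string and parent widget below are hypothetical.
#
#   dialog = QRDialog("some payment URI or text", parent=main_window,
#                     title=_("Payment request"), show_text=True)
#   dialog.exec_()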
| 30.331034
| 95
| 0.588677
|
47f44b99f8945d52df1c7e2be713ff30a435860b
| 358
|
py
|
Python
|
Python Files/utilities.py
|
ThermoNuclearPanda/Project_Automail
|
ebefb36879f01659c09e9d11aaf772e343dea283
|
[
"MIT"
] | 8
|
2016-03-25T05:56:49.000Z
|
2021-01-14T19:52:58.000Z
|
Python Files/utilities.py
|
ThermoNuclearPanda/Project_Automail
|
ebefb36879f01659c09e9d11aaf772e343dea283
|
[
"MIT"
] | 3
|
2016-07-10T23:22:45.000Z
|
2018-02-02T14:43:58.000Z
|
Python Files/utilities.py
|
ThermoNuclearPanda/Project_Automail
|
ebefb36879f01659c09e9d11aaf772e343dea283
|
[
"MIT"
] | 4
|
2016-07-10T23:23:00.000Z
|
2022-03-19T06:01:45.000Z
|
"""
@Author: Kiran Gurajala & Alex Lee
@Project: Project Automail
@Version: 1.0
"""
# Required imports
import struct
# Utils
def pack(fmt, *args):
return struct.pack('<' + fmt, *args)
def unpack(fmt, *args):
return struct.unpack('<' + fmt, *args)
def multichr(values):
return ''.join(map(chr, values))
def multiord(values):
return map(ord, values)
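# Illustrative usage (editor's addition, not in the original file). Every
# helper above assumes little-endian ('<') layouts, e.g. fixed-width headers:
#
#   pack('BH', 1, 0x0203)             # -> b'\x01\x03\x02' (uint8, then uint16 LE)
#   unpack('BH', b'\x01\x03\x02')     # -> (1, 515)
#   multichr([72, 105])               # -> 'Hi'
#   list(multiord('Hi'))              # -> [72, 105]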
| 15.565217
| 39
| 0.673184
|
3377c80e4554b94a92f673074913e6c634a7782b
| 48,645
|
py
|
Python
|
Scripted/CIP_PAARatio/CIP_PAARatio.py
|
pieper/SlicerCIP
|
115959155cdb5f6bfbeffc6c1f794fcb6bdeb509
|
[
"BSD-3-Clause"
] | null | null | null |
Scripted/CIP_PAARatio/CIP_PAARatio.py
|
pieper/SlicerCIP
|
115959155cdb5f6bfbeffc6c1f794fcb6bdeb509
|
[
"BSD-3-Clause"
] | null | null | null |
Scripted/CIP_PAARatio/CIP_PAARatio.py
|
pieper/SlicerCIP
|
115959155cdb5f6bfbeffc6c1f794fcb6bdeb509
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import os
import logging
from collections import OrderedDict
import vtk, qt, ctk, slicer
from slicer.ScriptedLoadableModule import *
from CIP.logic.SlicerUtil import SlicerUtil
from CIP.logic import Util
from CIP.ui import CaseReportsWidget
#
# CIP_PAARatio
#
class CIP_PAARatio(ScriptedLoadableModule):
"""Uses ScriptedLoadableModule base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
def __init__(self, parent):
ScriptedLoadableModule.__init__(self, parent)
self.parent.title = "PAA Ratio"
self.parent.categories = SlicerUtil.CIP_ModulesCategory
self.parent.dependencies = [SlicerUtil.CIP_ModuleName]
self.parent.contributors = ["Jorge Onieva (jonieva@bwh.harvard.edu)", "Applied Chest Imaging Laboratory", "Brigham and Women's Hospital"]
        self.parent.helpText = """Calculate the ratio between the pulmonary artery and the aorta.<br>
A quick tutorial of the module can be found <a href='https://chestimagingplatform.org/files/chestimagingplatform/files/paa_ratio.pdf'>here</a>.<br><br>
The PAA Ratio biomarker has been proved to predict acute exacerbations of COPD (Wells, J. M., Washko, G. R.,
Han, M. K., Abbas, N., Nath, H., Mamary, a. J., Dransfield, M. T. (2012).
Pulmonary Arterial Enlargement and Acute Exacerbations of COPD. New England Journal of Medicine, 367(10), 913-921).
For more information refer to: <a href='http://www.nejm.org/doi/full/10.1056/NEJMoa1203830'>http://www.nejm.org/doi/full/10.1056/NEJMoa1203830</a>"""
self.parent.acknowledgementText = SlicerUtil.ACIL_AcknowledgementText
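        # Worked example (editor's addition, not in the original module): with a
        # pulmonary artery diameter of 31 mm and an aorta diameter of 29 mm taken
        # on the same axial slice, ratio = 31 / 29 ≈ 1.07 > 1.0, which the widget
        # highlights with the warning colour in refreshTextboxes().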
#
# CIP_PAARatioWidget
#
class CIP_PAARatioWidget(ScriptedLoadableModuleWidget):
"""Uses ScriptedLoadableModuleWidget base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
@property
def moduleName(self):
return os.path.basename(__file__).replace(".py", "")
@property
def currentVolumeId(self):
return self.volumeSelector.currentNodeID
def __init__(self, parent):
ScriptedLoadableModuleWidget.__init__(self, parent)
self.moduleName = "CIP_PAARatio"
from functools import partial
def __onNodeAddedObserver__(self, caller, eventId, callData):
"""Node added to the Slicer scene"""
if callData.GetClassName() == 'vtkMRMLScalarVolumeNode' \
and slicer.util.mainWindow().moduleSelector().selectedModule == self.moduleName: # Current module visible
self.volumeSelector.setCurrentNode(callData)
SlicerUtil.changeContrastWindow(350, 40)
self.__onNodeAddedObserver__ = partial(__onNodeAddedObserver__, self)
self.__onNodeAddedObserver__.CallDataType = vtk.VTK_OBJECT
def setup(self):
"""This is called one time when the module GUI is initialized
"""
ScriptedLoadableModuleWidget.setup(self)
# Create objects that can be used anywhere in the module. Example: in most cases there should be just one
# object of the logic class
self.logic = CIP_PAARatioLogic()
#
# Create all the widgets. Example Area
mainAreaCollapsibleButton = ctk.ctkCollapsibleButton()
mainAreaCollapsibleButton.text = "Main parameters"
self.layout.addWidget(mainAreaCollapsibleButton)
self.mainAreaLayout = qt.QGridLayout(mainAreaCollapsibleButton)
self.label = qt.QLabel("Select the volume")
self.label.setStyleSheet("margin:10px 0 20px 7px")
self.mainAreaLayout.addWidget(self.label, 0, 0)
self.volumeSelector = slicer.qMRMLNodeComboBox()
self.volumeSelector.nodeTypes = ( "vtkMRMLScalarVolumeNode", "" )
self.volumeSelector.name = "paa_volumeSelector"
self.volumeSelector.selectNodeUponCreation = True
self.volumeSelector.autoFillBackground = True
self.volumeSelector.addEnabled = True
self.volumeSelector.noneEnabled = False
self.volumeSelector.removeEnabled = False
self.volumeSelector.showHidden = False
self.volumeSelector.showChildNodeTypes = False
self.volumeSelector.setMRMLScene( slicer.mrmlScene )
self.volumeSelector.setStyleSheet("margin:0px 0 0px 0; padding:2px 0 2px 5px")
self.mainAreaLayout.addWidget(self.volumeSelector, 0, 1)
self.jumptToTemptativeSliceButton = ctk.ctkPushButton()
self.jumptToTemptativeSliceButton.name = "jumptToTemptativeSliceButton"
self.jumptToTemptativeSliceButton.text = "Jump to temptative slice"
self.jumptToTemptativeSliceButton.toolTip = "Jump to the best estimated slice to place the rulers"
self.jumptToTemptativeSliceButton.setIcon(qt.QIcon("{0}/ruler.png".format(SlicerUtil.CIP_ICON_DIR)))
self.jumptToTemptativeSliceButton.setIconSize(qt.QSize(20, 20))
self.jumptToTemptativeSliceButton.setStyleSheet("font-weight: bold;")
# self.jumptToTemptativeSliceButton.setFixedWidth(140)
self.mainAreaLayout.addWidget(self.jumptToTemptativeSliceButton, 1, 1)
### Structure Selector
self.structuresGroupbox = qt.QGroupBox("Select the structure")
self.groupboxLayout = qt.QVBoxLayout()
self.structuresGroupbox.setLayout(self.groupboxLayout)
self.mainAreaLayout.addWidget(self.structuresGroupbox, 2, 0)
self.structuresButtonGroup=qt.QButtonGroup()
# btn = qt.QRadioButton("None")
# btn.visible = False
# self.structuresButtonGroup.addButton(btn)
# self.groupboxLayout.addWidget(btn)
btn = qt.QRadioButton("Both")
btn.name = "paaButton"
btn.checked = True
self.structuresButtonGroup.addButton(btn, 0)
self.groupboxLayout.addWidget(btn)
btn = qt.QRadioButton("Pulmonary Arterial")
btn.name = "paRadioButton"
self.structuresButtonGroup.addButton(btn, 1)
self.groupboxLayout.addWidget(btn)
btn = qt.QRadioButton("Aorta")
btn.name = "aortaRadioButton"
self.structuresButtonGroup.addButton(btn, 2)
self.groupboxLayout.addWidget(btn)
### Buttons toolbox
self.buttonsToolboxFrame = qt.QFrame()
self.buttonsToolboxLayout = qt.QGridLayout()
self.buttonsToolboxFrame.setLayout(self.buttonsToolboxLayout)
self.mainAreaLayout.addWidget(self.buttonsToolboxFrame, 2, 1)
self.placeRulersButton = ctk.ctkPushButton()
self.placeRulersButton.text = "Place ruler/s"
self.placeRulersButton.name = "placeRulersButton"
self.placeRulersButton.toolTip = "Place the ruler/s for the selected structure/s in the current slice"
self.placeRulersButton.setIcon(qt.QIcon("{0}/ruler.png".format(SlicerUtil.CIP_ICON_DIR)))
self.placeRulersButton.setIconSize(qt.QSize(20,20))
self.placeRulersButton.setFixedWidth(105)
self.placeRulersButton.setStyleSheet("font-weight:bold")
self.buttonsToolboxLayout.addWidget(self.placeRulersButton, 0, 0)
self.moveUpButton = ctk.ctkPushButton()
self.moveUpButton.text = "Move up"
self.moveUpButton.toolTip = "Move the selected ruler/s one slice up"
self.moveUpButton.setIcon(qt.QIcon("{0}/move_up.png".format(SlicerUtil.CIP_ICON_DIR)))
self.moveUpButton.setIconSize(qt.QSize(20,20))
self.moveUpButton.setFixedWidth(95)
self.buttonsToolboxLayout.addWidget(self.moveUpButton, 0, 1)
self.moveDownButton = ctk.ctkPushButton()
self.moveDownButton.text = "Move down"
self.moveDownButton.toolTip = "Move the selected ruler/s one slice down"
self.moveDownButton.setIcon(qt.QIcon("{0}/move_down.png".format(SlicerUtil.CIP_ICON_DIR)))
self.moveDownButton.setIconSize(qt.QSize(20,20))
self.moveDownButton.setFixedWidth(95)
self.buttonsToolboxLayout.addWidget(self.moveDownButton, 0, 2)
self.removeButton = ctk.ctkPushButton()
self.removeButton.text = "Remove ALL rulers"
self.removeButton.toolTip = "Remove all the rulers for this volume"
self.removeButton.setIcon(qt.QIcon("{0}/delete.png".format(SlicerUtil.CIP_ICON_DIR)))
self.removeButton.setIconSize(qt.QSize(20,20))
self.buttonsToolboxLayout.addWidget(self.removeButton, 1, 1, 1, 2, 2)
### Textboxes
self.textboxesFrame = qt.QFrame()
self.textboxesLayout = qt.QFormLayout()
self.textboxesFrame.setLayout(self.textboxesLayout)
self.textboxesFrame.setFixedWidth(190)
self.mainAreaLayout.addWidget(self.textboxesFrame, 3, 0)
self.paTextBox = qt.QLineEdit()
self.paTextBox.setReadOnly(True)
self.textboxesLayout.addRow("PA (mm): ", self.paTextBox)
self.aortaTextBox = qt.QLineEdit()
self.aortaTextBox.setReadOnly(True)
self.textboxesLayout.addRow("Aorta (mm): ", self.aortaTextBox)
self.ratioTextBox = qt.QLineEdit()
self.ratioTextBox.name = "ratioTextBox"
self.ratioTextBox.setReadOnly(True)
self.textboxesLayout.addRow("Ratio PA/A: ", self.ratioTextBox)
# Save case data
self.reportsCollapsibleButton = ctk.ctkCollapsibleButton()
self.reportsCollapsibleButton.text = "Reporting"
self.layout.addWidget(self.reportsCollapsibleButton)
self.reportsLayout = qt.QHBoxLayout(self.reportsCollapsibleButton)
self.storedColumnNames = ["caseId", "paDiameterMm", "aortaDiameterMm",
"pa1r", "pa1a", "pa1s", "pa2r", "pa2a", "pa2s",
"a1r", "a1a", "a1s", "a2r", "a2a", "a2s"]
columns = CaseReportsWidget.getColumnKeysNormalizedDictionary(self.storedColumnNames)
self.reportsWidget = CaseReportsWidget(self.moduleName, columns, parentWidget=self.reportsCollapsibleButton)
self.reportsWidget.setup()
# Init state
self.resetModuleState()
self.preventSavingState = False
self.saveStateBeforeEnteringModule()
self.preventSavingState = True
self.switchToRedView()
#####
# Case navigator
if SlicerUtil.isSlicerACILLoaded():
caseNavigatorAreaCollapsibleButton = ctk.ctkCollapsibleButton()
caseNavigatorAreaCollapsibleButton.text = "Case navigator"
self.layout.addWidget(caseNavigatorAreaCollapsibleButton, 0x0020)
# caseNavigatorLayout = qt.QVBoxLayout(caseNavigatorAreaCollapsibleButton)
# Add a case list navigator
from ACIL.ui import CaseNavigatorWidget
self.caseNavigatorWidget = CaseNavigatorWidget(self.moduleName, caseNavigatorAreaCollapsibleButton)
self.caseNavigatorWidget.setup()
self.layout.addStretch()
# Connections
self.observers = []
self.volumeSelector.connect('currentNodeChanged(vtkMRMLNode*)', self.onVolumeSelectorChanged)
self.jumptToTemptativeSliceButton.connect('clicked()', self.onJumpToTemptativeSliceButtonClicked)
self.placeRulersButton.connect('clicked()', self.onPlaceRulersClicked)
self.moveUpButton.connect('clicked()', self.onMoveUpRulerClicked)
self.moveDownButton.connect('clicked()', self.onMoveDownRulerClicked)
self.removeButton.connect('clicked()', self.onRemoveRulerClicked)
self.reportsWidget.addObservable(self.reportsWidget.EVENT_SAVE_BUTTON_CLICKED, self.onSaveReport)
# Init state
self.resetModuleState()
self.preventSavingState = False
self.saveStateBeforeEnteringModule()
self.preventSavingState = True
def enter(self):
"""This is invoked every time that we select this module as the active module in Slicer (not only the first time)"""
# activeVolumeId = SlicerUtil.getActiveVolumeIdInRedSlice()
# if activeVolumeId is not None:
# self.volumeSelector.setCurrentNodeID(activeVolumeId)
# if activeVolumeId not in self.logic.currentVolumesLoaded:
# self.placeDefaultRulers(activeVolumeId)
# Save state
self.saveStateBeforeEnteringModule()
# Start listening again to scene events
self.__addSceneObservables__()
volumeId = self.volumeSelector.currentNodeID
if volumeId:
SlicerUtil.displayBackgroundVolume(volumeId)
# Show the current rulers (if existing)
self.logic.rulersVisible(volumeId, visible=True)
# This module always works in Axial
SlicerUtil.changeLayoutToAxial()
self.changeToDefaultContrastLevel()
def exit(self):
"""This is invoked every time that we switch to another module (not only when Slicer is closed)."""
# Stop listening to Scene events
self.__removeSceneObservables()
# Hide rulers
if self.currentVolumeId:
self.logic.rulersVisible(self.currentVolumeId, False)
# Load previous state
self.restoreStateBeforeExitingModule()
def cleanup(self):
"""This is invoked as a destructor of the GUI when the module is no longer going to be used"""
self.__removeSceneObservables()
self.reportsWidget.cleanup()
self.reportsWidget = None
def saveStateBeforeEnteringModule(self):
"""Save the state of the module regarding labelmap, etc. This state will be saved/loaded when
exiting/entering the module
"""
if self.preventSavingState:
# Avoid that the first time that the module loads, the state is saved twice
self.preventSavingState = False
return
# Save existing layout
self.savedLayout = None
if slicer.app.layoutManager() is not None:
self.savedLayout = slicer.app.layoutManager().layout
# Get the active volume (it it exists)
activeVolumeId = SlicerUtil.getFirstActiveVolumeId()
if activeVolumeId is None:
# Reset state
self.resetModuleState()
else:
# There is a Volume loaded. Save state
try:
self.savedVolumeID = activeVolumeId
displayNode = SlicerUtil.getNode(activeVolumeId).GetDisplayNode()
self.savedContrastLevel = (displayNode.GetWindow(), displayNode.GetLevel())
# activeLabelmapId = SlicerUtil.getFirstActiveLabelmapId()
# self.savedLabelmapID = activeLabelmapId
# if activeLabelmapId is None:
# self.savedLabelmapOpacity = None
# else:
# self.savedLabelmapOpacity = SlicerUtil.getLabelmapOpacity()
# # Hide any labelmap
# SlicerUtil.displayLabelmapVolume(None)
except:
Util.print_last_exception()
# Not action really needed
pass
def restoreStateBeforeExitingModule(self):
"""Load the last state of the module when the user exited (labelmap, opacity, contrast window, etc.)
"""
try:
if self.savedVolumeID:
# There is a previously saved valid state.
SlicerUtil.setActiveVolumeIds(self.savedVolumeID)
SlicerUtil.changeContrastWindow(self.savedContrastLevel[0], self.savedContrastLevel[1])
# if self.savedLabelmapID:
# print "Restoring active labelmap: " + self.savedLabelmapID
# # There was a valid labelmap. Restore it
# SlicerUtil.displayLabelmapVolume(self.savedLabelmapID)
# # Restore previous opacity
# SlicerUtil.changeLabelmapOpacity(self.savedLabelmapOpacity)
# else:
# # Hide labelmap
# print "No labelmap saved. Hide all"
# SlicerUtil.displayLabelmapVolume(None)
# else:
# # Hide labelmap
# print "No volume saved. Hide labelmap"
# SlicerUtil.displayLabelmapVolume(None)
# Restore layout
SlicerUtil.changeLayout(self.savedLayout)
except:
Util.print_last_exception()
pass
def resetModuleState(self):
""" Reset all the module state variables
"""
self.savedVolumeID = None # Active grayscale volume ID
self.savedLabelmapID = None # Active labelmap node ID
self.savedLabelmapOpacity = None # Labelmap opacity
self.savedContrastLevel = (None, None) # Contrast window/level that the user had when entering the module
SlicerUtil.changeContrastWindow(350, 40)
def changeToDefaultContrastLevel(self):
# Preferred contrast
SlicerUtil.changeContrastWindow(1000, 200)
def jumpToTemptativeSlice(self, volumeId):
""" Jump the red window to a predefined slice based on the size of the volume
:param volumeId:
"""
# Get the default coordinates of the ruler
aorta1, aorta2, pa1, pa2 = self.logic.getDefaultCoords(volumeId)
# Set the display in the right slice
self.moveRedWindowToSlice(aorta1[2])
redSliceNode = slicer.util.getFirstNodeByClassByName("vtkMRMLSliceNode", "Red")
factor = 0.5
newFOVx = redSliceNode.GetFieldOfView()[0] * factor
newFOVy = redSliceNode.GetFieldOfView()[1] * factor
newFOVz = redSliceNode.GetFieldOfView()[2]
# Move the camera up to fix the view
redSliceNode.SetXYZOrigin(0, 50, 0)
# Update the FOV (zoom in)
redSliceNode.SetFieldOfView(newFOVx, newFOVy, newFOVz)
# Refresh the data in the viewer
redSliceNode.UpdateMatrices()
def placeDefaultRulers(self, volumeId):
""" Set the Aorta and PA rulers to a default estimated position and jump to that slice
:param volumeId:
"""
if not volumeId:
return
# Hide all the actual ruler nodes
self.logic.hideAllRulers()
# Remove the current rulers for this volume
self.logic.removeRulers(volumeId)
# Create the default rulers
self.logic.createDefaultRulers(volumeId, self.onRulerUpdated)
# Activate both structures
self.structuresButtonGroup.buttons()[0].setChecked(True)
# Jump to the slice where the rulers are
self.jumpToTemptativeSlice(volumeId)
# Place the rulers in the current slice
self.placeRuler()
# Add the current volume to the list of loaded volumes
#self.logic.currentVolumesLoaded.add(volumeId)
# Modify the zoom of the Red slice
redSliceNode = slicer.util.getFirstNodeByClassByName("vtkMRMLSliceNode", "Red")
factor = 0.5
newFOVx = redSliceNode.GetFieldOfView()[0] * factor
newFOVy = redSliceNode.GetFieldOfView()[1] * factor
newFOVz = redSliceNode.GetFieldOfView()[2]
redSliceNode.SetFieldOfView( newFOVx, newFOVy, newFOVz )
# Move the camera up to fix the view
redSliceNode.SetXYZOrigin(0, 50, 0)
# Refresh the data in the viewer
redSliceNode.UpdateMatrices()
def placeRuler(self):
""" Place one or the two rulers in the current visible slice in Red node
"""
volumeId = self.volumeSelector.currentNodeID
if volumeId == '':
self.showUnselectedVolumeWarningMessage()
return
selectedStructure = self.getCurrentSelectedStructure()
if selectedStructure == self.logic.NONE:
qt.QMessageBox.warning(slicer.util.mainWindow(), 'Review structure',
'Please select Pulmonary Arterial, Aorta or both to place the right ruler/s')
return
# Get the current slice
currentSlice = self.getCurrentRedWindowSlice()
if selectedStructure == self.logic.BOTH:
structures = [self.logic.PA, self.logic.AORTA]
else:
structures = [selectedStructure]
for structure in structures:
self.logic.placeRulerInSlice(volumeId, structure, currentSlice, self.onRulerUpdated)
self.refreshTextboxes()
def getCurrentSelectedStructure(self):
""" Get the current selected structure id
:return: self.logic.AORTA or self.logic.PA
"""
selectedStructureText = self.structuresButtonGroup.checkedButton().text
if selectedStructureText == "Aorta": return self.logic.AORTA
elif selectedStructureText == "Pulmonary Arterial": return self.logic.PA
elif selectedStructureText == "Both": return self.logic.BOTH
return self.logic.NONE
def stepSlice(self, offset):
""" Move the selected structure one slice up or down
:param offset: +1 or -1
:return:
"""
volumeId = self.volumeSelector.currentNodeID
if volumeId == '':
self.showUnselectedVolumeWarningMessage()
return
selectedStructure = self.getCurrentSelectedStructure()
if selectedStructure == self.logic.NONE:
self.showUnselectedStructureWarningMessage()
return
if selectedStructure == self.logic.BOTH:
# Move both rulers
self.logic.stepSlice(volumeId, self.logic.AORTA, offset)
newSlice = self.logic.stepSlice(volumeId, self.logic.PA, offset)
else:
newSlice = self.logic.stepSlice(volumeId, selectedStructure, offset)
self.moveRedWindowToSlice(newSlice)
def removeRulers(self):
""" Remove all the rulers related to the current volume node
:return:
"""
self.logic.removeRulers(self.volumeSelector.currentNodeID)
self.refreshTextboxes(reset=True)
def getCurrentRedWindowSlice(self):
""" Get the current slice (in RAS) of the Red window
:return:
"""
redNodeSliceNode = slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceNode()
return redNodeSliceNode.GetSliceOffset()
def moveRedWindowToSlice(self, newSlice):
""" Moves the red display to the specified RAS slice
:param newSlice: slice to jump (RAS format)
:return:
"""
redNodeSliceNode = slicer.app.layoutManager().sliceWidget('Red').sliceLogic().GetSliceNode()
redNodeSliceNode.JumpSlice(0,0,newSlice)
def refreshTextboxes(self, reset=False):
""" Update the information of the textboxes that give information about the measurements
"""
self.aortaTextBox.setText("0")
self.paTextBox.setText("0")
self.ratioTextBox.setText("0")
        self.ratioTextBox.setStyleSheet(" QLineEdit { background-color: white; color: black}")
volumeId = self.volumeSelector.currentNodeID
# if volumeId not in self.logic.currentVolumesLoaded:
# return
if volumeId:
self.logic.changeActiveRulersColor(volumeId, self.logic.defaultColor)
aorta = None
pa = None
if not reset:
rulerAorta, newAorta = self.logic.getRulerNodeForVolumeAndStructure(self.volumeSelector.currentNodeID,
self.logic.AORTA, createIfNotExist=False)
rulerPA, newPA = self.logic.getRulerNodeForVolumeAndStructure(self.volumeSelector.currentNodeID,
self.logic.PA, createIfNotExist=False)
if rulerAorta:
aorta = rulerAorta.GetDistanceMeasurement()
self.aortaTextBox.setText(str(aorta))
if rulerPA:
pa = rulerPA.GetDistanceMeasurement()
self.paTextBox.setText(str(pa))
if pa is not None and aorta is not None and aorta != 0:
try:
ratio = pa / aorta
self.ratioTextBox.setText(str(ratio))
if ratio > 1.0:
# Switch colors ("alarm")
st = " QLineEdit {{ background-color: rgb({0}, {1}, {2}); color: white }}". \
format(int(self.logic.defaultWarningColor[0]*255),
int(self.logic.defaultWarningColor[1]*255),
int(self.logic.defaultWarningColor[2]*255))
self.ratioTextBox.setStyleSheet(st)
self.logic.changeActiveRulersColor(volumeId, self.logic.defaultWarningColor)
except Exception:
Util.print_last_exception()
def showUnselectedVolumeWarningMessage(self):
qt.QMessageBox.warning(slicer.util.mainWindow(), 'Select a volume',
'Please select a volume')
def showUnselectedStructureWarningMessage(self):
qt.QMessageBox.warning(slicer.util.mainWindow(), 'Review structure',
'Please select Aorta, Pulmonary Arterial or Both to place the right ruler/s')
def switchToRedView(self):
""" Switch the layout to Red slice only
:return:
"""
layoutManager = slicer.app.layoutManager()
# Test the layout manager is not none in case the module is initialized without a main window
# This happens for example in automatic tests
if layoutManager is not None:
layoutManager.setLayout(6)
def __addSceneObservables__(self):
self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.NodeAddedEvent, self.__onNodeAddedObserver__))
self.observers.append(slicer.mrmlScene.AddObserver(slicer.vtkMRMLScene.EndCloseEvent, self.__onSceneClosed__))
def __removeSceneObservables(self):
for observer in self.observers:
slicer.mrmlScene.RemoveObserver(observer)
self.observers.remove(observer)
#########
# EVENTS
def onVolumeSelectorChanged(self, node):
#if node is not None and node.GetID() not in self.currentVolumesLoaded:
# if node is not None:
# # New node. Load default rulers
# if node.GetID() not in self.logic.currentVolumesLoaded:
# self.placeDefaultRulers(node.GetID())
logging.info("Volume selector node changed: {0}".format(
'(None)' if node is None else node.GetName()
))
# Preferred contrast (TODO: set right level)
SlicerUtil.changeContrastWindow(1144, 447)
self.refreshTextboxes()
def onStructureClicked(self, button):
fiducialsNode = self.getFiducialsNode(self.volumeSelector.currentNodeID)
if fiducialsNode is not None:
self.__addRuler__(button.text, self.volumeSelector.currentNodeID)
markupsLogic = slicer.modules.markups.logic()
markupsLogic.SetActiveListID(fiducialsNode)
applicationLogic = slicer.app.applicationLogic()
selectionNode = applicationLogic.GetSelectionNode()
selectionNode.SetReferenceActivePlaceNodeClassName("vtkMRMLAnnotationRulerNode")
interactionNode = applicationLogic.GetInteractionNode()
interactionNode.SwitchToSinglePlaceMode()
def onJumpToTemptativeSliceButtonClicked(self):
volumeId = self.volumeSelector.currentNodeID
if volumeId == '':
self.showUnselectedVolumeWarningMessage()
return
#self.placeDefaultRulers(volumeId)
self.jumpToTemptativeSlice(volumeId)
def onRulerUpdated(self, node, event):
self.refreshTextboxes()
def onPlaceRulersClicked(self):
self.placeRuler()
def onMoveUpRulerClicked(self):
self.stepSlice(1)
def onMoveDownRulerClicked(self):
self.stepSlice(-1)
def onRemoveRulerClicked(self):
if (qt.QMessageBox.question(slicer.util.mainWindow(), 'Remove rulers',
'Are you sure you want to remove all the rulers from this volume?',
qt.QMessageBox.Yes|qt.QMessageBox.No)) == qt.QMessageBox.Yes:
self.logic.removeRulers(self.volumeSelector.currentNodeID)
self.refreshTextboxes()
def onSaveReport(self):
""" Save the current values in a persistent csv file
:return:
"""
volumeId = self.volumeSelector.currentNodeID
if volumeId:
caseName = slicer.mrmlScene.GetNodeByID(volumeId).GetName()
coords = [0, 0, 0, 0]
pa1 = pa2 = a1 = a2 = None
# PA
rulerNode, newNode = self.logic.getRulerNodeForVolumeAndStructure(volumeId, self.logic.PA, createIfNotExist=False)
if rulerNode:
# Get current RAS coords
rulerNode.GetPositionWorldCoordinates1(coords)
pa1 = list(coords)
rulerNode.GetPositionWorldCoordinates2(coords)
pa2 = list(coords)
# AORTA
rulerNode, newNode = self.logic.getRulerNodeForVolumeAndStructure(volumeId, self.logic.AORTA, createIfNotExist=False)
if rulerNode:
rulerNode.GetPositionWorldCoordinates1(coords)
a1 = list(coords)
rulerNode.GetPositionWorldCoordinates2(coords)
a2 = list(coords)
self.reportsWidget.insertRow(
caseId=caseName,
paDiameterMm=self.paTextBox.text,
aortaDiameterMm=self.aortaTextBox.text,
pa1r = pa1[0] if pa1 is not None else '',
pa1a = pa1[1] if pa1 is not None else '',
pa1s = pa1[2] if pa1 is not None else '',
pa2r = pa2[0] if pa2 is not None else '',
pa2a = pa2[1] if pa2 is not None else '',
pa2s = pa2[2] if pa2 is not None else '',
a1r = a1[0] if a1 is not None else '',
a1a = a1[1] if a1 is not None else '',
a1s = a1[2] if a1 is not None else '',
a2r = a2[0] if a2 is not None else '',
a2a = a2[1] if a2 is not None else '',
a2s = a2[2] if a2 is not None else ''
)
qt.QMessageBox.information(slicer.util.mainWindow(), 'Data saved', 'The data were saved successfully')
def __onSceneClosed__(self, arg1, arg2):
""" Scene closed. Reset currently loaded volumes
:param arg1:
:param arg2:
:return:
"""
#self.logic.currentVolumesLoaded.clear()
self.logic.currentActiveVolumeId = None
# CIP_PAARatioLogic
#
class CIP_PAARatioLogic(ScriptedLoadableModuleLogic):
"""This class should implement all the actual
computation done by your module. The interface
should be such that other python code can import
this class and make use of the functionality without
requiring an instance of the Widget.
Uses ScriptedLoadableModuleLogic base class, available at:
https://github.com/Slicer/Slicer/blob/master/Base/Python/slicer/ScriptedLoadableModule.py
"""
NONE = 0
AORTA = 1
PA = 2
BOTH = 3
SLICEFACTOR = 0.6
# Default XY coordinates for Aorta and PA (the Z will be estimated depending on the number of slices)
defaultAorta1 = [220, 170, 0]
defaultAorta2 = [275, 175, 0]
defaultPA1 = [280, 175, 0]
defaultPA2 = [320, 190, 0]
defaultColor = [0.5, 0.5, 1.0]
defaultWarningColor = [1.0, 0.0, 0.0]
def __init__(self):
self.currentActiveVolumeId = None
# self.currentVolumesLoaded = set()
def getRootAnnotationsNode(self):
""" Get the root annotations node global to the scene, creating it if necessary
:return: "All Annotations" vtkMRMLAnnotationHierarchyNode
"""
return SlicerUtil.getRootAnnotationsNode()
def getRulersListNode(self, volumeId, createIfNotExist=True):
""" Get the rulers node for this volume, creating it if it doesn't exist yet
:param volumeId:
:return: "volumeId_paaRulersNode" vtkMRMLAnnotationHierarchyNode
"""
# Search for the current volume hierarchy node (each volume has its own hierarchy)
nodeName = volumeId + '_paaRulersNode'
rulersNode = SlicerUtil.getNode(nodeName)
if rulersNode is None and createIfNotExist:
# Create the node
annotationsLogic = slicer.modules.annotations.logic()
rootHierarchyNode = self.getRootAnnotationsNode()
annotationsLogic.SetActiveHierarchyNodeID(rootHierarchyNode.GetID())
annotationsLogic.AddHierarchy()
n = rootHierarchyNode.GetNumberOfChildrenNodes()
rulersNode = rootHierarchyNode.GetNthChildNode(n-1)
# Rename the node
rulersNode.SetName(nodeName)
logging.debug("Created node " + nodeName + " (general rulers node for this volume")
# Return the node
return rulersNode
def getRulerNodeForVolumeAndStructure(self, volumeId, structureId, createIfNotExist=True, callbackWhenRulerModified=None):
""" Search for the right ruler node to be created based on the volume and the selected
structure (Aorta or PA).
It also creates the necessary node hierarchy if it doesn't exist.
:param volumeId:
:param structureId: Aorta (1), PA (2)
:param createIfNotExist: create the ruler node if it doesn't exist yet
:param callbackWhenRulerModified: function to call when the ruler node is modified
:return: node and a boolean indicating if the node has been created now
"""
isNewNode = False
if structureId == 0: # none
return None, isNewNode
if structureId == self.AORTA: # Aorta
#nodeName = volumeId + '_paaRulers_aorta'
nodeName = "A"
elif structureId == self.PA: # 'Pulmonary Arterial':
# nodeName = volumeId + '_paaRulers_pa'
nodeName = "PA"
# Get the node that contains all the rulers for this volume
rulersListNode = self.getRulersListNode(volumeId, createIfNotExist=createIfNotExist)
node = None
if rulersListNode:
# Search for the node
for i in range(rulersListNode.GetNumberOfChildrenNodes()):
nodeWrapper = rulersListNode.GetNthChildNode(i)
# nodeWrapper is also a HierarchyNode. We need to look for its only child that will be the rulerNode
col = vtk.vtkCollection()
nodeWrapper.GetChildrenDisplayableNodes(col)
rulerNode = col.GetItemAsObject(0)
if rulerNode.GetName() == nodeName:
node = rulerNode
break
if node is None and createIfNotExist:
# Create the node
# Set the active node, so that the new ruler is a child node
annotationsLogic = slicer.modules.annotations.logic()
annotationsLogic.SetActiveHierarchyNodeID(rulersListNode.GetID())
node = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationRulerNode')
node.SetName(nodeName)
self.__changeColor__(node, self.defaultColor)
slicer.mrmlScene.AddNode(node)
isNewNode = True
node.AddObserver(vtk.vtkCommand.ModifiedEvent, callbackWhenRulerModified)
logging.debug("Created node " + nodeName + " for volume " + volumeId)
return node, isNewNode
def hideAllRulers(self):
"""
Hide all the current rulers in the scene
:return:
"""
nodes = slicer.mrmlScene.GetNodesByClass("vtkMRMLAnnotationRulerNode")
for i in range(nodes.GetNumberOfItems()):
nodes.GetItemAsObject(i).SetDisplayVisibility(False)
def rulersVisible(self, volumeId, visible):
""" Show or hide all the ruler nodes
"""
if volumeId is not None:
rulersListNode = self.getRulersListNode(volumeId, False)
if rulersListNode:
for i in range(rulersListNode.GetNumberOfChildrenNodes()):
nodeWrapper = rulersListNode.GetNthChildNode(i)
# nodeWrapper is also a HierarchyNode. We need to look for its only child that will be the rulerNode
col = vtk.vtkCollection()
nodeWrapper.GetChildrenDisplayableNodes(col)
rulerNode = col.GetItemAsObject(0)
rulerNode.SetDisplayVisibility(visible)
def __changeColor__(self, node, color):
for i in range(3):
n = node.GetNthDisplayNode(i)
if n:
n.SetColor(color)
layoutManager = slicer.app.layoutManager()
# Test the layout manager is not none in case the module is initialized without a main window
# This happens for example in automatic tests
if layoutManager is not None:
# Refresh UI to repaint both rulers. Is this the best way? Who knows...
layoutManager.sliceWidget("Red").sliceView().mrmlSliceNode().Modified()
def changeActiveRulersColor(self, volumeId, color):
""" Change the color for all the rulers in this volume
:param volumeId:
:param color:
:return:
"""
for structureId in [self.PA, self.AORTA]:
node, new = self.getRulerNodeForVolumeAndStructure(volumeId, structureId, createIfNotExist=False)
if node:
self.__changeColor__(node, color)
def createDefaultRulers(self, volumeId, callbackWhenRulerModified):
""" Set the Aorta and PA rulers to their default position.
The X and Y will be configured in "defaultAorta1, defaultAorta2, defaultPA1, defaultPA2" properties
The Z will be estimated based on the number of slices of the volume
:param volumeId: volume id
:param callbackWhenRulerModified: function to invoke when the ruler is modified
:return: a tuple of 4 vales. For each node, return the node and a boolean indicating if the node was
created now
"""
aorta1, aorta2, pa1, pa2 = self.getDefaultCoords(volumeId)
rulerNodeAorta, newNodeAorta = self.getRulerNodeForVolumeAndStructure(volumeId, self.AORTA,
createIfNotExist=True, callbackWhenRulerModified=callbackWhenRulerModified)
rulerNodeAorta.SetPositionWorldCoordinates1(aorta1)
rulerNodeAorta.SetPositionWorldCoordinates2(aorta2)
rulerNodePA, newNodePA = self.getRulerNodeForVolumeAndStructure(volumeId, self.PA,
createIfNotExist=True, callbackWhenRulerModified=callbackWhenRulerModified)
rulerNodePA.SetPositionWorldCoordinates1(pa1)
rulerNodePA.SetPositionWorldCoordinates2(pa2)
return rulerNodeAorta, newNodeAorta, rulerNodePA, newNodePA
def stepSlice(self, volumeId, structureId, sliceStep):
""" Move the selected ruler up or down one slice.
:param volumeId:
:param structureId:
:param sliceStep: +1 or -1
:return: new slice in RAS format
"""
# Calculate the RAS coords of the slice where we should jump to
rulerNode, newNode = self.getRulerNodeForVolumeAndStructure(volumeId, structureId, createIfNotExist=False)
if not rulerNode:
# The ruler has not been created. This op doesn't make sense
return False
coords = [0, 0, 0, 0]
# Get current RAS coords
rulerNode.GetPositionWorldCoordinates1(coords)
# Get the transformation matrixes
rastoijk=vtk.vtkMatrix4x4()
ijktoras=vtk.vtkMatrix4x4()
scalarVolumeNode = slicer.mrmlScene.GetNodeByID(volumeId)
scalarVolumeNode.GetRASToIJKMatrix(rastoijk)
scalarVolumeNode.GetIJKToRASMatrix(ijktoras)
# Get the current slice (Z). It will be the same in both positions
ijkCoords = list(rastoijk.MultiplyPoint(coords))
# Add/substract the offset to Z
ijkCoords[2] += sliceStep
# Convert back to RAS, just replacing the Z
newSlice = ijktoras.MultiplyPoint(ijkCoords)[2]
self._placeRulerInSlice_(rulerNode, structureId, volumeId, newSlice)
return newSlice
def placeRulerInSlice(self, volumeId, structureId, newSlice, callbackWhenUpdated=None):
""" Move the ruler to the specified slice (in RAS format)
:param volumeId:
:param structureId:
:param newSlice: slice in RAS format
:return: tuple with ruler node and a boolean indicating if the node was just created
"""
# Get the correct ruler
rulerNode, newNode = self.getRulerNodeForVolumeAndStructure(volumeId, structureId,
createIfNotExist=True, callbackWhenRulerModified=callbackWhenUpdated)
# Add the volume to the list of volumes that have some ruler
# self.currentVolumesLoaded.add(volumeId)
# Move the ruler
self._placeRulerInSlice_(rulerNode, structureId, volumeId, newSlice)
#return rulerNode, newNode
def _placeRulerInSlice_(self, rulerNode, structureId, volumeId, newSlice):
""" Move the ruler to the specified slice (in RAS format)
:param rulerNode: node of type vtkMRMLAnnotationRulerNode
:param newSlice: slice in RAS format
:return: True if the operation was succesful
"""
coords1 = [0, 0, 0, 0]
coords2 = [0, 0, 0, 0]
# Get RAS coords of the ruler node
rulerNode.GetPositionWorldCoordinates1(coords1)
rulerNode.GetPositionWorldCoordinates2(coords2)
# Set the slice of the coordinate
coords1[2] = coords2[2] = newSlice
if coords1[0] == 0 and coords1[1] == 0:
# New node, get default coordinates depending on the structure
defaultCoords = self.getDefaultCoords(volumeId)
if structureId == self.AORTA:
coords1[0] = defaultCoords[0][0]
coords1[1] = defaultCoords[0][1]
coords2[0] = defaultCoords[1][0]
coords2[1] = defaultCoords[1][1]
elif structureId == self.PA:
coords1[0] = defaultCoords[2][0]
coords1[1] = defaultCoords[2][1]
coords2[0] = defaultCoords[3][0]
coords2[1] = defaultCoords[3][1]
rulerNode.SetPositionWorldCoordinates1(coords1)
rulerNode.SetPositionWorldCoordinates2(coords2)
def getDefaultCoords(self, volumeId):
""" Get the default coords for aorta and PA in this volume (RAS format)
:param volumeId:
:return: (aorta1, aorta2, pa1, pa2). All of them lists of 3 positions in RAS format
"""
volume = slicer.mrmlScene.GetNodeByID(volumeId)
rasBounds = [0,0,0,0,0,0]
volume.GetRASBounds(rasBounds)
# Get the slice (Z)
ijk = self.RAStoIJK(volume, [0, 0, rasBounds[5]])
slice = int(ijk[2] * self.SLICEFACTOR) # Empiric estimation
# Get the default coords, converting from IJK to RAS
aorta1 = list(self.defaultAorta1)
aorta1[2] = slice
aorta1 = self.IJKtoRAS(volume, aorta1)
aorta2 = list(self.defaultAorta2)
aorta2[2] = slice
aorta2 = self.IJKtoRAS(volume, aorta2)
pa1 = list(self.defaultPA1)
pa1[2] = slice
pa1 = self.IJKtoRAS(volume, pa1)
pa2 = list(self.defaultPA2)
pa2[2] = slice
pa2 = self.IJKtoRAS(volume, pa2)
return aorta1, aorta2, pa1, pa2
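    # Numeric illustration of the estimate above (editor's addition): if the
    # superior RAS bound maps to IJK slice index 300, the temptative slice is
    # int(300 * SLICEFACTOR) = int(300 * 0.6) = 180, and the default Aorta/PA
    # XY coordinates are then converted to RAS on that slice.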
def removeRulers(self, volumeId):
""" Remove all the rulers for the selected volume
:param volumeId:
:param structureId:
"""
#rulerNode, newNode = self.getRulerNodeForVolumeAndStructure(volumeId, structureId)
rulersListNode = self.getRulersListNode(volumeId, createIfNotExist=False)
if rulersListNode:
rulersListNode.RemoveAllChildrenNodes()
slicer.mrmlScene.RemoveNode(rulersListNode)
def RAStoIJK(self, volumeNode, rasCoords):
""" Transform a list of RAS coords in IJK for a volume
:return: list of IJK coordinates
"""
rastoijk=vtk.vtkMatrix4x4()
volumeNode.GetRASToIJKMatrix(rastoijk)
rasCoords.append(1)
return list(rastoijk.MultiplyPoint(rasCoords))
def IJKtoRAS(self, volumeNode, ijkCoords):
""" Transform a list of IJK coords in RAS for a volume
:return: list of RAS coordinates
"""
ijktoras=vtk.vtkMatrix4x4()
volumeNode.GetIJKToRASMatrix(ijktoras)
ijkCoords.append(1)
return list(ijktoras.MultiplyPoint(ijkCoords))
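# Usage sketch for the two coordinate helpers above (editor's addition;
# 'volumeNode' stands for any loaded vtkMRMLScalarVolumeNode). Both append a
# trailing 1 because the 4x4 matrices act on homogeneous coordinates:
#
#   logic = CIP_PAARatioLogic()
#   ijk = logic.RAStoIJK(volumeNode, [0.0, 0.0, -50.0])   # -> [i, j, k, 1.0]
#   ras = logic.IJKtoRAS(volumeNode, ijk[:3])             # back to RAS (+ 1.0)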
class CIP_PAARatioTest(ScriptedLoadableModuleTest):
@classmethod
def setUpClass(cls):
""" Executed once for all the tests """
slicer.util.selectModule('CIP_PAARatio')
def setUp(self):
""" Do whatever is needed to reset the state - typically a scene clear will be enough.
"""
slicer.mrmlScene.Clear(0)
def runTest(self):
"""Run as few or as many tests as needed here.
"""
self.setUp()
self.test_CIP_PAARatio()
def test_CIP_PAARatio(self):
self.assertIsNotNone(slicer.modules.cip_paaratio)
# Get the widget
widget = slicer.modules.cip_paaratio.widgetRepresentation()
volume = SlicerUtil.downloadVolumeForTests(widget=widget)
self.assertFalse(volume is None)
# Get the logic
logging.info("Getting logic...")
logic = widget.self().logic
# Actions
# Make sure that the right volume is selected
volumeSelector = SlicerUtil.findChildren(widget=widget, name='paa_volumeSelector')[0]
volumeSelector.setCurrentNode(volume)
button = SlicerUtil.findChildren(widget=widget, name='jumptToTemptativeSliceButton')[0]
# Place default rulers
button.click()
logging.info("Default rulers placed...OK")
# Get rulers
aorta = logic.getRulerNodeForVolumeAndStructure(volume.GetID(), logic.AORTA, createIfNotExist=False)[0]
pa = logic.getRulerNodeForVolumeAndStructure(volume.GetID(), logic.PA, createIfNotExist=False)[0]
# Make sure that rulers are in default color
color = aorta.GetNthDisplayNode(0).GetColor()
for i in range(3):
self.assertEqual(color[i], logic.defaultColor[i])
logging.info("Default color...OK")
# Check that the rulers are properly positioned
coordsAorta1 = [0,0,0]
coordsPa1 = [0,0,0]
aorta.GetPosition1(coordsAorta1)
pa.GetPosition1(coordsPa1)
# Aorta ruler should be on the left
self.assertTrue(coordsAorta1[0] > coordsPa1[0])
# Aorta and PA should be in the same slice
self.assertTrue(coordsAorta1[2] == coordsPa1[2])
logging.info("Default position...OK")
# Change Slice of the Aorta ruler
layoutManager = slicer.app.layoutManager()
redWidget = layoutManager.sliceWidget('Red')
style = redWidget.interactorStyle()
style.MoveSlice(1)
# Click in the radio button
button = SlicerUtil.findChildren(widget=widget, name='aortaRadioButton')[0]
button.click()
# click in the place ruler button
button = SlicerUtil.findChildren(widget=widget, name='placeRulersButton')[0]
button.click()
# Make sure that the slice of the ruler has changed
aorta.GetPosition1(coordsAorta1)
self.assertTrue(coordsAorta1[2] != coordsPa1[2])
logging.info("Position changed...OK")
# Force PAA ratio > 1
coordsAorta2 = [0,0,0]
coordsPa2 = [0,0,0]
aorta.GetPosition2(coordsAorta2)
pa.GetPosition2(coordsPa2)
currentRatio = pa.GetDistanceMeasurement() / aorta.GetDistanceMeasurement()
# Calculate how much do we have to increase the position of the pa marker
delta = 1 - currentRatio + 0.2
pa.SetPosition2(coordsPa2[0] + coordsPa2[0]*delta, coordsPa2[1], coordsPa2[2])
# Make sure that rulers are red now
color = aorta.GetNthDisplayNode(0).GetColor()
for i in range(3):
self.assertEqual(color[i], logic.defaultWarningColor[i])
logging.info("Red color...OK")
self.delayDisplay('Test passed!')
| 43.510733
| 163
| 0.648371
|
fb445aa12c51005c073977f238f86b8702564849
| 31,367
|
py
|
Python
|
tests/test_loader.py
|
p0larstern/opsdroid
|
81fe2d650a7fa6b40f054514f16d77521f49b884
|
[
"Apache-2.0"
] | null | null | null |
tests/test_loader.py
|
p0larstern/opsdroid
|
81fe2d650a7fa6b40f054514f16d77521f49b884
|
[
"Apache-2.0"
] | 10
|
2019-06-22T11:18:55.000Z
|
2019-09-03T13:26:47.000Z
|
tests/test_loader.py
|
p0larstern/opsdroid
|
81fe2d650a7fa6b40f054514f16d77521f49b884
|
[
"Apache-2.0"
] | null | null | null |
import os
import shutil
import subprocess
import tempfile
import contextlib
import unittest
import unittest.mock as mock
from types import ModuleType
import pkg_resources
from yaml import YAMLError
from opsdroid.cli.start import configure_lang
from opsdroid import loader as ld
from opsdroid.loader import Loader
from opsdroid.helper import del_rw
class TestLoader(unittest.TestCase):
"""Test the opsdroid loader class."""
def setup(self):
configure_lang({})
opsdroid = mock.MagicMock()
loader = ld.Loader(opsdroid)
return opsdroid, loader
def setUp(self):
os.umask(000)
self._tmp_dir = os.path.join(tempfile.gettempdir(), "opsdroid_tests")
with contextlib.suppress(FileExistsError):
os.makedirs(self._tmp_dir, mode=0o777)
def tearDown(self):
shutil.rmtree(self._tmp_dir, onerror=del_rw)
def test_load_config_file(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/minimal.yaml")]
)
self.assertIsNotNone(config)
def test_load_config_valid(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/full_valid.yaml")]
)
self.assertIsNotNone(config)
def test_load_config_valid_without_welcome_message(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/valid_without_wellcome_message.yaml")]
)
self.assertIsNotNone(config)
def test_load_config_valid_without_db_and_parsers(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/valid_without_db_and_parsers.yaml")]
)
self.assertIsNotNone(config)
def test_load_config_broken_without_connectors(self):
opsdroid, loader = self.setup()
with self.assertRaises(SystemExit) as cm:
_ = loader.load_config_file(
[os.path.abspath("tests/configs/broken_without_connectors.yaml")]
)
self.assertEqual(cm.exception.code, 1)
def test_load_config_valid_case_sensitivity(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/valid_case_sensitivity.yaml")]
)
self.assertIsNotNone(config)
def test_load_config_broken(self):
opsdroid, loader = self.setup()
with self.assertRaises(SystemExit) as cm:
_ = loader.load_config_file(
[os.path.abspath("tests/configs/full_broken.yaml")]
)
self.assertEqual(cm.exception.code, 1)
def test_load_config_file_2(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/minimal_2.yaml")]
)
self.assertIsNotNone(config)
def test_load_exploit(self):
"""This will test if you can run python code from
config.yaml. The expected result should be:
- Yaml.YAMLError exception raised
- _LOGGER.critical message
- sys.exit
Note: the unittest.main(exit=False) is important so
other tests won't fail when sys.exit is called.
"""
opsdroid, loader = self.setup()
with self.assertRaises(SystemExit):
_ = loader.load_config_file(
[os.path.abspath("tests/configs/include_exploit.yaml")]
)
self.assertLogs("_LOGGER", "critical")
self.assertRaises(YAMLError)
unittest.main(exit=False)
def test_load_config_file_with_include(self):
opsdroid, loader = self.setup()
config = loader.load_config_file(
[os.path.abspath("tests/configs/minimal_with_include.yaml")]
)
config2 = loader.load_config_file(
[os.path.abspath("tests/configs/minimal.yaml")]
)
self.assertIsNotNone(config)
self.assertEqual(config, config2)
def test_yaml_load_exploit(self):
with mock.patch("sys.exit"):
config = Loader.load_config_file(
[os.path.abspath("tests/configs/include_exploit.yaml")]
)
self.assertIsNone(config)
# If the command in exploit.yaml is echoed it will return 0
self.assertNotEqual(config, 0)
def test_load_config_file_with_env_vars(self):
opsdroid, loader = self.setup()
os.environ["ENVVAR"] = "opsdroid"
config = loader.load_config_file(
[os.path.abspath("tests/configs/minimal_with_envs.yaml")]
)
self.assertEqual(config["connectors"][0]["bot-name"], os.environ["ENVVAR"])
def test_create_default_config(self):
test_config_path = os.path.join(
tempfile.gettempdir(), "test_config_path/configuration.yaml"
)
opsdroid, loader = self.setup()
self.assertEqual(
loader.create_default_config(test_config_path), test_config_path
)
self.assertTrue(os.path.isfile(test_config_path))
shutil.rmtree(os.path.split(test_config_path)[0], onerror=del_rw)
def test_generate_config_if_none_exist(self):
cdf_backup = Loader.create_default_config
Loader.create_default_config = mock.Mock(
return_value=os.path.abspath("tests/configs/minimal.yaml")
)
Loader.load_config_file(["file_which_does_not_exist"])
self.assertTrue(Loader.create_default_config.called)
Loader.create_default_config = cdf_backup
def test_load_non_existant_config_file(self):
cdf_backup = Loader.create_default_config
Loader.create_default_config = mock.Mock(
return_value=os.path.abspath("/tmp/my_nonexistant_config")
)
with mock.patch("sys.exit") as mock_sysexit:
Loader.load_config_file(["file_which_does_not_exist"])
self.assertTrue(Loader.create_default_config.called)
self.assertTrue(mock_sysexit.called)
Loader.create_default_config = cdf_backup
def test_load_broken_config_file(self):
with mock.patch("sys.exit") as patched_sysexit:
Loader.load_config_file([os.path.abspath("tests/configs/broken.yaml")])
self.assertTrue(patched_sysexit.called)
def test_git_clone(self):
with mock.patch.object(subprocess, "Popen") as mock_subproc_popen:
opsdroid, loader = self.setup()
myrsa = "/path/to/my/id_rsa"
loader.git_clone(
"https://github.com/rmccue/test-repository.git",
os.path.join(self._tmp_dir, "/test"),
"master",
myrsa,
)
self.assertTrue(mock_subproc_popen.called)
_, mock_subproc_popen_kwargs = mock_subproc_popen.call_args
assert myrsa in mock_subproc_popen_kwargs["env"]["GIT_SSH_COMMAND"]
def test_git_pull(self):
with mock.patch.object(subprocess, "Popen") as mock_subproc_popen:
opsdroid, loader = self.setup()
loader.git_pull(os.path.join(self._tmp_dir, "/test"))
self.assertTrue(mock_subproc_popen.called)
def test_pip_install_deps(self):
with mock.patch.object(subprocess, "Popen") as mocked_popen:
mocked_popen.return_value.communicate.return_value = ["Test\nTest"]
opsdroid, loader = self.setup()
loader.pip_install_deps(os.path.abspath("/path/to/some/file.txt"))
self.assertTrue(mocked_popen.called)
def test_no_pip_install(self):
opsdroid, loader = self.setup()
with mock.patch.object(loader, "pip_install_deps") as mock_pip:
mock_pip.side_effect = FileNotFoundError()
with self.assertRaises(FileNotFoundError):
mock_pip.return_value.communicate.return_value = ["Test\nTest"]
loader.pip_install_deps("/path/to/some/file.txt")
self.assertTrue(mock_pip.called)
def test_no_pip_or_pip3_install(self):
opsdroid, loader = self.setup()
loader.pip_install_deps("/path/to/some/file.txt")
with mock.patch.object(subprocess, "Popen") as mocked_popen:
mocked_popen.side_effect = [FileNotFoundError(), FileNotFoundError()]
with self.assertRaises(OSError) as error:
loader.pip_install_deps("/path/to/some/file.txt")
self.assertEqual(error, "Pip and pip3 not found, exiting...")
def test_build_module_path(self):
config = {"type": "test", "name": "test", "is_builtin": False}
loader = mock.Mock()
loader.modules_directory = ""
self.assertIn("test.test", ld.Loader.build_module_import_path(config))
self.assertIn("test", ld.Loader.build_module_install_path(loader, config))
config["is_builtin"] = True
self.assertIn("opsdroid.test.test", ld.Loader.build_module_import_path(config))
self.assertIn("test", ld.Loader.build_module_install_path(loader, config))
def test_check_cache_removes_dir(self):
config = {}
config["no-cache"] = True
config["install_path"] = os.path.join(
self._tmp_dir, os.path.normpath("test/module")
)
os.makedirs(config["install_path"], mode=0o777)
ld.Loader.check_cache(config)
self.assertFalse(os.path.isdir(config["install_path"]))
def test_check_cache_removes_file(self):
config = {}
config["no-cache"] = True
config["install_path"] = os.path.join(
self._tmp_dir, os.path.normpath("test/module/test")
)
directory, _ = os.path.split(config["install_path"])
os.makedirs(directory, mode=0o777)
open(config["install_path"] + ".py", "w")
ld.Loader.check_cache(config)
self.assertFalse(os.path.isfile(config["install_path"] + ".py"))
shutil.rmtree(directory, onerror=del_rw)
def test_check_cache_leaves(self):
config = {}
config["no-cache"] = False
config["install_path"] = os.path.join(
self._tmp_dir, os.path.normpath("test/module")
)
os.makedirs(config["install_path"], mode=0o777)
ld.Loader.check_cache(config)
self.assertTrue(os.path.isdir(config["install_path"]))
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_loading_intents(self):
config = {}
config["no-cache"] = True
config["install_path"] = os.path.join(
self._tmp_dir, os.path.normpath("test/module/test")
)
os.makedirs(config["install_path"], mode=0o777)
intent_contents = "Hello world"
intents_file = os.path.join(config["install_path"], "intents.yml")
with open(intents_file, "w") as intents:
intents.write(intent_contents)
loaded_intents = ld.Loader._load_intents(config)
self.assertEqual(intent_contents, loaded_intents)
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_loading_intents_failed(self):
config = {}
config["no-cache"] = True
config["install_path"] = os.path.join(
self._tmp_dir, os.path.normpath("test/module/test/")
)
loaded_intents = ld.Loader._load_intents(config)
self.assertEqual(None, loaded_intents)
def test_no_dep(self):
opsdroid, loader = self.setup()
config = {}
config["no-dep"] = True
loader._install_module_dependencies(config)
self.assertLogs("_LOGGER", "debug")
self.assertEqual(loader._install_module_dependencies(config), None)
with mock.patch.object(loader, "_install_module_dependencies") as nodep:
config["no-dep"] = False
self.assertTrue(nodep)
def test_no_req_in_install_module_dependencies(self):
opsdroid, loader = self.setup()
config = {}
config["install_path"] = ""
with mock.patch("os.path.isfile") as file:
file.return_value = False
self.assertEqual(loader._install_module_dependencies(config), None)
def test_import_module(self):
config = {}
config["module_path"] = "os"
config["name"] = "path"
config["type"] = "system"
config["module"] = ""
module = ld.Loader.import_module(config)
self.assertIsInstance(module, ModuleType)
def test_import_module_new(self):
config = {}
config["module_path"] = "os"
config["name"] = ""
config["type"] = "system"
config["module"] = ""
module = ld.Loader.import_module(config)
self.assertIsInstance(module, ModuleType)
def test_import_module_failure(self):
config = {}
config["module_path"] = "nonexistant"
config["name"] = "module"
config["type"] = "broken"
config["module"] = ""
module = ld.Loader.import_module(config)
self.assertEqual(module, None)
def test_import_module_from_path(self):
config = {}
config["module_path"] = ""
config["name"] = "module"
config["type"] = ""
config["module"] = "os.path"
module = ld.Loader.import_module(config)
self.assertIsInstance(module, ModuleType)
def test_import_module_from_entrypoint(self):
config = {}
config["module_path"] = ""
config["name"] = "myep"
config["type"] = ""
config["module"] = ""
distro = pkg_resources.Distribution()
ep = pkg_resources.EntryPoint("myep", "os.path", dist=distro)
config["entrypoint"] = ep
opsdroid, loader = self.setup()
loader.modules_directory = "."
modules = [{"name": "myep"}]
with mock.patch("opsdroid.loader.iter_entry_points") as mock_iter_entry_points:
mock_iter_entry_points.return_value = (ep,)
loaded = loader._load_modules("database", modules)
self.assertEqual(loaded[0]["config"]["name"], "myep")
def test_load_config_move_to_appdir(self):
opsdroid, loader = self.setup()
loader._load_modules = mock.MagicMock()
loader._setup_modules = mock.MagicMock()
config = {}
config["databases"] = mock.MagicMock()
config["skills"] = mock.MagicMock()
config["connectors"] = mock.MagicMock()
config["module-path"] = os.path.join(self._tmp_dir, "opsdroid")
with mock.patch("opsdroid.helper.move_config_to_appdir") as mocked_move:
mocked_move.side_effect = FileNotFoundError()
loader.load_modules_from_config(config)
self.assertLogs("_LOGGER", "info")
self.assertEqual(len(loader._load_modules.mock_calls), 3)
def test_load_config(self):
opsdroid, loader = self.setup()
loader._load_modules = mock.MagicMock()
loader._setup_modules = mock.MagicMock()
config = {}
config["databases"] = mock.MagicMock()
config["skills"] = mock.MagicMock()
config["connectors"] = mock.MagicMock()
config["module-path"] = os.path.join(self._tmp_dir, "opsdroid")
loader.load_modules_from_config(config)
self.assertLogs("_LOGGER", "info")
self.assertEqual(len(loader._load_modules.mock_calls), 3)
def test_load_empty_config(self):
opsdroid, loader = self.setup()
loader._load_modules = mock.MagicMock()
config = {}
loader.load_modules_from_config(config)
self.assertEqual(len(loader._load_modules.mock_calls), 0)
self.assertEqual(len(opsdroid.mock_calls), 2)
def test_load_minimal_config_file(self):
opsdroid, loader = self.setup()
config = Loader.load_config_file(
[os.path.abspath("tests/configs/minimal.yaml")]
)
loader._install_module = mock.MagicMock()
loader.import_module = mock.MagicMock()
modules = loader.load_modules_from_config(config)
self.assertIsNotNone(modules["connectors"])
self.assertIsNone(modules["databases"])
self.assertIsNotNone(modules["skills"])
self.assertIsNotNone(config)
def test_load_minimal_config_file_2(self):
opsdroid, loader = self.setup()
loader._install_module = mock.MagicMock()
loader.import_module = mock.MagicMock()
config = Loader.load_config_file(
[os.path.abspath("tests/configs/minimal_2.yaml")]
)
modules = loader.load_modules_from_config(config)
self.assertIsNotNone(modules["connectors"])
self.assertIsNone(modules["databases"])
self.assertIsNotNone(modules["skills"])
self.assertIsNotNone(config)
def test_load_modules(self):
opsdroid, loader = self.setup()
modules_type = "test"
modules = [{"name": "testmodule"}]
mockedmodule = mock.Mock(return_value={"name": "testmodule"})
with tempfile.TemporaryDirectory() as tmp_dep_path:
with mock.patch.object(
loader, "_install_module"
) as mockinstall, mock.patch(
"opsdroid.loader.DEFAULT_MODULE_DEPS_PATH",
os.path.join(tmp_dep_path, "site-packages"),
), mock.patch.object(
loader, "import_module", mockedmodule
) as mockimport:
loader.setup_modules_directory({})
loader._load_modules(modules_type, modules)
self.assertTrue(mockinstall.called)
self.assertTrue(mockimport.called)
def test_load_modules_not_instance_Mapping(self):
opsdroid, loader = self.setup()
modules_type = "test"
modules = ["testmodule"]
mockedmodule = mock.Mock(return_value={"name": "testmodule"})
with tempfile.TemporaryDirectory() as tmp_dep_path:
with mock.patch.object(
loader, "_install_module"
) as mockinstall, mock.patch(
"opsdroid.loader.DEFAULT_MODULE_DEPS_PATH",
os.path.join(tmp_dep_path, "site-packages"),
), mock.patch.object(
loader, "import_module", mockedmodule
) as mockimport:
loader.setup_modules_directory({})
loader._load_modules(modules_type, modules)
self.assertTrue(mockinstall.called)
self.assertTrue(mockimport.called)
def test_load_modules_fail(self):
opsdroid, loader = self.setup()
modules_type = "test"
modules = [{"name": "testmodule"}]
with mock.patch.object(
loader, "_install_module"
) as mockinstall, mock.patch.object(
loader, "import_module", return_value=None
) as mockimport:
loader.setup_modules_directory({})
loaded_modules = loader._load_modules(modules_type, modules)
self.assertTrue(mockinstall.called)
self.assertTrue(mockimport.called)
self.assertEqual(loaded_modules, [])
def test_load_existing_modules(self):
opsdroid, loader = self.setup()
modules_type = "test"
modules = [{"name": "testmodule"}]
install_path = os.path.join(self._tmp_dir, "test_existing_module")
mockedmodule = mock.Mock(return_value={"name": "testmodule"})
mocked_install_path = mock.Mock(return_value=install_path)
os.mkdir(install_path)
with mock.patch.object(
loader, "_update_module"
) as mockupdate, mock.patch.object(
loader, "import_module", mockedmodule
) as mockimport, mock.patch.object(
loader, "build_module_install_path", mocked_install_path
) as mockbuildpath:
loader.setup_modules_directory({})
loader._load_modules(modules_type, modules)
self.assertTrue(mockbuildpath.called)
self.assertTrue(mockupdate.called)
self.assertTrue(mockimport.called)
shutil.rmtree(install_path, onerror=del_rw)
def test_install_missing_local_module(self):
opsdroid, loader = self.setup()
config = {
"name": "testmodule",
"install_path": os.path.join(self._tmp_dir, "test_missing_local_module"),
"repo": os.path.join(self._tmp_dir, "testrepo"),
"branch": "master",
}
with mock.patch("opsdroid.loader._LOGGER.error") as logmock:
loader._install_module(config)
logmock.assert_any_call("Could not find local git repo %s", config["repo"])
logmock.assert_any_call("Install of %s failed.", config["name"])
def test_install_specific_remote_module(self):
opsdroid, loader = self.setup()
config = {
"name": "testmodule",
"install_path": os.path.join(self._tmp_dir, "test_specific_remote_module"),
"repo": "https://github.com/rmccue/test-repository.git",
"branch": "master",
}
with mock.patch("opsdroid.loader._LOGGER.debug"), mock.patch.object(
loader, "git_clone"
) as mockclone:
loader._install_module(config)
mockclone.assert_called_with(
config["repo"], config["install_path"], config["branch"], None
)
def test_install_specific_remote_module_ssl(self):
opsdroid, loader = self.setup()
config = {
"name": "testmodule",
"install_path": os.path.join(self._tmp_dir, "test_specific_remote_module"),
"repo": "https://github.com/rmccue/test-repository.git",
"branch": "master",
"key_path": os.path.join(
self._tmp_dir, os.path.normpath("install/from/here.key")
),
}
with mock.patch("opsdroid.loader._LOGGER.debug"), mock.patch.object(
loader, "git_clone"
) as mockclone:
loader._install_module(config)
mockclone.assert_called_with(
config["repo"],
config["install_path"],
config["branch"],
config["key_path"],
)
def test_install_specific_local_git_module(self):
opsdroid, loader = self.setup()
repo_path = os.path.join(self._tmp_dir, "testrepo")
config = {
"name": "testmodule",
"install_path": repo_path,
"repo": "https://github.com/rmccue/test-repository.git",
"branch": "master",
}
config["repo"] = repo_path
config["install_path"] = os.path.join(
self._tmp_dir, "test_specific_local_module"
)
os.makedirs(repo_path)
with mock.patch("opsdroid.loader._LOGGER.debug"), mock.patch.object(
loader, "git_clone"
) as mockclone:
loader._install_module(config)
mockclone.assert_called_with(
config["repo"], config["install_path"], config["branch"]
)
shutil.rmtree(repo_path, onerror=del_rw)
def test_install_gist_module(self):
opsdroid, loader = self.setup()
config = {
"name": "ping",
"type": "skill",
"install_path": os.path.join(self._tmp_dir, "test_gist_module_file"),
"gist": "https://gist.github.com/jacobtomlinson/"
"c9852fa17d3463acc14dca1217d911f6",
}
with mock.patch.object(loader, "_install_gist_module") as mockgist:
loader._install_module(config)
self.assertTrue(mockgist.called)
def test_install_specific_local_path_module(self):
opsdroid, loader = self.setup()
repo_path = os.path.join(self._tmp_dir, "testrepo")
config = {
"name": "testmodule",
"install_path": repo_path,
"repo": "https://github.com/rmccue/test-repository.git",
"branch": "master",
}
os.makedirs(config["install_path"])
config["path"] = config["install_path"]
config["install_path"] = os.path.join(
self._tmp_dir, "test_specific_local_module"
)
with mock.patch("opsdroid.loader._LOGGER.debug"), mock.patch.object(
loader, "_install_local_module"
) as mockclone:
loader._install_module(config)
mockclone.assert_called_with(config)
shutil.rmtree(repo_path, onerror=del_rw)
def test_install_default_remote_module(self):
opsdroid, loader = self.setup()
config = {
"name": "slack",
"type": "connector",
"install_path": os.path.join(self._tmp_dir, "test_default_remote_module"),
"branch": "master",
}
with mock.patch("opsdroid.loader.Loader.git_clone") as mockclone:
with mock.patch.object(loader, "pip_install_deps") as mockdeps:
os.makedirs(config["install_path"])
mockclone.side_effect = shutil.copy(
"requirements.txt", config["install_path"]
)
loader._install_module(config)
self.assertLogs("_LOGGER", "debug")
mockdeps.assert_called_with(
os.path.join(config["install_path"], "requirements.txt")
)
mockclone.assert_called_with(
"https://github.com/opsdroid/"
+ config["type"]
+ "-"
+ config["name"]
+ ".git",
config["install_path"],
config["branch"],
None,
)
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_install_local_module_dir(self):
opsdroid, loader = self.setup()
base_path = os.path.join(self._tmp_dir, "long")
config = {
"name": "slack",
"type": "connector",
"install_path": os.path.join(base_path, os.path.normpath("test/path/test")),
"path": os.path.join(self._tmp_dir, os.path.normpath("install/from/here")),
}
os.makedirs(config["path"], exist_ok=True, mode=0o777)
loader._install_local_module(config)
self.assertTrue(os.path.isdir(config["install_path"]))
shutil.rmtree(base_path, onerror=del_rw)
def test_install_local_module_file(self):
opsdroid, loader = self.setup()
config = {
"name": "slack",
"type": "connector",
"install_path": os.path.join(self._tmp_dir, "test_local_module_file"),
"path": os.path.join(
self._tmp_dir, os.path.normpath("install/from/here.py")
),
}
directory, _ = os.path.split(config["path"])
os.makedirs(directory, exist_ok=True, mode=0o777)
open(config["path"], "w")
loader._install_local_module(config)
self.assertTrue(
os.path.isfile(os.path.join(config["install_path"], "__init__.py"))
)
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_install_local_module_notebook(self):
opsdroid, loader = self.setup()
config = {
"name": "slack",
"type": "connector",
"install_path": os.path.join(self._tmp_dir, "test_local_module_file"),
"path": os.path.abspath("tests/mockmodules/skills/test_notebook.ipynb"),
}
directory, _ = os.path.split(config["path"])
os.makedirs(directory, exist_ok=True, mode=0o777)
loader._install_local_module(config)
self.assertTrue(
os.path.isfile(os.path.join(config["install_path"], "__init__.py"))
)
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_install_local_module_failure(self):
opsdroid, loader = self.setup()
config = {
"name": "slack",
"type": "connector",
"install_path": os.path.join(self._tmp_dir, "test_local_module_failure"),
"path": os.path.join(self._tmp_dir, "doesnotexist"),
}
loader._install_local_module(config)
self.assertLogs("_LOGGER", "error")
def test_update_existing_local_module(self):
opsdroid, loader = self.setup()
base_path = os.path.join(self._tmp_dir, "long")
config = {
"name": "testmodule",
"type": "test",
"install_path": os.path.join(base_path, os.path.normpath("test/path/test")),
"path": os.path.join(self._tmp_dir, os.path.normpath("install/from/here")),
}
os.makedirs(config["install_path"], exist_ok=True, mode=0o777)
os.makedirs(config["path"], exist_ok=True, mode=0o777)
loader._update_module(config)
self.assertLogs("_LOGGER", "debug")
shutil.rmtree(base_path, onerror=del_rw)
def test_update_existing_git_module(self):
opsdroid, loader = self.setup()
config = {
"name": "testmodule",
"install_path": os.path.join(self._tmp_dir, "test_specific_remote_module"),
"repo": "https://github.com/rmccue/test-repository.git",
"branch": "master",
}
os.mkdir(config["install_path"])
with mock.patch.object(loader, "git_pull") as mockpull:
loader._update_module(config)
mockpull.assert_called_with(config["install_path"])
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_install_gist_module_file(self):
opsdroid, loader = self.setup()
config = {
"name": "ping",
"type": "skill",
"install_path": os.path.join(self._tmp_dir, "test_gist_module_file"),
"gist": "https://gist.github.com/jacobtomlinson/"
"6dd35e0f62d6b779d3d0d140f338d3e5",
}
with mock.patch("urllib.request.urlopen") as mock_urlopen:
with open(
os.path.abspath("tests/responses/gist_module_file.json"), "rb"
) as fh:
mock_urlopen.return_value = fh
loader._install_gist_module(config)
self.assertTrue(
os.path.isfile(os.path.join(config["install_path"], "__init__.py"))
)
shutil.rmtree(config["install_path"], onerror=del_rw)
def test_install_gist_module_notebook(self):
opsdroid, loader = self.setup()
config = {
"name": "ping",
"type": "skill",
"install_path": os.path.join(self._tmp_dir, "test_gist_module_file"),
"gist": "https://gist.github.com/jacobtomlinson/"
"c9852fa17d3463acc14dca1217d911f6",
}
with mock.patch("urllib.request.urlopen") as mock_urlopen:
with open(
os.path.abspath("tests/responses/gist_module_notebook.json"), "rb"
) as fh:
mock_urlopen.return_value = fh
loader._install_gist_module(config)
self.assertTrue(
os.path.isfile(os.path.join(config["install_path"], "__init__.py"))
)
shutil.rmtree(config["install_path"], onerror=del_rw)
| 39.257822
| 88
| 0.611439
|
6951b0f009763906bab3ee42b8566ea0081be9cd
| 23,151
|
py
|
Python
|
tornado/netutil.py
|
louis-she/tornado
|
f7d94d0e8a57f51ffc0aac5d55ed7bf9f9936b4d
|
[
"Apache-2.0"
] | 6
|
2015-05-11T18:41:50.000Z
|
2018-08-26T14:30:24.000Z
|
tornado/netutil.py
|
louis-she/tornado
|
f7d94d0e8a57f51ffc0aac5d55ed7bf9f9936b4d
|
[
"Apache-2.0"
] | null | null | null |
tornado/netutil.py
|
louis-she/tornado
|
f7d94d0e8a57f51ffc0aac5d55ed7bf9f9936b4d
|
[
"Apache-2.0"
] | 2
|
2016-07-29T19:27:54.000Z
|
2020-10-09T01:09:08.000Z
|
#
# Copyright 2011 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Miscellaneous network utility code."""
import concurrent.futures
import errno
import os
import sys
import socket
import ssl
import stat
from tornado.concurrent import dummy_executor, run_on_executor
from tornado.ioloop import IOLoop
from tornado.platform.auto import set_close_exec
from tornado.util import Configurable, errno_from_exception
from typing import List, Callable, Any, Type, Dict, Union, Tuple, Awaitable, Optional
# Note that the naming of ssl.Purpose is confusing; the purpose
# of a context is to authenticate the opposite side of the connection.
_client_ssl_defaults = ssl.create_default_context(ssl.Purpose.SERVER_AUTH)
_server_ssl_defaults = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
if hasattr(ssl, "OP_NO_COMPRESSION"):
# See netutil.ssl_options_to_context
_client_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
_server_ssl_defaults.options |= ssl.OP_NO_COMPRESSION
# ThreadedResolver runs getaddrinfo on a thread. If the hostname is unicode,
# getaddrinfo attempts to import encodings.idna. If this is done at
# module-import time, the import lock is already held by the main thread,
# leading to deadlock. Avoid it by caching the idna encoder on the main
# thread now.
u"foo".encode("idna")
# For undiagnosed reasons, 'latin1' codec may also need to be preloaded.
u"foo".encode("latin1")
# Default backlog used when calling sock.listen()
_DEFAULT_BACKLOG = 128
def bind_sockets(
port: int,
address: Optional[str] = None,
family: socket.AddressFamily = socket.AF_UNSPEC,
backlog: int = _DEFAULT_BACKLOG,
flags: Optional[int] = None,
reuse_port: bool = False,
) -> List[socket.socket]:
"""Creates listening sockets bound to the given port and address.
Returns a list of socket objects (multiple sockets are returned if
the given address maps to multiple IP addresses, which is most common
for mixed IPv4 and IPv6 use).
Address may be either an IP address or hostname. If it's a hostname,
the server will listen on all IP addresses associated with the
name. Address may be an empty string or None to listen on all
available interfaces. Family may be set to either `socket.AF_INET`
or `socket.AF_INET6` to restrict to IPv4 or IPv6 addresses, otherwise
both will be used if available.
The ``backlog`` argument has the same meaning as for
`socket.listen() <socket.socket.listen>`.
``flags`` is a bitmask of AI_* flags to `~socket.getaddrinfo`, like
``socket.AI_PASSIVE | socket.AI_NUMERICHOST``.
``reuse_port`` option sets ``SO_REUSEPORT`` option for every socket
in the list. If your platform doesn't support this option ValueError will
be raised.
"""
if reuse_port and not hasattr(socket, "SO_REUSEPORT"):
raise ValueError("the platform doesn't support SO_REUSEPORT")
sockets = []
if address == "":
address = None
if not socket.has_ipv6 and family == socket.AF_UNSPEC:
# Python can be compiled with --disable-ipv6, which causes
# operations on AF_INET6 sockets to fail, but does not
# automatically exclude those results from getaddrinfo
# results.
# http://bugs.python.org/issue16208
family = socket.AF_INET
if flags is None:
flags = socket.AI_PASSIVE
bound_port = None
unique_addresses = set() # type: set
for res in sorted(
socket.getaddrinfo(address, port, family, socket.SOCK_STREAM, 0, flags),
key=lambda x: x[0],
):
if res in unique_addresses:
continue
unique_addresses.add(res)
af, socktype, proto, canonname, sockaddr = res
if (
sys.platform == "darwin"
and address == "localhost"
and af == socket.AF_INET6
and sockaddr[3] != 0
):
# Mac OS X includes a link-local address fe80::1%lo0 in the
# getaddrinfo results for 'localhost'. However, the firewall
# doesn't understand that this is a local address and will
# prompt for access (often repeatedly, due to an apparent
# bug in its ability to remember granting access to an
# application). Skip these addresses.
continue
try:
sock = socket.socket(af, socktype, proto)
except socket.error as e:
if errno_from_exception(e) == errno.EAFNOSUPPORT:
continue
raise
set_close_exec(sock.fileno())
if os.name != "nt":
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error as e:
if errno_from_exception(e) != errno.ENOPROTOOPT:
# Hurd doesn't support SO_REUSEADDR.
raise
if reuse_port:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
if af == socket.AF_INET6:
# On linux, ipv6 sockets accept ipv4 too by default,
# but this makes it impossible to bind to both
# 0.0.0.0 in ipv4 and :: in ipv6. On other systems,
# separate sockets *must* be used to listen for both ipv4
# and ipv6. For consistency, always disable ipv4 on our
# ipv6 sockets and use a separate ipv4 socket when needed.
#
# Python 2.x on windows doesn't have IPPROTO_IPV6.
if hasattr(socket, "IPPROTO_IPV6"):
sock.setsockopt(socket.IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
# automatic port allocation with port=None
# should bind on the same port on IPv4 and IPv6
host, requested_port = sockaddr[:2]
if requested_port == 0 and bound_port is not None:
sockaddr = tuple([host, bound_port] + list(sockaddr[2:]))
sock.setblocking(False)
try:
sock.bind(sockaddr)
except OSError as e:
if (
errno_from_exception(e) == errno.EADDRNOTAVAIL
and address == "localhost"
and sockaddr[0] == "::1"
):
# On some systems (most notably docker with default
# configurations), ipv6 is partially disabled:
# socket.has_ipv6 is true, we can create AF_INET6
# sockets, and getaddrinfo("localhost", ...,
                # AI_PASSIVE) resolves to ::1, but we get an error
# when binding.
#
# Swallow the error, but only for this specific case.
# If EADDRNOTAVAIL occurs in other situations, it
# might be a real problem like a typo in a
# configuration.
sock.close()
continue
else:
raise
bound_port = sock.getsockname()[1]
sock.listen(backlog)
sockets.append(sock)
return sockets
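# Illustrative sketch (editor addition, not part of the original module): a typical
# call to bind_sockets. Port 8888 and the loopback address are placeholders; the
# returned sockets are bound, non-blocking and already listening, ready to hand to
# add_accept_handler (defined below) or tornado.tcpserver.TCPServer.add_sockets().
def _example_bind_sockets() -> List[socket.socket]:
    return bind_sockets(8888, address="127.0.0.1", family=socket.AF_INET)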
if hasattr(socket, "AF_UNIX"):
def bind_unix_socket(
file: str, mode: int = 0o600, backlog: int = _DEFAULT_BACKLOG
) -> socket.socket:
"""Creates a listening unix socket.
If a socket with the given name already exists, it will be deleted.
If any other file with that name exists, an exception will be
raised.
Returns a socket object (not a list of socket objects like
`bind_sockets`)
"""
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
set_close_exec(sock.fileno())
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
except socket.error as e:
if errno_from_exception(e) != errno.ENOPROTOOPT:
# Hurd doesn't support SO_REUSEADDR
raise
sock.setblocking(False)
try:
st = os.stat(file)
except FileNotFoundError:
pass
else:
if stat.S_ISSOCK(st.st_mode):
os.remove(file)
else:
raise ValueError("File %s exists and is not a socket", file)
sock.bind(file)
os.chmod(file, mode)
sock.listen(backlog)
return sock
def add_accept_handler(
sock: socket.socket, callback: Callable[[socket.socket, Any], None]
) -> Callable[[], None]:
"""Adds an `.IOLoop` event handler to accept new connections on ``sock``.
When a connection is accepted, ``callback(connection, address)`` will
be run (``connection`` is a socket object, and ``address`` is the
address of the other end of the connection). Note that this signature
is different from the ``callback(fd, events)`` signature used for
`.IOLoop` handlers.
A callable is returned which, when called, will remove the `.IOLoop`
event handler and stop processing further incoming connections.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. versionchanged:: 5.0
A callable is returned (``None`` was returned before).
"""
io_loop = IOLoop.current()
removed = [False]
def accept_handler(fd: socket.socket, events: int) -> None:
# More connections may come in while we're handling callbacks;
# to prevent starvation of other tasks we must limit the number
# of connections we accept at a time. Ideally we would accept
# up to the number of connections that were waiting when we
# entered this method, but this information is not available
# (and rearranging this method to call accept() as many times
# as possible before running any callbacks would have adverse
# effects on load balancing in multiprocess configurations).
# Instead, we use the (default) listen backlog as a rough
# heuristic for the number of connections we can reasonably
# accept at once.
for i in range(_DEFAULT_BACKLOG):
if removed[0]:
# The socket was probably closed
return
try:
connection, address = sock.accept()
except BlockingIOError:
# EWOULDBLOCK indicates we have accepted every
# connection that is available.
return
except ConnectionAbortedError:
# ECONNABORTED indicates that there was a connection
# but it was closed while still in the accept queue.
# (observed on FreeBSD).
continue
set_close_exec(connection.fileno())
callback(connection, address)
def remove_handler() -> None:
io_loop.remove_handler(sock)
removed[0] = True
io_loop.add_handler(sock, accept_handler, IOLoop.READ)
return remove_handler
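# Illustrative sketch (editor addition): wiring bind_sockets and add_accept_handler
# together. The connection handler simply closes the socket; a real server would wrap
# it in an IOStream. Port 8888 is a placeholder and the IOLoop must be started
# separately for connections to actually be accepted.
def _example_accept_loop() -> List[Callable[[], None]]:
    def on_connection(connection: socket.socket, address: Any) -> None:
        # Real code would hand `connection` to tornado.iostream.IOStream here.
        connection.close()
    return [
        add_accept_handler(sock, on_connection)
        for sock in bind_sockets(8888, address="127.0.0.1")
    ]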
def is_valid_ip(ip: str) -> bool:
"""Returns ``True`` if the given string is a well-formed IP address.
Supports IPv4 and IPv6.
"""
if not ip or "\x00" in ip:
# getaddrinfo resolves empty strings to localhost, and truncates
# on zero bytes.
return False
try:
res = socket.getaddrinfo(
ip, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0, socket.AI_NUMERICHOST
)
return bool(res)
except socket.gaierror as e:
if e.args[0] == socket.EAI_NONAME:
return False
raise
return True
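# Illustrative sketch (editor addition): is_valid_ip accepts literal IPv4/IPv6
# addresses only, never hostnames or empty strings (the "::1" case assumes a host
# with IPv6 support).
def _example_is_valid_ip() -> None:
    assert is_valid_ip("127.0.0.1")
    assert is_valid_ip("::1")
    assert not is_valid_ip("localhost")  # a name, not a literal address
    assert not is_valid_ip("")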
class Resolver(Configurable):
"""Configurable asynchronous DNS resolver interface.
By default, a blocking implementation is used (which simply calls
`socket.getaddrinfo`). An alternative implementation can be
chosen with the `Resolver.configure <.Configurable.configure>`
class method::
Resolver.configure('tornado.netutil.ThreadedResolver')
The implementations of this interface included with Tornado are
* `tornado.netutil.DefaultExecutorResolver`
* `tornado.netutil.BlockingResolver` (deprecated)
* `tornado.netutil.ThreadedResolver` (deprecated)
* `tornado.netutil.OverrideResolver`
* `tornado.platform.twisted.TwistedResolver`
* `tornado.platform.caresresolver.CaresResolver`
.. versionchanged:: 5.0
The default implementation has changed from `BlockingResolver` to
`DefaultExecutorResolver`.
"""
@classmethod
def configurable_base(cls) -> Type["Resolver"]:
return Resolver
@classmethod
def configurable_default(cls) -> Type["Resolver"]:
return DefaultExecutorResolver
def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> Awaitable[List[Tuple[int, Any]]]:
"""Resolves an address.
The ``host`` argument is a string which may be a hostname or a
literal IP address.
Returns a `.Future` whose result is a list of (family,
address) pairs, where address is a tuple suitable to pass to
`socket.connect <socket.socket.connect>` (i.e. a ``(host,
port)`` pair for IPv4; additional fields may be present for
IPv6). If a ``callback`` is passed, it will be run with the
result as an argument when it is complete.
:raises IOError: if the address cannot be resolved.
.. versionchanged:: 4.4
Standardized all implementations to raise `IOError`.
.. versionchanged:: 6.0 The ``callback`` argument was removed.
Use the returned awaitable object instead.
"""
raise NotImplementedError()
def close(self) -> None:
"""Closes the `Resolver`, freeing any resources used.
.. versionadded:: 3.1
"""
pass
def _resolve_addr(
host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
# On Solaris, getaddrinfo fails if the given port is not found
# in /etc/services and no socket type is given, so we must pass
# one here. The socket type used here doesn't seem to actually
# matter (we discard the one we get back in the results),
# so the addresses we return should still be usable with SOCK_DGRAM.
addrinfo = socket.getaddrinfo(host, port, family, socket.SOCK_STREAM)
results = []
for fam, socktype, proto, canonname, address in addrinfo:
results.append((fam, address))
return results # type: ignore
class DefaultExecutorResolver(Resolver):
"""Resolver implementation using `.IOLoop.run_in_executor`.
.. versionadded:: 5.0
"""
async def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
result = await IOLoop.current().run_in_executor(
None, _resolve_addr, host, port, family
)
return result
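# Illustrative sketch (editor addition): resolving a name with the default resolver.
# The coroutine must be awaited on a running IOLoop, e.g. via
# IOLoop.current().run_sync(_example_resolve); "localhost" and port 80 are placeholders.
async def _example_resolve() -> List[Tuple[int, Any]]:
    resolver = Resolver()  # DefaultExecutorResolver unless configured otherwise
    # Result is a list of (family, address) pairs, e.g. (AF_INET, ("127.0.0.1", 80)).
    return await resolver.resolve("localhost", 80)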
class ExecutorResolver(Resolver):
"""Resolver implementation using a `concurrent.futures.Executor`.
Use this instead of `ThreadedResolver` when you require additional
control over the executor being used.
The executor will be shut down when the resolver is closed unless
``close_resolver=False``; use this if you want to reuse the same
executor elsewhere.
.. versionchanged:: 5.0
The ``io_loop`` argument (deprecated since version 4.1) has been removed.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
def initialize(
self,
executor: Optional[concurrent.futures.Executor] = None,
close_executor: bool = True,
) -> None:
self.io_loop = IOLoop.current()
if executor is not None:
self.executor = executor
self.close_executor = close_executor
else:
self.executor = dummy_executor
self.close_executor = False
def close(self) -> None:
if self.close_executor:
self.executor.shutdown()
self.executor = None # type: ignore
@run_on_executor
def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> List[Tuple[int, Any]]:
return _resolve_addr(host, port, family)
class BlockingResolver(ExecutorResolver):
"""Default `Resolver` implementation, using `socket.getaddrinfo`.
The `.IOLoop` will be blocked during the resolution, although the
callback will not be run until the next `.IOLoop` iteration.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
def initialize(self) -> None: # type: ignore
super(BlockingResolver, self).initialize()
class ThreadedResolver(ExecutorResolver):
"""Multithreaded non-blocking `Resolver` implementation.
Requires the `concurrent.futures` package to be installed
(available in the standard library since Python 3.2,
installable with ``pip install futures`` in older versions).
The thread pool size can be configured with::
Resolver.configure('tornado.netutil.ThreadedResolver',
num_threads=10)
.. versionchanged:: 3.1
All ``ThreadedResolvers`` share a single thread pool, whose
size is set by the first one to be created.
.. deprecated:: 5.0
The default `Resolver` now uses `.IOLoop.run_in_executor`; use that instead
of this class.
"""
_threadpool = None # type: ignore
_threadpool_pid = None # type: int
def initialize(self, num_threads: int = 10) -> None: # type: ignore
threadpool = ThreadedResolver._create_threadpool(num_threads)
super(ThreadedResolver, self).initialize(
executor=threadpool, close_executor=False
)
@classmethod
def _create_threadpool(
cls, num_threads: int
) -> concurrent.futures.ThreadPoolExecutor:
pid = os.getpid()
if cls._threadpool_pid != pid:
# Threads cannot survive after a fork, so if our pid isn't what it
# was when we created the pool then delete it.
cls._threadpool = None
if cls._threadpool is None:
cls._threadpool = concurrent.futures.ThreadPoolExecutor(num_threads)
cls._threadpool_pid = pid
return cls._threadpool
class OverrideResolver(Resolver):
"""Wraps a resolver with a mapping of overrides.
This can be used to make local DNS changes (e.g. for testing)
without modifying system-wide settings.
The mapping can be in three formats::
{
# Hostname to host or ip
"example.com": "127.0.1.1",
# Host+port to host+port
("login.example.com", 443): ("localhost", 1443),
# Host+port+address family to host+port
("login.example.com", 443, socket.AF_INET6): ("::1", 1443),
}
.. versionchanged:: 5.0
Added support for host-port-family triplets.
"""
def initialize(self, resolver: Resolver, mapping: dict) -> None:
self.resolver = resolver
self.mapping = mapping
def close(self) -> None:
self.resolver.close()
def resolve(
self, host: str, port: int, family: socket.AddressFamily = socket.AF_UNSPEC
) -> Awaitable[List[Tuple[int, Any]]]:
if (host, port, family) in self.mapping:
host, port = self.mapping[(host, port, family)]
elif (host, port) in self.mapping:
host, port = self.mapping[(host, port)]
elif host in self.mapping:
host = self.mapping[host]
return self.resolver.resolve(host, port, family)
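# Illustrative sketch (editor addition): redirecting lookups for testing with
# OverrideResolver. The hostnames and ports below are placeholders mirroring the
# mapping formats documented in the class docstring.
def _example_override_resolver() -> OverrideResolver:
    return OverrideResolver(
        resolver=Resolver(),
        mapping={
            "example.com": "127.0.0.1",
            ("login.example.com", 443): ("localhost", 1443),
        },
    )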
# These are the keyword arguments to ssl.wrap_socket that must be translated
# to their SSLContext equivalents (the other arguments are still passed
# to SSLContext.wrap_socket).
_SSL_CONTEXT_KEYWORDS = frozenset(
["ssl_version", "certfile", "keyfile", "cert_reqs", "ca_certs", "ciphers"]
)
def ssl_options_to_context(
ssl_options: Union[Dict[str, Any], ssl.SSLContext]
) -> ssl.SSLContext:
"""Try to convert an ``ssl_options`` dictionary to an
`~ssl.SSLContext` object.
The ``ssl_options`` dictionary contains keywords to be passed to
`ssl.wrap_socket`. In Python 2.7.9+, `ssl.SSLContext` objects can
be used instead. This function converts the dict form to its
`~ssl.SSLContext` equivalent, and may be used when a component which
accepts both forms needs to upgrade to the `~ssl.SSLContext` version
to use features like SNI or NPN.
"""
if isinstance(ssl_options, ssl.SSLContext):
return ssl_options
assert isinstance(ssl_options, dict)
assert all(k in _SSL_CONTEXT_KEYWORDS for k in ssl_options), ssl_options
# Can't use create_default_context since this interface doesn't
# tell us client vs server.
context = ssl.SSLContext(ssl_options.get("ssl_version", ssl.PROTOCOL_SSLv23))
if "certfile" in ssl_options:
context.load_cert_chain(
ssl_options["certfile"], ssl_options.get("keyfile", None)
)
if "cert_reqs" in ssl_options:
context.verify_mode = ssl_options["cert_reqs"]
if "ca_certs" in ssl_options:
context.load_verify_locations(ssl_options["ca_certs"])
if "ciphers" in ssl_options:
context.set_ciphers(ssl_options["ciphers"])
if hasattr(ssl, "OP_NO_COMPRESSION"):
# Disable TLS compression to avoid CRIME and related attacks.
# This constant depends on openssl version 1.0.
# TODO: Do we need to do this ourselves or can we trust
# the defaults?
context.options |= ssl.OP_NO_COMPRESSION
return context
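# Illustrative sketch (editor addition): converting a server-style ssl_options dict
# into an SSLContext. The certificate paths are placeholders and must point at real
# files before load_cert_chain can succeed.
def _example_ssl_context() -> ssl.SSLContext:
    return ssl_options_to_context(
        {
            "certfile": "/path/to/server.crt",
            "keyfile": "/path/to/server.key",
            "cert_reqs": ssl.CERT_NONE,
        }
    )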
def ssl_wrap_socket(
socket: socket.socket,
ssl_options: Union[Dict[str, Any], ssl.SSLContext],
server_hostname: Optional[str] = None,
**kwargs: Any
) -> ssl.SSLSocket:
"""Returns an ``ssl.SSLSocket`` wrapping the given socket.
``ssl_options`` may be either an `ssl.SSLContext` object or a
dictionary (as accepted by `ssl_options_to_context`). Additional
keyword arguments are passed to ``wrap_socket`` (either the
`~ssl.SSLContext` method or the `ssl` module function as
appropriate).
"""
context = ssl_options_to_context(ssl_options)
if ssl.HAS_SNI:
# In python 3.4, wrap_socket only accepts the server_hostname
# argument if HAS_SNI is true.
# TODO: add a unittest (python added server-side SNI support in 3.4)
# In the meantime it can be manually tested with
# python3 -m tornado.httpclient https://sni.velox.ch
return context.wrap_socket(socket, server_hostname=server_hostname, **kwargs)
else:
return context.wrap_socket(socket, **kwargs)
| 37.100962
| 85
| 0.650253
|
b4b586e14d2c61dd3a6f68b3ca5dc1f752860028
| 11,940
|
py
|
Python
|
homeassistant/components/mqtt/device_trigger.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 4
|
2018-03-06T20:21:56.000Z
|
2022-03-02T11:47:33.000Z
|
homeassistant/components/mqtt/device_trigger.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 69
|
2020-08-04T09:03:43.000Z
|
2022-03-31T06:13:01.000Z
|
homeassistant/components/mqtt/device_trigger.py
|
basicpail/core
|
5cc54618c5af3f75c08314bf2375cc7ac40d2b7e
|
[
"Apache-2.0"
] | 1
|
2020-12-13T08:27:33.000Z
|
2020-12-13T08:27:33.000Z
|
"""Provides device automations for MQTT."""
from __future__ import annotations
import logging
from typing import Any, Callable
import attr
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import DEVICE_TRIGGER_BASE_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_PLATFORM,
CONF_TYPE,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.typing import ConfigType
from . import CONF_PAYLOAD, CONF_QOS, DOMAIN, debug_info, trigger as mqtt_trigger
from .. import mqtt
from .const import ATTR_DISCOVERY_HASH, ATTR_DISCOVERY_TOPIC
from .discovery import MQTT_DISCOVERY_DONE, MQTT_DISCOVERY_UPDATED, clear_discovery_hash
from .mixins import (
CONF_CONNECTIONS,
CONF_IDENTIFIERS,
MQTT_ENTITY_DEVICE_INFO_SCHEMA,
cleanup_device_registry,
device_info_from_config,
validate_device_has_at_least_one_identifier,
)
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATION_TYPE = "automation_type"
CONF_DISCOVERY_ID = "discovery_id"
CONF_SUBTYPE = "subtype"
CONF_TOPIC = "topic"
DEFAULT_ENCODING = "utf-8"
DEVICE = "device"
MQTT_TRIGGER_BASE = {
# Trigger when MQTT message is received
CONF_PLATFORM: DEVICE,
CONF_DOMAIN: DOMAIN,
}
TRIGGER_SCHEMA = DEVICE_TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): DEVICE,
vol.Required(CONF_DOMAIN): DOMAIN,
vol.Required(CONF_DEVICE_ID): str,
vol.Required(CONF_DISCOVERY_ID): str,
vol.Required(CONF_TYPE): cv.string,
vol.Required(CONF_SUBTYPE): cv.string,
}
)
TRIGGER_DISCOVERY_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_AUTOMATION_TYPE): str,
vol.Required(CONF_DEVICE): MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_PAYLOAD, default=None): vol.Any(None, cv.string),
vol.Required(CONF_SUBTYPE): cv.string,
vol.Required(CONF_TOPIC): cv.string,
vol.Required(CONF_TYPE): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE, default=None): vol.Any(None, cv.string),
},
validate_device_has_at_least_one_identifier,
)
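# Illustrative sketch (editor addition): a discovery payload shaped like the keys
# declared in TRIGGER_DISCOVERY_SCHEMA above. Topic, type and subtype values are
# placeholders; whether additional keys inherited from mqtt.MQTT_BASE_PLATFORM_SCHEMA
# are also required is not shown here.
_EXAMPLE_TRIGGER_DISCOVERY_PAYLOAD = {
    "automation_type": "trigger",
    "device": {"identifiers": ["0AFFD2"]},
    "topic": "example/triggers/button1",
    "type": "button_short_press",
    "subtype": "button_1",
    "payload": "short_press",
    "qos": 0,
}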
DEVICE_TRIGGERS = "mqtt_device_triggers"
@attr.s(slots=True)
class TriggerInstance:
"""Attached trigger settings."""
action: AutomationActionType = attr.ib()
automation_info: dict = attr.ib()
trigger: Trigger = attr.ib()
remove: CALLBACK_TYPE | None = attr.ib(default=None)
async def async_attach_trigger(self):
"""Attach MQTT trigger."""
mqtt_config = {
mqtt_trigger.CONF_PLATFORM: mqtt.DOMAIN,
mqtt_trigger.CONF_TOPIC: self.trigger.topic,
mqtt_trigger.CONF_ENCODING: DEFAULT_ENCODING,
mqtt_trigger.CONF_QOS: self.trigger.qos,
}
if self.trigger.payload:
mqtt_config[CONF_PAYLOAD] = self.trigger.payload
if self.trigger.value_template:
mqtt_config[CONF_VALUE_TEMPLATE] = self.trigger.value_template
mqtt_config = mqtt_trigger.TRIGGER_SCHEMA(mqtt_config)
if self.remove:
self.remove()
self.remove = await mqtt_trigger.async_attach_trigger(
self.trigger.hass,
mqtt_config,
self.action,
self.automation_info,
)
@attr.s(slots=True)
class Trigger:
"""Device trigger settings."""
device_id: str = attr.ib()
discovery_data: dict | None = attr.ib()
hass: HomeAssistant = attr.ib()
payload: str | None = attr.ib()
qos: int | None = attr.ib()
remove_signal: Callable[[], None] | None = attr.ib()
subtype: str = attr.ib()
topic: str | None = attr.ib()
type: str = attr.ib()
value_template: str | None = attr.ib()
trigger_instances: list[TriggerInstance] = attr.ib(factory=list)
async def add_trigger(self, action, automation_info):
"""Add MQTT trigger."""
instance = TriggerInstance(action, automation_info, self)
self.trigger_instances.append(instance)
if self.topic is not None:
# If we know about the trigger, subscribe to MQTT topic
await instance.async_attach_trigger()
@callback
def async_remove() -> None:
"""Remove trigger."""
if instance not in self.trigger_instances:
raise HomeAssistantError("Can't remove trigger twice")
if instance.remove:
instance.remove()
self.trigger_instances.remove(instance)
return async_remove
async def update_trigger(self, config, discovery_hash, remove_signal):
"""Update MQTT device trigger."""
self.remove_signal = remove_signal
self.type = config[CONF_TYPE]
self.subtype = config[CONF_SUBTYPE]
self.payload = config[CONF_PAYLOAD]
self.qos = config[CONF_QOS]
topic_changed = self.topic != config[CONF_TOPIC]
self.topic = config[CONF_TOPIC]
self.value_template = config[CONF_VALUE_TEMPLATE]
# Unsubscribe+subscribe if this trigger is in use and topic has changed
# If topic is same unsubscribe+subscribe will execute in the wrong order
# because unsubscribe is done with help of async_create_task
if topic_changed:
for trig in self.trigger_instances:
await trig.async_attach_trigger()
def detach_trigger(self):
"""Remove MQTT device trigger."""
# Mark trigger as unknown
self.topic = None
# Unsubscribe if this trigger is in use
for trig in self.trigger_instances:
if trig.remove:
trig.remove()
trig.remove = None
async def _update_device(hass, config_entry, config):
"""Update device registry."""
device_registry = await hass.helpers.device_registry.async_get_registry()
config_entry_id = config_entry.entry_id
device_info = device_info_from_config(config[CONF_DEVICE])
if config_entry_id is not None and device_info is not None:
device_info["config_entry_id"] = config_entry_id
device_registry.async_get_or_create(**device_info)
async def async_setup_trigger(hass, config, config_entry, discovery_data):
"""Set up the MQTT device trigger."""
config = TRIGGER_DISCOVERY_SCHEMA(config)
discovery_hash = discovery_data[ATTR_DISCOVERY_HASH]
discovery_id = discovery_hash[1]
remove_signal = None
async def discovery_update(payload):
"""Handle discovery update."""
_LOGGER.info(
"Got update for trigger with hash: %s '%s'", discovery_hash, payload
)
if not payload:
# Empty payload: Remove trigger
_LOGGER.info("Removing trigger: %s", discovery_hash)
debug_info.remove_trigger_discovery_data(hass, discovery_hash)
if discovery_id in hass.data[DEVICE_TRIGGERS]:
device_trigger = hass.data[DEVICE_TRIGGERS][discovery_id]
device_trigger.detach_trigger()
clear_discovery_hash(hass, discovery_hash)
remove_signal()
await cleanup_device_registry(hass, device.id)
else:
# Non-empty payload: Update trigger
_LOGGER.info("Updating trigger: %s", discovery_hash)
debug_info.update_trigger_discovery_data(hass, discovery_hash, payload)
config = TRIGGER_DISCOVERY_SCHEMA(payload)
await _update_device(hass, config_entry, config)
device_trigger = hass.data[DEVICE_TRIGGERS][discovery_id]
await device_trigger.update_trigger(config, discovery_hash, remove_signal)
async_dispatcher_send(hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None)
remove_signal = async_dispatcher_connect(
hass, MQTT_DISCOVERY_UPDATED.format(discovery_hash), discovery_update
)
await _update_device(hass, config_entry, config)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get_device(
{(DOMAIN, id_) for id_ in config[CONF_DEVICE][CONF_IDENTIFIERS]},
{tuple(x) for x in config[CONF_DEVICE][CONF_CONNECTIONS]},
)
if device is None:
async_dispatcher_send(hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None)
return
if DEVICE_TRIGGERS not in hass.data:
hass.data[DEVICE_TRIGGERS] = {}
if discovery_id not in hass.data[DEVICE_TRIGGERS]:
hass.data[DEVICE_TRIGGERS][discovery_id] = Trigger(
hass=hass,
device_id=device.id,
discovery_data=discovery_data,
type=config[CONF_TYPE],
subtype=config[CONF_SUBTYPE],
topic=config[CONF_TOPIC],
payload=config[CONF_PAYLOAD],
qos=config[CONF_QOS],
remove_signal=remove_signal,
value_template=config[CONF_VALUE_TEMPLATE],
)
else:
await hass.data[DEVICE_TRIGGERS][discovery_id].update_trigger(
config, discovery_hash, remove_signal
)
debug_info.add_trigger_discovery_data(
hass, discovery_hash, discovery_data, device.id
)
async_dispatcher_send(hass, MQTT_DISCOVERY_DONE.format(discovery_hash), None)
async def async_device_removed(hass: HomeAssistant, device_id: str):
"""Handle the removal of a device."""
triggers = await async_get_triggers(hass, device_id)
for trig in triggers:
device_trigger = hass.data[DEVICE_TRIGGERS].pop(trig[CONF_DISCOVERY_ID])
if device_trigger:
discovery_hash = device_trigger.discovery_data[ATTR_DISCOVERY_HASH]
discovery_topic = device_trigger.discovery_data[ATTR_DISCOVERY_TOPIC]
debug_info.remove_trigger_discovery_data(hass, discovery_hash)
device_trigger.detach_trigger()
clear_discovery_hash(hass, discovery_hash)
device_trigger.remove_signal()
mqtt.publish(
hass,
discovery_topic,
"",
retain=True,
)
async def async_get_triggers(
hass: HomeAssistant, device_id: str
) -> list[dict[str, Any]]:
"""List device triggers for MQTT devices."""
triggers: list[dict] = []
if DEVICE_TRIGGERS not in hass.data:
return triggers
for discovery_id, trig in hass.data[DEVICE_TRIGGERS].items():
if trig.device_id != device_id or trig.topic is None:
continue
trigger = {
**MQTT_TRIGGER_BASE,
"device_id": device_id,
"type": trig.type,
"subtype": trig.subtype,
"discovery_id": discovery_id,
}
triggers.append(trigger)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
if DEVICE_TRIGGERS not in hass.data:
hass.data[DEVICE_TRIGGERS] = {}
device_id = config[CONF_DEVICE_ID]
discovery_id = config[CONF_DISCOVERY_ID]
if discovery_id not in hass.data[DEVICE_TRIGGERS]:
hass.data[DEVICE_TRIGGERS][discovery_id] = Trigger(
hass=hass,
device_id=device_id,
discovery_data=None,
remove_signal=None,
type=config[CONF_TYPE],
subtype=config[CONF_SUBTYPE],
topic=None,
payload=None,
qos=None,
value_template=None,
)
return await hass.data[DEVICE_TRIGGERS][discovery_id].add_trigger(
action, automation_info
)
| 34.810496
| 88
| 0.674707
|
de5004bf203f3396abf84a4a4bf4f35d94faff27
| 4,931
|
py
|
Python
|
eventdriven/rltools/tfutil.py
|
sisl/rllab
|
932d6cc0911071443d1d43277ee011f4d832631e
|
[
"MIT"
] | 9
|
2018-04-04T10:46:31.000Z
|
2020-05-19T09:47:36.000Z
|
eventdriven/rltools/tfutil.py
|
sisl/rllab
|
932d6cc0911071443d1d43277ee011f4d832631e
|
[
"MIT"
] | null | null | null |
eventdriven/rltools/tfutil.py
|
sisl/rllab
|
932d6cc0911071443d1d43277ee011f4d832631e
|
[
"MIT"
] | 2
|
2021-12-11T22:53:05.000Z
|
2021-12-14T08:18:16.000Z
|
import numpy as np
import tensorflow as tf
from rltools import util
def logsumexp(a, axis, name=None):
"""
Like scipy.misc.logsumexp with keepdims=True
(does NOT eliminate the singleton axis)
"""
with tf.op_scope([a, axis], name, 'logsumexp') as scope:
a = tf.convert_to_tensor(a, name='a')
axis = tf.convert_to_tensor(axis, name='axis')
amax = tf.reduce_max(a, axis, keep_dims=True)
shifted_result = tf.log(tf.reduce_sum(tf.exp(a - amax), axis, keep_dims=True))
return tf.add(shifted_result, amax, name=scope)
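# Illustrative sketch (editor addition): the same computation written with NumPy for
# clarity; keepdims=True mirrors the keep_dims behaviour of the op above.
def _example_logsumexp_numpy(a, axis):
    amax = np.max(a, axis=axis, keepdims=True)
    return np.log(np.sum(np.exp(a - amax), axis=axis, keepdims=True)) + amax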
def lookup_last_idx(a, inds, name=None):
"""
Looks up indices in a. e.g. a[[1, 2, 3]] = [a[1], a[2], a[3]]
a is a d1 x d2 ... dn tensor
inds is a d1 x d2 ... d(n-1) tensor of integers
returns the tensor
out[i_1,...,i_{n-1}] = a[i_1,...,i_{n-1}, inds[i_1,...,i_{n-1}]]
"""
with tf.op_scope([a, inds], name, 'lookup_last_idx') as scope:
a = tf.convert_to_tensor(a, name='a')
inds = tf.convert_to_tensor(inds, name='inds')
# Flatten the arrays
ashape, indsshape = tf.shape(a), tf.shape(inds)
aflat, indsflat = tf.reshape(a, [-1]), tf.reshape(inds, [-1])
# Compute the indices corresponding to inds in the flattened array
# TODO Causes UserWarning: Converting sparse IndexedSlices to a dense Tensor of unknown shape.
delta = tf.gather(ashape, tf.size(ashape) - 1) # i.e. delta = ashape[-1],
aflatinds = tf.range(0, limit=tf.size(a), delta=delta) + indsflat
# Look up the desired elements in the flattened array, and reshape
# to the original shape
return tf.reshape(tf.gather(aflat, aflatinds), indsshape, name=scope)
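# Illustrative sketch (editor addition): the indexing semantics of lookup_last_idx
# expressed with NumPy, matching the docstring: out[...] = a[..., inds[...]] along
# the last axis.
def _example_lookup_last_idx_numpy(a, inds):
    return np.take_along_axis(a, inds[..., None], axis=-1)[..., 0]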
def flatcat(arrays, name=None):
"""
Flattens arrays and concatenates them in order.
"""
with tf.op_scope(arrays, name, 'flatcat') as scope:
return tf.concat(0, [tf.reshape(a, [-1]) for a in arrays], name=scope)
def fixedgradients(loss, params):
"""
Replace None by zero shaped as params
"""
grads = tf.gradients(loss, xs=params)
for idx, (grad, param) in enumerate(zip(grads, params)):
if grad is None:
grads[idx] = tf.zeros_like(param)
return grads
def unflatten_into_tensors(flatparams_P, output_shapes, name=None):
"""
Unflattens a vector produced by flatcat into a list of tensors of the specified shapes.
"""
with tf.op_scope([flatparams_P], name, 'unflatten_into_tensors') as scope:
outputs = []
curr_pos = 0
for shape in output_shapes:
size = np.prod(shape).astype('int')
flatval = flatparams_P[curr_pos:curr_pos + size]
outputs.append(tf.reshape(flatval, shape))
curr_pos += size
assert curr_pos == flatparams_P.get_shape().num_elements(), "{} != {}".format(
curr_pos, flatparams_P.get_shape().num_elements())
return tf.tuple(outputs, name=scope)
def unflatten_into_vars(flatparams_P, param_vars, name=None):
"""
Unflattens a vector produced by flatcat into the original variables
"""
with tf.op_scope([flatparams_P] + param_vars, name, 'unflatten_into_vars') as scope:
tensors = unflatten_into_tensors(flatparams_P,
[v.get_shape().as_list() for v in param_vars])
return tf.group(*[v.assign(t) for v, t in util.safezip(param_vars, tensors)], name=scope)
def subsample_feed(feed, frac):
assert isinstance(feed, tuple) and len(feed) >= 1
assert isinstance(frac, float) and 0. < frac <= 1.
l = feed[0].shape[0]
assert all(a.shape[0] == l for a in feed), 'All feed entries must have the same length'
subsamp_inds = np.random.choice(l, size=int(frac * l))
return tuple(a[subsamp_inds, ...] for a in feed)
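# Illustrative sketch (editor addition): drawing a 50% subsample from a feed of two
# equally long arrays; the shapes are placeholders.
def _example_subsample_feed():
    obs, acts = np.zeros((100, 4)), np.zeros((100, 2))
    sub_obs, sub_acts = subsample_feed((obs, acts), 0.5)
    # Matching rows are kept together: sub_obs.shape[0] == sub_acts.shape[0] == 50
    return sub_obs, sub_acts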
def function(inputs, outputs, updates=None):
if isinstance(outputs, list):
return TFFunction(inputs, outputs, updates)
elif isinstance(outputs, dict):
f = TFFunction(inputs, outputs.values(), updates)
return lambda *inputs, **kwargs: dict(zip(outputs.keys(), f(*inputs, **kwargs)))
else:
f = TFFunction(inputs, [outputs], updates)
return lambda *inputs, **kwargs: f(*inputs, **kwargs)[0]
class TFFunction(object):
def __init__(self, inputs, outputs, updates):
self._inputs = inputs
self._outputs = outputs
self._updates = [] if updates is None else updates
def __call__(self, *inputs_, **kwargs):
assert len(inputs_) == len(self._inputs)
feed_dict = dict(zip(self._inputs, inputs_))
sess = kwargs.pop('sess', tf.get_default_session())
results = sess.run(self._outputs + self._updates, feed_dict=feed_dict)
if any(result is not None and np.isnan(result).any() for result in results):
raise RuntimeError("NaN encountered")
return results[:len(self._outputs)]
| 37.930769
| 102
| 0.635976
|
c0fda86d8e5bfabe7337a5c81a3157b98644593b
| 349
|
py
|
Python
|
foursight_core/checks/helpers/confchecks.py
|
4dn-dcic/foursight-core
|
2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c
|
[
"MIT"
] | null | null | null |
foursight_core/checks/helpers/confchecks.py
|
4dn-dcic/foursight-core
|
2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c
|
[
"MIT"
] | 3
|
2021-08-11T07:09:24.000Z
|
2022-02-16T18:58:45.000Z
|
foursight_core/checks/helpers/confchecks.py
|
4dn-dcic/foursight-core
|
2e5ea594d38d04ad58f63ee42e5fb4b920bfb63c
|
[
"MIT"
] | null | null | null |
# Create a new confchecks.py for a new project (e.g. foursight-cgap)
from ...decorators import Decorators
# replace placeholder_prefix with an actual foursight_prefix
deco = Decorators('placeholder_prefix')
CheckResult = deco.CheckResult
ActionResult = deco.ActionResult
check_function = deco.check_function
action_function = deco.action_function
| 34.9
| 68
| 0.819484
|
395a26f225e90ab6d0182b745330b1f40d7a0d95
| 3,050
|
py
|
Python
|
uol_redacoes_xml/crawler/commons.py
|
gpassero/UOLRedacoesXML
|
94b74fc91c4e7a6b582ebc3708aa0dca2ba12ca6
|
[
"Apache-2.0"
] | 24
|
2017-01-05T11:19:37.000Z
|
2021-11-26T16:49:00.000Z
|
uol_redacoes_xml/crawler/commons.py
|
gpassero/UOLRedacoesXML
|
94b74fc91c4e7a6b582ebc3708aa0dca2ba12ca6
|
[
"Apache-2.0"
] | null | null | null |
uol_redacoes_xml/crawler/commons.py
|
gpassero/UOLRedacoesXML
|
94b74fc91c4e7a6b582ebc3708aa0dca2ba12ca6
|
[
"Apache-2.0"
] | 8
|
2017-03-28T13:54:51.000Z
|
2021-06-27T11:51:55.000Z
|
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 28 23:35:21 2016
@author: Guilherme Passero <guilherme.passero0@gmail.com>
"""
from http.client import HTTPSConnection as httpc
from xml.etree.ElementTree import tostring
from xml.dom import minidom
from pyquery import PyQuery as pq
import html2text
import re
import bz2
import time
conn = {}
h = html2text.HTML2Text()
h.ignore_links = True
h.ignore_images = True
h.ignore_emphasis = True
h.body_width = False
def get_conn(host):
global conn
if not host in conn or conn[host].sock is None:
conn[host] = httpc(host, timeout=999999)
return conn[host]
def close_conns():
global conn
for key, conn_ in conn.items():
conn_.close()
def get_web_page_content(url):
time.sleep(0.1)
if not url or url == '' or url == '?': return False
host, page = re.sub('.*http.?://', '', url).split('/', 1)
if not host: host = 'educacao.uol.com.br'
conn = get_conn(host)
print('Requesting ' + page, end='')
try:
conn.request('GET', '/'+page)
print(' OK', end='')
response = conn.getresponse()
print(' OK', end='')
if response.status == 200:
content = response.read()
print(' OK')
else:
content = False
print(' ERROR')
print(response.status, response.reason)
except:
print('Error connecting to ' + page)
return ''
# conn.close()
return content
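# Illustrative sketch (editor addition): fetching a single page through the pooled
# connections and releasing them afterwards. The URL is a placeholder and the call
# needs network access, so it only runs when explicitly invoked.
def _example_fetch():
    content = get_web_page_content('https://educacao.uol.com.br/bancoderedacoes/')
    close_conns()
    return content  # raw bytes on success, False or '' on failure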
def html2text(html):
return h.handle(html)
def handle_essay_content(html):
d = pq(html)
if d.text() == '':
return '', '', [], ''
errors = d.find('u').map(lambda i, e: (pq(e).text(), pq(e).next().text()))
d.remove('h1, h2, h3, h4')
original = h.handle(d.remove('.certo, .texto-corrigido').html())
# Remove suggestions that were not put inside "span.text-corrigido"
original = re.sub(r' \[(.*?)\]([.?!,])', r'\2', original)
original = re.sub(r'\[(.*?)\][ ]?', '', original)
d = pq(html)
d.remove('h1, h2, h3, h4')
d.find('u').map(lambda i, e: pq(e).text(pq(e).next().text().replace('[', '').replace(']', '')))
d.remove('.certo, .texto-corrigido')
fixed = h.handle(d.html())
d = pq(html)
d.remove('h1, h2, h3, h4')
d.find('.certo, .texto-corrigido').map(lambda i, e: pq(e).text('['+pq(e).text()+']'))
d.find('u').map(lambda i, e: pq(e).text('#-'+pq(e).text()+'-#'))
d.find('.erro, .texto-errado, u').map(lambda i, e: pq(e).text('*-'+pq(e).text()+'-*'))
review = h.handle(d.html())
return original, fixed, errors, review
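# Illustrative sketch (editor addition): handle_essay_content applied to a tiny
# hand-written snippet that uses the same markup conventions (<u> around the flagged
# excerpt, followed by a span.certo holding the suggested fix). The HTML is invented
# for illustration only.
def _example_handle_essay():
    html = ('<div><p>Ele <u>fazeram</u> <span class="certo">fizeram</span> '
            'o trabalho.</p></div>')
    original, fixed, errors, review = handle_essay_content(html)
    # errors pairs each flagged excerpt with its suggestion, e.g. ('fazeram', 'fizeram');
    # original keeps the flagged wording, fixed applies the suggestions, and review
    # keeps both, wrapped in the *- -* / [ ] delimiters added above.
    return original, fixed, errors, review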
def write_to_file(root, filename='essays.xml'):
xml = minidom.parseString(tostring(root)).toprettyxml(indent="\t")
# xml = re.sub('>[\r\n ]*', '>', xml)
# xml = re.sub('[\r\n ]*<', '<', xml)
xml = re.sub('[ ]*[\r\n]+[ ]*', '\n', xml)
xml = re.sub('\n[\n ]+', '\n', xml)
with open(filename, 'wt', encoding = 'utf-8') as f:
f.write(xml)
with bz2.open(filename+'.bz2', 'wt', encoding='utf-8') as f:
f.write(xml)
| 27.232143
| 99
| 0.564262
|
de5265ab6bdf90b564421b43d429b5507de7b1d4
| 6,637
|
py
|
Python
|
cat/apps/competition/views.py
|
oyusec/oyusec.github.io
|
9f9e81a953d7d624509bf055296b18da6ae769a9
|
[
"MIT"
] | 6
|
2021-01-19T16:01:05.000Z
|
2021-04-21T09:41:51.000Z
|
cat/apps/competition/views.py
|
ByamB4/oyusec.github.io
|
13094c310b926e0922fae0eb95e73d0c2774f574
|
[
"MIT"
] | 12
|
2021-05-03T01:44:16.000Z
|
2021-07-04T09:36:44.000Z
|
cat/apps/competition/views.py
|
oyusec/oyusec.github.io
|
9f9e81a953d7d624509bf055296b18da6ae769a9
|
[
"MIT"
] | 2
|
2021-12-09T01:25:30.000Z
|
2022-01-10T23:37:59.000Z
|
from django.db import models
from django.shortcuts import get_object_or_404
from django.utils.timezone import localdate
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.renderers import JSONRenderer
from rest_framework.response import Response
from rest_framework.permissions import AllowAny, IsAuthenticated
from apps.core.models import (
BaseUser,
BaseUserProfile
)
from apps.core.utils import *
from apps.ctf.models import (
Challenge,
Solve,
Submission
)
from apps.api.views import BaseView
from .models import (
CompetitionAuthor,
CompetitionUser,
Competition,
)
from apps.ctf.consts import *
from .utils import *
from .consts import *
class Competitions(BaseView):
def get(self, request):
response = self.serialize()
return Response({
'success': True,
'data': response
})
def serialize(self):
result = {
'live': [],
'coming': [],
'archive': []
}
for _ in Competition.objects.all():
result[_.status].append({
'name': _.name,
'description': _.description,
'id': _.uuid,
'photo': _.photo,
'slug': _.slug,
'start_date': convert_to_localtime(_.start_date),
'end_date': convert_to_localtime(_.end_date),
})
return result
class CompetitionView(BaseView):
def get(self, request, slug):
competition = get_object_or_404(Competition, slug=slug)
response = self.serialize(competition)
return Response({'success': True, 'data': response})
def serialize(self, competition):
challs = Challenge.objects.filter(
competition=competition, state=STATE_VISIBLE)
authors = []
for _ in CompetitionAuthor.objects.filter(
competition=competition).values('user__username', 'user__photo'):
authors.append({
'username': _['user__username'],
'photo': _['user__photo'],
})
result = {
'name': competition.name,
'description': competition.description,
'slug': competition.slug,
'photo': competition.photo,
'rule': competition.rule,
'prize': competition.prize,
'location': competition.location,
'weight': competition.weight,
'enrollment': get_enrollment(competition.enrollment),
'start_date': convert_to_localtime(competition.start_date),
'end_date': convert_to_localtime(competition.end_date),
'status': get_status(competition.status),
'max_score': challs.aggregate(models.Sum('value'))['value__sum'],
'submission_count': Submission.objects.filter(challenge__in=challs).count(),
'user_count': CompetitionUser.objects.filter(competition=competition).count(),
'authors': authors,
}
return result
class CompetitionChallenges(BaseView):
def get(self, request, slug):
competition = get_object_or_404(Competition, slug=slug)
response = self.serialize(competition)
return Response(response)
def serialize(self, competition):
challenges = []
if competition.status == COMPETITION_COMING:
return {
'success': False
}
for challenge in Challenge.objects.filter(competition=competition, state__contains=STATE_VISIBLE):
challenges.append({
'author': {
'username': challenge.user.username,
'slug': challenge.user.slug,
},
'name': challenge.name,
'value': challenge.value,
'description': challenge.description,
'id': challenge.uuid,
'status': 'unsolved',
'state': challenge.state,
'category': challenge.category,
'competition': True
})
# if challenge.category not in ret.keys():
# ret[challenge.category] = []
# ret[challenge.category].append(data)
return {
'success': True,
'data': challenges,
}
class CompetitionChallengesSolves(BaseView):
def get(self, request, slug):
competition = get_object_or_404(Competition, slug=slug)
challenges = Challenge.objects.filter(
competition=competition,
state=STATE_VISIBLE
)
response = self.serialize(challenges)
return Response({'success': True, 'data': response})
def serialize(self, challenges):
ret = []
for challenge in challenges:
solves = Solve.objects.filter(challenge=challenge).count()
ret.append({
'challengeID': challenge.uuid,
'solves': solves
})
return ret
class CompetitionScoreboard(BaseView):
def get(self, request, slug):
competition = get_object_or_404(Competition, slug=slug)
standings = CompetitionUser.get_standings(_comp=competition)
return Response({'success': True, 'data': standings})
class CompetitionComing(BaseView):
def get(self, request):
comp = Competition.objects.filter(
status=COMPETITION_COMING).values('start_date', 'weight', 'name', 'enrollment', 'slug', 'uuid').order_by('start_date').first()
if not comp:
return Response({'success': False})
return Response({'success': True, 'data': {
'slug': comp['slug'],
'id': comp['uuid'],
'name': comp['name'],
'weight': comp['weight'],
'enrollment': comp['enrollment'],
'start_date': convert_to_localtime(comp['start_date']),
}})
class CompetitionUserSolves(BaseView):
def get(self, request, slug):
"""
        If the user is not authenticated, an empty list is returned.
"""
user = request.user
response = []
if not user.is_authenticated:
return Response({'success': True, 'data': response})
comp = get_object_or_404(Competition, slug=slug)
solves = Solve.objects.filter(
user=user, challenge__competition=comp, challenge__state__contains=STATE_VISIBLE)
for solve in solves:
response.append({
'challenge_id': solve.challenge.uuid
})
return Response({'success': True, 'data': response})
| 31.908654
| 138
| 0.588368
|
8fd9f08113c9e73466bce6832cacfb3c1f41ec0e
| 1,534
|
py
|
Python
|
s3_sqs_consumer.py
|
lucksjb/localstack-demo
|
9e4537ca4557154accfe56134d4cc8510dc1211d
|
[
"MIT"
] | null | null | null |
s3_sqs_consumer.py
|
lucksjb/localstack-demo
|
9e4537ca4557154accfe56134d4cc8510dc1211d
|
[
"MIT"
] | 2
|
2021-11-08T01:45:33.000Z
|
2021-11-08T01:46:37.000Z
|
s3_sqs_consumer.py
|
lucksjb/localstack-demo
|
9e4537ca4557154accfe56134d4cc8510dc1211d
|
[
"MIT"
] | 1
|
2021-12-07T17:51:07.000Z
|
2021-12-07T17:51:07.000Z
|
import json
import os
import io
import logging
import boto3
from dotenv import load_dotenv
# load the environment variables
load_dotenv()
# configure a logger
logging.basicConfig(level=logging.INFO)
# get a client for the SQS service, plus the queue URL
sqs = boto3.client('sqs', endpoint_url=os.getenv('AWS_ENDPOINT_URL'))
queue = sqs.get_queue_url(QueueName=os.getenv('S3_QUEUE'))
# get a client for the S3 service
s3 = boto3.client('s3', endpoint_url=os.getenv('AWS_ENDPOINT_URL'))
# pull/receive messages from the queue
response = sqs.receive_message(
QueueUrl=queue['QueueUrl'], MaxNumberOfMessages=2)
if 'Messages' in response:
for message in response['Messages']:
        logging.info(f'Message [{message["MessageId"]}] received')
        # convert the JSON message body into a python dict
body = json.loads(message['Body'])
        # convert the inner JSON message into a python dict
msg = json.loads(body["Message"])
        # upload the file to the bucket
with io.BytesIO() as f:
f.write(str(msg['value']).encode('utf-8'))
f.seek(0)
s3.upload_fileobj(f, Bucket=os.getenv(
'S3_BUCKET'), Key=f'orders-{msg["id"]}')
        # delete the message from the queue
sqs.delete_message(
QueueUrl=queue['QueueUrl'], ReceiptHandle=message['ReceiptHandle'])
        print(
            f'Message [{msg["id"]}] saved to bucket [{os.getenv("S3_BUCKET")}]')
else:
    logging.info('no messages received')
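# Illustrative producer-side sketch (not part of the original file; the SNS
# topic variable is hypothetical). The inner "Message" field handled above
# suggests the queue is fed by an SNS topic, e.g.:
#
#   sns = boto3.client('sns', endpoint_url=os.getenv('AWS_ENDPOINT_URL'))
#   sns.publish(TopicArn=os.getenv('SNS_TOPIC_ARN'),
#               Message=json.dumps({'id': 1, 'value': 'order payload'}))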
| 34.863636
| 81
| 0.672099
|
54d3d0a218dec3588844333cd47e1f92489d8df9
| 1,411
|
py
|
Python
|
tensorflow/contrib/gan/python/features/python/spectral_normalization.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2016-12-17T15:25:25.000Z
|
2022-01-29T21:50:53.000Z
|
tensorflow/contrib/gan/python/features/python/spectral_normalization.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 59
|
2019-06-17T09:37:49.000Z
|
2022-01-19T01:21:34.000Z
|
tensorflow/contrib/gan/python/features/python/spectral_normalization.py
|
PaulWang1905/tensorflow
|
ebf12d22b4801fb8dab5034cc94562bf7cc33fa0
|
[
"Apache-2.0"
] | 36
|
2017-07-27T21:12:40.000Z
|
2022-02-03T16:45:56.000Z
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-like layers and utilities that implement Spectral Normalization.
Based on "Spectral Normalization for Generative Adversarial Networks" by Miyato,
et al in ICLR 2018. https://openreview.net/pdf?id=B1QRgziT-
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.gan.python.features.python import spectral_normalization_impl
# pylint: disable=wildcard-import
from tensorflow.contrib.gan.python.features.python.spectral_normalization_impl import *
# pylint: enable=wildcard-import
from tensorflow.python.util.all_util import remove_undocumented
__all__ = spectral_normalization_impl.__all__
remove_undocumented(__name__, __all__)
| 42.757576
| 87
| 0.76258
|
4dcac23de49fbe69f2ce3facaa0f9530cf699a0d
| 3,148
|
py
|
Python
|
app/auth/routes.py
|
hi-guy/StudyFlask
|
c0f8e0df3ec76bfdd9ccaabfe86c54529f977d32
|
[
"MIT"
] | 1
|
2018-07-18T14:00:37.000Z
|
2018-07-18T14:00:37.000Z
|
app/auth/routes.py
|
kevinmel2000/flask-demo
|
bd27185c85823d5e88e1ce51edc237706f2145f3
|
[
"MIT"
] | null | null | null |
app/auth/routes.py
|
kevinmel2000/flask-demo
|
bd27185c85823d5e88e1ce51edc237706f2145f3
|
[
"MIT"
] | 1
|
2020-07-07T04:14:04.000Z
|
2020-07-07T04:14:04.000Z
|
from flask import render_template, redirect, url_for, flash, request
from werkzeug.urls import url_parse
from flask_login import login_user, logout_user, current_user
from flask_babel import _
from app import db
from app.auth import bp
from app.auth.forms import LoginForm, RegistrationForm, \
ResetPasswordRequestForm, ResetPasswordForm
from app.models import User
from app.auth.email import send_password_reset_email
@bp.route('/login', methods=['GET', 'POST'])
def login():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = LoginForm()
if form.validate_on_submit():
user = User.query.filter_by(username=form.username.data).first()
if user is None or not user.check_password(form.password.data):
flash(_('Invalid username or password'))
return redirect(url_for('auth.login'))
login_user(user, remember=form.remember_me.data)
next_page = request.args.get('next')
if not next_page or url_parse(next_page).netloc != '':
next_page = url_for('main.index')
return redirect(next_page)
return render_template('auth/login.html', title=_('Sign In'), form=form)
@bp.route('/logout')
def logout():
logout_user()
return redirect(url_for('main.index'))
@bp.route('/register', methods=['GET', 'POST'])
def register():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = RegistrationForm()
if form.validate_on_submit():
user = User(username=form.username.data, email=form.email.data)
user.set_password(form.password.data)
db.session.add(user)
db.session.commit()
flash(_('Congratulations, you are now a registered user!'))
return redirect(url_for('auth.login'))
return render_template('auth/register.html', title=_('Register'),
form=form)
@bp.route('/reset_password_request', methods=['GET', 'POST'])
def reset_password_request():
if current_user.is_authenticated:
return redirect(url_for('main.index'))
form = ResetPasswordRequestForm()
if form.validate_on_submit():
user = User.query.filter_by(email=form.email.data).first()
if user:
send_password_reset_email(user)
flash(
_('Check your email for the instructions to reset your password'))
return redirect(url_for('auth.login'))
return render_template('auth/reset_password_request.html',
title=_('Reset Password'), form=form)
@bp.route('/reset_password/<token>', methods=['GET', 'POST'])
def reset_password(token):
if current_user.is_authenticated:
return redirect(url_for('main.index'))
user = User.verify_reset_password_token(token)
if not user:
return redirect(url_for('main.index'))
form = ResetPasswordForm()
if form.validate_on_submit():
user.set_password(form.password.data)
db.session.commit()
flash(_('Your password has been reset.'))
return redirect(url_for('auth.login'))
return render_template('auth/reset_password.html', form=form)
| 38.390244
| 78
| 0.68075
|
ac187e1df01d0bd2a4be348adfa83646d1949ba4
| 6,490
|
py
|
Python
|
setup.py
|
asford/dvc
|
4ed55d00511ea3d9115b76c463e1a466408b11ef
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
asford/dvc
|
4ed55d00511ea3d9115b76c463e1a466408b11ef
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
asford/dvc
|
4ed55d00511ea3d9115b76c463e1a466408b11ef
|
[
"Apache-2.0"
] | null | null | null |
import importlib.util
import os
from setuptools import find_packages, setup
from setuptools.command.build_py import build_py as _build_py
# Prevents pkg_resources import in entry point script,
# see https://github.com/ninjaaron/fast-entry_points.
# This saves about 200 ms on startup time for non-wheel installs.
try:
import fastentrypoints # noqa: F401, pylint: disable=unused-import
except ImportError:
pass # not able to import when installing through pre-commit
# Read package meta-data from version.py
# see https://packaging.python.org/guides/single-sourcing-package-version/
pkg_dir = os.path.dirname(os.path.abspath(__file__))
version_path = os.path.join(pkg_dir, "dvc", "version.py")
spec = importlib.util.spec_from_file_location("dvc.version", version_path)
dvc_version = importlib.util.module_from_spec(spec)
spec.loader.exec_module(dvc_version)
version = dvc_version.__version__ # noqa: F821
# To achieve consistency between the build version and the one provided
# by your package during runtime, you need to **pin** the build version.
#
# This custom class will replace the version.py module with a **static**
# `__version__` that your package can read at runtime, assuring consistency.
#
# References:
# - https://docs.python.org/3.7/distutils/extending.html
# - https://github.com/python/mypy
class build_py(_build_py):
def pin_version(self):
path = os.path.join(self.build_lib, "dvc")
self.mkpath(path)
with open(os.path.join(path, "version.py"), "w") as fobj:
fobj.write("# AUTOGENERATED at build time by setup.py\n")
fobj.write('__version__ = "{}"\n'.format(version))
def run(self):
self.execute(self.pin_version, ())
_build_py.run(self)
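# For reference, the file written by pin_version() into <build_lib>/dvc/version.py
# ends up looking like this (the version string below is only an example):
#
#   # AUTOGENERATED at build time by setup.py
#   __version__ = "2.0.0"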
install_requires = [
"ply>=3.9", # See https://github.com/pyinstaller/pyinstaller/issues/1945
"colorama>=0.3.9",
"configobj>=5.0.6",
"gitpython>3",
"dulwich>=0.20.20",
"pygit2>=1.5.0",
"setuptools>=34.0.0",
"nanotime>=0.5.2",
"pyasn1>=0.4.1",
"voluptuous>=0.11.7",
"jsonpath-ng>=1.5.1",
"requests>=2.22.0",
"grandalf==0.6",
"distro>=1.3.0",
"appdirs>=1.4.3",
"ruamel.yaml>=0.16.1",
"toml>=0.10.1",
"funcy>=1.14",
"pathspec>=0.6.0",
"shortuuid>=0.5.0",
"tqdm>=4.45.0,<5",
"packaging>=19.0",
"zc.lockfile>=1.2.1",
"flufl.lock>=3.2,<4",
"win-unicode-console>=0.5; sys_platform == 'win32'",
"pywin32>=225; sys_platform == 'win32'",
"networkx>=2.1",
"psutil>=5.8.0",
"pydot>=1.2.4",
"speedcopy>=2.0.1; python_version < '3.8' and sys_platform == 'win32'",
"dataclasses==0.7; python_version < '3.7'",
"flatten_dict>=0.3.0,<1",
"tabulate>=0.8.7",
"pygtrie==2.3.2",
"dpath>=2.0.1,<3",
"shtab>=1.3.4,<2",
"rich>=9.0.0",
"dictdiffer>=0.8.1",
"python-benedict>=0.21.1",
"pyparsing==2.4.7",
"typing_extensions>=3.7.4",
"fsspec>=0.8.5",
"diskcache>=5.2.1",
]
# Extra dependencies for remote integrations
gs = ["gcsfs>=0.7.2"]
gdrive = ["pydrive2>=1.8.1", "six >= 1.13.0"]
s3 = ["boto3>=1.9.201"]
azure = ["adlfs>=0.6.3", "azure-identity>=1.4.0", "knack"]
# https://github.com/Legrandin/pycryptodome/issues/465
oss = ["oss2==2.6.1", "pycryptodome<3.9.9"]
ssh = ["paramiko[invoke]>=2.7.0"]
# Remove the env marker if/when pyarrow is available for Python3.9
hdfs = ["pyarrow>=2.0.0"]
webhdfs = ["hdfs==2.5.8"]
webdav = ["webdavclient3>=3.14.5"]
# gssapi should not be included in all_remotes, because it doesn't have wheels
# for linux and mac, so it will fail to compile if user doesn't have all the
# requirements, including kerberos itself. Once all the wheels are available,
# we can start shipping it by default.
ssh_gssapi = ["paramiko[invoke,gssapi]>=2.7.0"]
all_remotes = gs + s3 + azure + ssh + oss + gdrive + hdfs + webhdfs + webdav
# Extra dependencies to run tests
tests_requirements = [
"wheel>=0.31.1",
# Test requirements:
"pytest>=6.0.1,<6.2.2",
"pytest-docker>=0.7.2",
"pytest-timeout>=1.3.3",
"pytest-cov>=2.6.1",
"pytest-xdist>=1.26.1",
"pytest-mock==1.11.2",
"pytest-lazy-fixture",
"pytest-tap",
"flaky>=3.5.3",
"mock>=3.0.0",
"xmltodict>=0.11.0",
"google-compute-engine==2.8.13",
"Pygments", # required by collective.checkdocs,
"collective.checkdocs",
"psutil",
"pydocstyle<4.0",
"jaraco.windows==3.9.2",
"mock-ssh-server>=0.8.2",
"moto==1.3.16.dev122",
# moto's indirect dependency that is causing problems with flufl.lock's
# dependency (atpublic). See https://github.com/iterative/dvc/pull/4853
"aws-sam-translator<1.29.0",
# for moto's indirect dependency.
# See https://github.com/iterative/dvc/pull/4879
"urllib3<1.26.0",
"rangehttpserver==1.2.0",
"beautifulsoup4==4.4.0",
"pylint==2.7.2",
"pylint-pytest==0.3.0",
"pylint-plugin-utils==0.6",
"wget",
"filelock",
"mypy",
"wsgidav",
"crc32c",
"google-cloud-storage==1.19.0",
# pypi doesn't allow for direct dependencies
# "gdrivefs @ git+https://github.com/intake/gdrivefs.git",
]
setup(
name="dvc",
version=version,
description="Git for data scientists - manage your code and data together",
long_description=open("README.rst", "r", encoding="UTF-8").read(),
author="Dmitry Petrov",
author_email="dmitry@dvc.org",
download_url="https://github.com/iterative/dvc",
license="Apache License 2.0",
install_requires=install_requires,
extras_require={
"all": all_remotes,
"gs": gs,
"gdrive": gdrive,
"s3": s3,
"azure": azure,
"oss": oss,
"ssh": ssh,
"ssh_gssapi": ssh_gssapi,
"hdfs": hdfs,
"webhdfs": webhdfs,
"webdav": webdav,
"tests": tests_requirements,
},
keywords="data-science data-version-control machine-learning git"
" developer-tools reproducibility collaboration ai",
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
packages=find_packages(exclude=["tests"]),
include_package_data=True,
url="http://dvc.org",
entry_points={"console_scripts": ["dvc = dvc.main:main"]},
cmdclass={"build_py": build_py},
zip_safe=False,
)
| 32.288557
| 79
| 0.632974
|
8edddd39f476967870a3fefe406abb65f77102c5
| 1,355
|
py
|
Python
|
Gathered CTF writeups/2016-09-09-asis-final/races/RACES.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:00:41.000Z
|
2022-03-27T06:00:41.000Z
|
Gathered CTF writeups/2016-09-09-asis-final/races/RACES.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | null | null | null |
Gathered CTF writeups/2016-09-09-asis-final/races/RACES.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:01:42.000Z
|
2022-03-27T06:01:42.000Z
|
from Crypto.Util.number import *
from gmpy import *
def gen_prime(nbit):
while True:
prime = getPrime(nbit)
if prime % 3 == 2:
return prime
def add(a, b, n):
if a == 0:
return b
if b == 0:
return a
l = ((b[1] - a[1]) * invert(b[0] - a[0], n)) % n
x = (l*l - a[0] - b[0]) % n
y = (l*(a[0] - x) - a[1]) % n
return (x, y)
def double(a, A, n):
if a == 0:
return a
l = ((3*a[0]*a[0] + A) * invert(2*a[1], n)) % n
x = (l*l - 2*a[0]) % n
y = (l*(a[0] - x) - a[1]) % n
return (x, y)
def multiply(point, exponent, A, n):
r0 = 0
r1 = point
for i in bin(exponent)[2:]:
if i == '0':
r1 = add(r0, r1, n)
r0 = double(r0, A, n)
else:
r0 = add(r0, r1, n)
r1 = double(r1, A, n)
return r0
def gen_keypair(e, nbit):
p = gen_prime(nbit)
q = gen_prime(nbit)
n = p*q
lcm = (p+1)*(q+1)/GCD(p+1, q+1)
d = invert(e, lcm)
pubkey = (n, e)
privkey = (n, d)
return pubkey, privkey
def encrypt(msg, pubkey):
n, e = pubkey
if msg < n:
while True:
r = getRandomRange(1, n)
m1, m2 = r - msg, r
if m1 > 0:
break
c1, c2 = multiply((m1, m2), e, 0, n)
return (int(c1), int(c2))
else:
return 'Error!!!'
| 21.854839
| 52
| 0.436162
|
024704be56e8f589217059dcd2d95383513ccbcf
| 209
|
py
|
Python
|
problems/remove-duplicates-from-string/remove-duplicates-from-string.py
|
Yanpas/the-coding-interview
|
72ba881aabc05f7d2a7cb32a4bbb2b1d7d4c5436
|
[
"MIT"
] | 1
|
2017-07-27T09:01:13.000Z
|
2017-07-27T09:01:13.000Z
|
problems/remove-duplicates-from-string/remove-duplicates-from-string.py
|
Yanpas/the-coding-interview
|
72ba881aabc05f7d2a7cb32a4bbb2b1d7d4c5436
|
[
"MIT"
] | null | null | null |
problems/remove-duplicates-from-string/remove-duplicates-from-string.py
|
Yanpas/the-coding-interview
|
72ba881aabc05f7d2a7cb32a4bbb2b1d7d4c5436
|
[
"MIT"
] | 2
|
2018-07-03T18:12:18.000Z
|
2021-03-19T20:27:54.000Z
|
def remove_duplicates(string):
    seen = []
    for ch in string.lower():
        if ch not in seen:
            seen.append(ch)
    return "".join(seen)

print(remove_duplicates("tree traversal"))
| 19
| 41
| 0.684211
|
313a2d5477918ec60c08ea9b39420562a2cf43c2
| 32,718
|
py
|
Python
|
workspace/catkin_ws/src/rosserial/rosserial_python/src/rosserial_python/SerialClient.py
|
attaoveisi/AttBot2_Localization
|
6ce84e26cc55fc391fdbcee168aa04102d59d375
|
[
"MIT"
] | 1
|
2021-01-10T10:51:50.000Z
|
2021-01-10T10:51:50.000Z
|
workspace/catkin_ws/src/rosserial/rosserial_python/src/rosserial_python/SerialClient.py
|
attaoveisi/AttBot2_Localization
|
6ce84e26cc55fc391fdbcee168aa04102d59d375
|
[
"MIT"
] | null | null | null |
workspace/catkin_ws/src/rosserial/rosserial_python/src/rosserial_python/SerialClient.py
|
attaoveisi/AttBot2_Localization
|
6ce84e26cc55fc391fdbcee168aa04102d59d375
|
[
"MIT"
] | null | null | null |
#####################################################################
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
__author__ = "mferguson@willowgarage.com (Michael Ferguson)"
import array
import errno
import imp
import io
import multiprocessing
import Queue as queue
import socket
import struct
import sys
import threading
import time
from serial import Serial, SerialException, SerialTimeoutException
import roslib
import rospy
from std_msgs.msg import Time
from rosserial_msgs.msg import TopicInfo, Log
from rosserial_msgs.srv import RequestParamRequest, RequestParamResponse
import diagnostic_msgs.msg
ERROR_MISMATCHED_PROTOCOL = "Mismatched protocol version in packet: lost sync or rosserial_python is from different ros release than the rosserial client"
ERROR_NO_SYNC = "no sync with device"
ERROR_PACKET_FAILED = "Packet Failed : Failed to read msg data"
def load_pkg_module(package, directory):
#check if its in the python path
path = sys.path
try:
imp.find_module(package)
except ImportError:
roslib.load_manifest(package)
try:
m = __import__( package + '.' + directory )
except ImportError:
rospy.logerr( "Cannot import package : %s"% package )
rospy.logerr( "sys.path was " + str(path) )
return None
return m
def load_message(package, message):
m = load_pkg_module(package, 'msg')
m2 = getattr(m, 'msg')
return getattr(m2, message)
def load_service(package,service):
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
srv = getattr(s, service)
mreq = getattr(s, service+"Request")
mres = getattr(s, service+"Response")
return srv,mreq,mres
class Publisher:
"""
Publisher forwards messages from the serial device to ROS.
"""
def __init__(self, topic_info):
""" Create a new publisher. """
self.topic = topic_info.topic_name
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.publisher = rospy.Publisher(self.topic, self.message, queue_size=10)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def handlePacket(self, data):
""" Forward message to ROS network. """
m = self.message()
m.deserialize(data)
self.publisher.publish(m)
class Subscriber:
"""
Subscriber forwards messages from ROS to the serial device.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.id = topic_info.topic_id
self.parent = parent
# find message type
package, message = topic_info.message_type.split('/')
self.message = load_message(package, message)
if self.message._md5sum == topic_info.md5sum:
self.subscriber = rospy.Subscriber(self.topic, self.message, self.callback)
else:
raise Exception('Checksum does not match: ' + self.message._md5sum + ',' + topic_info.md5sum)
def callback(self, msg):
""" Forward message to serial device. """
data_buffer = io.BytesIO()
msg.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
def unregister(self):
rospy.loginfo("Removing subscriber: %s", self.topic)
self.subscriber.unregister()
class ServiceServer:
"""
ServiceServer responds to requests from ROS.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
self.service = rospy.Service(self.topic, srv, self.callback)
# response message
self.data = None
def unregister(self):
rospy.loginfo("Removing service: %s", self.topic)
self.service.shutdown()
def callback(self, req):
""" Forward request to serial device. """
data_buffer = io.BytesIO()
req.serialize(data_buffer)
self.response = None
self.parent.send(self.id, data_buffer.getvalue())
while self.response is None:
pass
return self.response
def handlePacket(self, data):
""" Forward response to ROS network. """
r = self.mres()
r.deserialize(data)
self.response = r
class ServiceClient:
"""
    ServiceClient forwards service requests from the serial device to the ROS network.
"""
def __init__(self, topic_info, parent):
self.topic = topic_info.topic_name
self.parent = parent
# find message type
package, service = topic_info.message_type.split('/')
s = load_pkg_module(package, 'srv')
s = getattr(s, 'srv')
self.mreq = getattr(s, service+"Request")
self.mres = getattr(s, service+"Response")
srv = getattr(s, service)
rospy.loginfo("Starting service client, waiting for service '" + self.topic + "'")
rospy.wait_for_service(self.topic)
self.proxy = rospy.ServiceProxy(self.topic, srv)
def handlePacket(self, data):
""" Forward request to ROS network. """
req = self.mreq()
req.deserialize(data)
# call service proxy
resp = self.proxy(req)
# serialize and publish
data_buffer = io.BytesIO()
resp.serialize(data_buffer)
self.parent.send(self.id, data_buffer.getvalue())
class RosSerialServer:
"""
RosSerialServer waits for a socket connection then passes itself, forked as a
new process, to SerialClient which uses it as a serial port. It continues to listen
for additional connections. Each forked process is a new ros node, and proxies ros
operations (e.g. publish/subscribe) from its connection to the rest of ros.
"""
def __init__(self, tcp_portnum, fork_server=False):
rospy.loginfo("Fork_server is: %s" % fork_server)
self.tcp_portnum = tcp_portnum
self.fork_server = fork_server
def listen(self):
self.serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.serversocket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
#bind the socket to a public host, and a well-known port
self.serversocket.bind(("", self.tcp_portnum)) #become a server socket
self.serversocket.listen(1)
while True:
#accept connections
rospy.loginfo("Waiting for socket connection")
clientsocket, address = self.serversocket.accept()
#now do something with the clientsocket
rospy.loginfo("Established a socket connection from %s on port %s" % address)
self.socket = clientsocket
self.isConnected = True
if self.fork_server: # if configured to launch server in a separate process
rospy.loginfo("Forking a socket server process")
process = multiprocessing.Process(target=self.startSocketServer, args=address)
process.daemon = True
process.start()
rospy.loginfo("launched startSocketServer")
else:
rospy.loginfo("calling startSerialClient")
self.startSerialClient()
rospy.loginfo("startSerialClient() exited")
def startSerialClient(self):
client = SerialClient(self)
try:
client.run()
except KeyboardInterrupt:
pass
except RuntimeError:
rospy.loginfo("RuntimeError exception caught")
self.isConnected = False
except socket.error:
rospy.loginfo("socket.error exception caught")
self.isConnected = False
finally:
rospy.loginfo("Client has exited, closing socket.")
self.socket.close()
for sub in client.subscribers.values():
sub.unregister()
for srv in client.services.values():
srv.unregister()
def startSocketServer(self, port, address):
rospy.loginfo("starting ROS Serial Python Node serial_node-%r" % address)
rospy.init_node("serial_node_%r" % address)
self.startSerialClient()
def flushInput(self):
pass
def write(self, data):
if not self.isConnected:
return
length = len(data)
totalsent = 0
while totalsent < length:
try:
totalsent += self.socket.send(data[totalsent:])
except BrokenPipeError:
raise RuntimeError("RosSerialServer.write() socket connection broken")
def read(self, rqsted_length):
self.msg = b''
if not self.isConnected:
return self.msg
while len(self.msg) < rqsted_length:
chunk = self.socket.recv(rqsted_length - len(self.msg))
if chunk == b'':
raise RuntimeError("RosSerialServer.read() socket connection broken")
self.msg = self.msg + chunk
return self.msg
def inWaiting(self):
try: # the caller checks just for <1, so we'll peek at just one byte
chunk = self.socket.recv(1, socket.MSG_DONTWAIT|socket.MSG_PEEK)
if chunk == b'':
raise RuntimeError("RosSerialServer.inWaiting() socket connection broken")
return len(chunk)
except BlockingIOError:
return 0
class SerialClient(object):
"""
    SerialClient bridges a rosserial device (serial port or socket) to the ROS network.
"""
header = b'\xff'
# hydro introduces protocol ver2 which must match node_handle.h
# The protocol version is sent as the 2nd sync byte emitted by each end
protocol_ver1 = b'\xff'
protocol_ver2 = b'\xfe'
protocol_ver = protocol_ver2
def __init__(self, port=None, baud=57600, timeout=5.0, fix_pyserial_for_test=False):
""" Initialize node, connect to bus, attempt to negotiate topics. """
self.read_lock = threading.RLock()
self.write_lock = threading.RLock()
self.write_queue = queue.Queue()
self.write_thread = None
self.lastsync = rospy.Time(0)
self.lastsync_lost = rospy.Time(0)
self.lastsync_success = rospy.Time(0)
self.last_read = rospy.Time(0)
self.last_write = rospy.Time(0)
self.timeout = timeout
self.synced = False
self.fix_pyserial_for_test = fix_pyserial_for_test
self.publishers = dict() # id:Publishers
self.subscribers = dict() # topic:Subscriber
self.services = dict() # topic:Service
self.pub_diagnostics = rospy.Publisher('/diagnostics', diagnostic_msgs.msg.DiagnosticArray, queue_size=10)
if port is None:
# no port specified, listen for any new port?
pass
elif hasattr(port, 'read'):
#assume its a filelike object
self.port=port
else:
# open a specific port
while not rospy.is_shutdown():
try:
if self.fix_pyserial_for_test:
# see https://github.com/pyserial/pyserial/issues/59
self.port = Serial(port, baud, timeout=self.timeout, write_timeout=10, rtscts=True, dsrdtr=True)
else:
self.port = Serial(port, baud, timeout=self.timeout, write_timeout=10)
break
except SerialException as e:
rospy.logerr("Error opening serial: %s", e)
time.sleep(3)
if rospy.is_shutdown():
return
time.sleep(0.1) # Wait for ready (patch for Uno)
self.buffer_out = -1
self.buffer_in = -1
self.callbacks = dict()
# endpoints for creating new pubs/subs
self.callbacks[TopicInfo.ID_PUBLISHER] = self.setupPublisher
self.callbacks[TopicInfo.ID_SUBSCRIBER] = self.setupSubscriber
# service client/servers have 2 creation endpoints (a publisher and a subscriber)
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_PUBLISHER] = self.setupServiceServerPublisher
self.callbacks[TopicInfo.ID_SERVICE_SERVER+TopicInfo.ID_SUBSCRIBER] = self.setupServiceServerSubscriber
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_PUBLISHER] = self.setupServiceClientPublisher
self.callbacks[TopicInfo.ID_SERVICE_CLIENT+TopicInfo.ID_SUBSCRIBER] = self.setupServiceClientSubscriber
# custom endpoints
self.callbacks[TopicInfo.ID_PARAMETER_REQUEST] = self.handleParameterRequest
self.callbacks[TopicInfo.ID_LOG] = self.handleLoggingRequest
self.callbacks[TopicInfo.ID_TIME] = self.handleTimeRequest
rospy.sleep(2.0)
self.requestTopics()
self.lastsync = rospy.Time.now()
def requestTopics(self):
""" Determine topics to subscribe/publish. """
rospy.loginfo('Requesting topics...')
# TODO remove if possible
if not self.fix_pyserial_for_test:
with self.read_lock:
self.port.flushInput()
# request topic sync
self.write_queue.put(self.header + self.protocol_ver + b"\x00\x00\xff\x00\x00\xff")
def txStopRequest(self):
""" Send stop tx request to client before the node exits. """
if not self.fix_pyserial_for_test:
with self.read_lock:
self.port.flushInput()
self.write_queue.put(self.header + self.protocol_ver + b"\x00\x00\xff\x0b\x00\xf4")
rospy.loginfo("Sending tx stop request")
def tryRead(self, length):
try:
read_start = time.time()
bytes_remaining = length
result = bytearray()
while bytes_remaining != 0 and time.time() - read_start < self.timeout:
with self.read_lock:
received = self.port.read(bytes_remaining)
if len(received) != 0:
self.last_read = rospy.Time.now()
result.extend(received)
bytes_remaining -= len(received)
if bytes_remaining != 0:
raise IOError("Returned short (expected %d bytes, received %d instead)." % (length, length - bytes_remaining))
return bytes(result)
except Exception as e:
raise IOError("Serial Port read failure: %s" % e)
def run(self):
""" Forward recieved messages to appropriate publisher. """
# Launch write thread.
if self.write_thread is None:
self.write_thread = threading.Thread(target=self.processWriteQueue)
self.write_thread.daemon = True
self.write_thread.start()
# Handle reading.
data = ''
read_step = None
while self.write_thread.is_alive() and not rospy.is_shutdown():
if (rospy.Time.now() - self.lastsync).to_sec() > (self.timeout * 3):
if self.synced:
rospy.logerr("Lost sync with device, restarting...")
else:
rospy.logerr("Unable to sync with device; possible link problem or link software version mismatch such as hydro rosserial_python with groovy Arduino")
self.lastsync_lost = rospy.Time.now()
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_NO_SYNC)
self.requestTopics()
self.lastsync = rospy.Time.now()
# This try-block is here because we make multiple calls to read(). Any one of them can throw
# an IOError if there's a serial problem or timeout. In that scenario, a single handler at the
# bottom attempts to reconfigure the topics.
try:
with self.read_lock:
if self.port.inWaiting() < 1:
time.sleep(0.001)
continue
# Find sync flag.
flag = [0, 0]
read_step = 'syncflag'
flag[0] = self.tryRead(1)
if (flag[0] != self.header):
continue
# Find protocol version.
read_step = 'protocol'
flag[1] = self.tryRead(1)
if flag[1] != self.protocol_ver:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_MISMATCHED_PROTOCOL)
rospy.logerr("Mismatched protocol version in packet (%s): lost sync or rosserial_python is from different ros release than the rosserial client" % repr(flag[1]))
protocol_ver_msgs = {
self.protocol_ver1: 'Rev 0 (rosserial 0.4 and earlier)',
self.protocol_ver2: 'Rev 1 (rosserial 0.5+)',
b'\xfd': 'Some future rosserial version'
}
if flag[1] in protocol_ver_msgs:
found_ver_msg = 'Protocol version of client is ' + protocol_ver_msgs[flag[1]]
else:
found_ver_msg = "Protocol version of client is unrecognized"
rospy.loginfo("%s, expected %s" % (found_ver_msg, protocol_ver_msgs[self.protocol_ver]))
continue
# Read message length, checksum (3 bytes)
read_step = 'message length'
msg_len_bytes = self.tryRead(3)
msg_length, _ = struct.unpack("<hB", msg_len_bytes)
# Validate message length checksum.
if sum(array.array("B", msg_len_bytes)) % 256 != 255:
rospy.loginfo("Wrong checksum for msg length, length %d, dropping message." % (msg_length))
continue
# Read topic id (2 bytes)
read_step = 'topic id'
topic_id_header = self.tryRead(2)
topic_id, = struct.unpack("<H", topic_id_header)
# Read serialized message data.
read_step = 'data'
try:
msg = self.tryRead(msg_length)
except IOError:
self.sendDiagnostics(diagnostic_msgs.msg.DiagnosticStatus.ERROR, ERROR_PACKET_FAILED)
rospy.loginfo("Packet Failed : Failed to read msg data")
rospy.loginfo("expected msg length is %d", msg_length)
raise
                # Read the checksum for topic id and msg
read_step = 'data checksum'
chk = self.tryRead(1)
checksum = sum(array.array('B', topic_id_header + msg + chk))
# Validate checksum.
if checksum % 256 == 255:
self.synced = True
self.lastsync_success = rospy.Time.now()
try:
self.callbacks[topic_id](msg)
except KeyError:
rospy.logerr("Tried to publish before configured, topic id %d" % topic_id)
self.requestTopics()
time.sleep(0.001)
else:
rospy.loginfo("wrong checksum for topic id and msg")
except IOError as exc:
rospy.logwarn('Last read step: %s' % read_step)
rospy.logwarn('Run loop error: %s' % exc)
# One of the read calls had an issue. Just to be safe, request that the client
# reinitialize their topics.
with self.read_lock:
self.port.flushInput()
with self.write_lock:
self.port.flushOutput()
self.requestTopics()
self.txStopRequest()
self.write_thread.join()
def setPublishSize(self, size):
if self.buffer_out < 0:
self.buffer_out = size
rospy.loginfo("Note: publish buffer size is %d bytes" % self.buffer_out)
def setSubscribeSize(self, size):
if self.buffer_in < 0:
self.buffer_in = size
rospy.loginfo("Note: subscribe buffer size is %d bytes" % self.buffer_in)
def setupPublisher(self, data):
""" Register a new publisher. """
try:
msg = TopicInfo()
msg.deserialize(data)
pub = Publisher(msg)
self.publishers[msg.topic_id] = pub
self.callbacks[msg.topic_id] = pub.handlePacket
self.setPublishSize(msg.buffer_size)
rospy.loginfo("Setup publisher on %s [%s]" % (msg.topic_name, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of publisher failed: %s", e)
def setupSubscriber(self, data):
""" Register a new subscriber. """
try:
msg = TopicInfo()
msg.deserialize(data)
if not msg.topic_name in list(self.subscribers.keys()):
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Setup subscriber on %s [%s]" % (msg.topic_name, msg.message_type) )
elif msg.message_type != self.subscribers[msg.topic_name].message._type:
old_message_type = self.subscribers[msg.topic_name].message._type
self.subscribers[msg.topic_name].unregister()
sub = Subscriber(msg, self)
self.subscribers[msg.topic_name] = sub
self.setSubscribeSize(msg.buffer_size)
rospy.loginfo("Change the message type of subscriber on %s from [%s] to [%s]" % (msg.topic_name, old_message_type, msg.message_type) )
except Exception as e:
rospy.logerr("Creation of subscriber failed: %s", e)
def setupServiceServerPublisher(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceServerSubscriber(self, data):
""" Register a new service server. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceServer(msg, self)
rospy.loginfo("Setup service server on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service server failed: %s", e)
def setupServiceClientPublisher(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setPublishSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mreq._md5sum == msg.md5sum:
self.callbacks[msg.topic_id] = srv.handlePacket
else:
raise Exception('Checksum does not match: ' + srv.mreq._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def setupServiceClientSubscriber(self, data):
""" Register a new service client. """
try:
msg = TopicInfo()
msg.deserialize(data)
self.setSubscribeSize(msg.buffer_size)
try:
srv = self.services[msg.topic_name]
except KeyError:
srv = ServiceClient(msg, self)
rospy.loginfo("Setup service client on %s [%s]" % (msg.topic_name, msg.message_type) )
self.services[msg.topic_name] = srv
if srv.mres._md5sum == msg.md5sum:
srv.id = msg.topic_id
else:
raise Exception('Checksum does not match: ' + srv.mres._md5sum + ',' + msg.md5sum)
except Exception as e:
rospy.logerr("Creation of service client failed: %s", e)
def handleTimeRequest(self, data):
""" Respond to device with system time. """
t = Time()
t.data = rospy.Time.now()
data_buffer = io.BytesIO()
t.serialize(data_buffer)
self.send( TopicInfo.ID_TIME, data_buffer.getvalue() )
self.lastsync = rospy.Time.now()
def handleParameterRequest(self, data):
""" Send parameters to device. Supports only simple datatypes and arrays of such. """
req = RequestParamRequest()
req.deserialize(data)
resp = RequestParamResponse()
try:
param = rospy.get_param(req.name)
except KeyError:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if param is None:
rospy.logerr("Parameter %s does not exist"%req.name)
return
if isinstance(param, dict):
rospy.logerr("Cannot send param %s because it is a dictionary"%req.name)
return
if not isinstance(param, list):
param = [param]
#check to make sure that all parameters in list are same type
t = type(param[0])
for p in param:
            if t != type(p):
                rospy.logerr('All parameters in the list %s must be of the same type' % req.name)
return
if t == int or t == bool:
resp.ints = param
if t == float:
resp.floats =param
if t == str:
resp.strings = param
data_buffer = io.BytesIO()
resp.serialize(data_buffer)
self.send(TopicInfo.ID_PARAMETER_REQUEST, data_buffer.getvalue())
def handleLoggingRequest(self, data):
""" Forward logging information from serial device into ROS. """
msg = Log()
msg.deserialize(data)
if msg.level == Log.ROSDEBUG:
rospy.logdebug(msg.msg)
elif msg.level == Log.INFO:
rospy.loginfo(msg.msg)
elif msg.level == Log.WARN:
rospy.logwarn(msg.msg)
elif msg.level == Log.ERROR:
rospy.logerr(msg.msg)
elif msg.level == Log.FATAL:
rospy.logfatal(msg.msg)
def send(self, topic, msg):
"""
Queues data to be written to the serial port.
"""
self.write_queue.put((topic, msg))
def _write(self, data):
"""
        Writes raw data over the serial port. Assumes the data is formatted as a packet. http://wiki.ros.org/rosserial/Overview/Protocol
"""
with self.write_lock:
self.port.write(data)
self.last_write = rospy.Time.now()
def _send(self, topic, msg_bytes):
"""
Send a message on a particular topic to the device.
"""
length = len(msg_bytes)
if self.buffer_in > 0 and length > self.buffer_in:
rospy.logerr("Message from ROS network dropped: message larger than buffer.\n%s" % msg)
return -1
else:
# frame : header (1b) + version (1b) + msg_len(2b) + msg_len_chk(1b) + topic_id(2b) + msg(nb) + msg_topic_id_chk(1b)
length_bytes = struct.pack('<h', length)
length_checksum = 255 - (sum(array.array('B', length_bytes)) % 256)
length_checksum_bytes = struct.pack('B', length_checksum)
topic_bytes = struct.pack('<h', topic)
msg_checksum = 255 - (sum(array.array('B', topic_bytes + msg_bytes)) % 256)
msg_checksum_bytes = struct.pack('B', msg_checksum)
self._write(self.header + self.protocol_ver + length_bytes + length_checksum_bytes + topic_bytes + msg_bytes + msg_checksum_bytes)
return length
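        # Worked example of the framing above (illustrative values only):
        #   topic=125, msg_bytes=b'\x01\x02'
        #   length_bytes          = b'\x02\x00'
        #   length_checksum_bytes = b'\xfd'    # 255 - (0x02 + 0x00)
        #   topic_bytes           = b'\x7d\x00'
        #   msg_checksum_bytes    = b'\x7f'    # 255 - (0x7d + 0x00 + 0x01 + 0x02)
        # so the bytes written are b'\xff\xfe\x02\x00\xfd\x7d\x00\x01\x02\x7f'.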
def processWriteQueue(self):
"""
Main loop for the thread that processes outgoing data to write to the serial port.
"""
while not rospy.is_shutdown():
if self.write_queue.empty():
time.sleep(0.01)
else:
data = self.write_queue.get()
while True:
try:
if isinstance(data, tuple):
topic, msg = data
self._send(topic, msg)
elif isinstance(data, bytes):
self._write(data)
else:
rospy.logerr("Trying to write invalid data type: %s" % type(data))
break
except SerialTimeoutException as exc:
rospy.logerr('Write timeout: %s' % exc)
time.sleep(1)
except RuntimeError as exc:
rospy.logerr('Write thread exception: %s' % exc)
break
def sendDiagnostics(self, level, msg_text):
msg = diagnostic_msgs.msg.DiagnosticArray()
status = diagnostic_msgs.msg.DiagnosticStatus()
status.name = "rosserial_python"
msg.header.stamp = rospy.Time.now()
msg.status.append(status)
status.message = msg_text
status.level = level
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[0].key="last sync"
if self.lastsync.to_sec()>0:
status.values[0].value=time.ctime(self.lastsync.to_sec())
else:
status.values[0].value="never"
status.values.append(diagnostic_msgs.msg.KeyValue())
status.values[1].key="last sync lost"
status.values[1].value=time.ctime(self.lastsync_lost.to_sec())
self.pub_diagnostics.publish(msg)
| 40.194103
| 181
| 0.593863
|
f0e62cbddb63bcb326582a4761ad183bcdeb3224
| 9,018
|
py
|
Python
|
tests/test_phl_cpu.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
tests/test_phl_cpu.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
tests/test_phl_cpu.py
|
RobinCamarasa/MONAI
|
8207e1e2a3555ddc3fe938e058552651900dc951
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import numpy as np
import torch
from parameterized import parameterized
from monai.networks.layers.filtering import PHLFilter
from tests.utils import skip_if_no_cpp_extention
TEST_CASES = [
[
        # Case Description
"2 batches, 1 dimensions, 1 channels, 1 features",
# Sigmas
[1, 0.2],
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 1]
],
# Batch 1
[
# Channel 0
[0, 0, 1, 0, 0]
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 0.2, 0.5, 0, 1],
],
# Batch 1
[
# Channel 0
[0.5, 0, 1, 1, 1]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.468968, 0.364596, 0.4082, 0.332579, 0.468968]
],
# Batch 1
[
# Channel 0
[0.202473, 0.176527, 0.220995, 0.220995, 0.220995]
],
],
],
[
        # Case Description
"1 batches, 1 dimensions, 3 channels, 1 features",
# Sigmas
[1],
# Input
[
# Batch 0
[
# Channel 0
[1, 0, 0, 0, 0],
# Channel 1
[0, 0, 0, 0, 1],
# Channel 2
[0, 0, 1, 0, 0],
],
],
# Features
[
# Batch 0
[
# Channel 0
[1, 0.2, 0.5, 0.2, 1],
],
],
# Expected
[
# Batch 0
[
# Channel 0
[0.229572, 0.182884, 0.202637, 0.182884, 0.229572],
# Channel 1
[0.229572, 0.182884, 0.202637, 0.182884, 0.229572],
# Channel 2
[0.201235, 0.208194, 0.205409, 0.208194, 0.201235],
],
],
],
[
        # Case Description
"1 batches, 2 dimensions, 1 channels, 3 features",
# Sigmas
[5, 3, 3],
# Input
[
# Batch 0
[
# Channel 0
[[9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 6, 6, 6], [9, 9, 6, 6, 6]]
],
],
# Features
[
# Batch 0
[
# Channel 0
[[9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 0, 0, 0], [9, 9, 6, 6, 6], [9, 9, 6, 6, 6]],
# Channel 1
[[0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4], [0, 1, 2, 3, 4]],
# Channel 2
[[0, 0, 0, 0, 0], [1, 1, 1, 1, 1], [2, 2, 2, 2, 2], [3, 3, 3, 3, 3], [4, 4, 4, 4, 4]],
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
[7.696051, 7.427121, 1.191990, 1.156004, 1.157489],
[7.670297, 7.371155, 1.340232, 1.287871, 1.304018],
[7.639579, 7.365163, 1.473319, 1.397826, 1.416861],
[7.613517, 7.359183, 5.846500, 5.638952, 5.350098],
[7.598255, 7.458446, 5.912375, 5.583625, 5.233126],
]
],
],
],
[
        # Case Description
"1 batches, 3 dimensions, 1 channels, 1 features",
# Sigmas
[5, 3, 3],
# Input
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
]
],
],
# Features
[
# Batch 0
[
# Channel 0
[
# Frame 0
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 1
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0], [9, 9, 9, 0, 0]],
# Frame 2
[[0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 3
[[0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
# Frame 4
[[0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 5, 5, 5], [0, 0, 0, 0, 0], [0, 0, 0, 0, 0]],
]
],
],
# Expected
[
# Batch 0
[
# Channel 0
[
# Frame 0
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
],
# Frame 1
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
[3.578490, 3.578490, 3.578490, 0.284234, 0.284234],
],
# Frame 2
[
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
# Frame 3
[
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
# Frame 4
[
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 1.359728, 1.359728, 1.359728],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
[0.284234, 0.284234, 0.284234, 0.284234, 0.284234],
],
]
],
],
],
]
@skip_if_no_cpp_extention
class PHLFilterTestCaseCpu(unittest.TestCase):
@parameterized.expand(TEST_CASES)
def test_cpu(self, test_case_description, sigmas, input, features, expected):
# Create input tensors
input_tensor = torch.from_numpy(np.array(input)).to(dtype=torch.float, device=torch.device("cpu"))
feature_tensor = torch.from_numpy(np.array(features)).to(dtype=torch.float, device=torch.device("cpu"))
# apply filter
output = PHLFilter.apply(input_tensor, feature_tensor, sigmas).cpu().numpy()
        # Ensure results are as expected
np.testing.assert_allclose(output, expected, atol=1e-4)
if __name__ == "__main__":
unittest.main()
| 34.818533
| 111
| 0.378909
|
ac6f8626966b34fe600eeaaf372728c99d2e1e71
| 6,247
|
py
|
Python
|
exploreAgent.py
|
roboTurt/ROB537-FinalProject
|
32a15cb1454f16a8bfd13977670eacaf13bf3fbb
|
[
"MIT"
] | null | null | null |
exploreAgent.py
|
roboTurt/ROB537-FinalProject
|
32a15cb1454f16a8bfd13977670eacaf13bf3fbb
|
[
"MIT"
] | null | null | null |
exploreAgent.py
|
roboTurt/ROB537-FinalProject
|
32a15cb1454f16a8bfd13977670eacaf13bf3fbb
|
[
"MIT"
] | null | null | null |
import gym
import gym_Explore2D
import numpy as np
from collections import deque
from collections import namedtuple
import random
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cuda"
Experience = namedtuple('Experience',
('state', 'action', 'nextState', 'reward'))
## Hyper Parameters ##
BatchSize = 1000
Gamma = 0.6
# eps_start = 1
# eps_end = 0.01
# eps_decay = 0.001
TargetUpdate = 10
ReplayBufSize = 1000000
LearningRate = 0.001
NumOfEpisodes = 100000
class ReplayMemory():
def __init__(self, capacity):
self.memory = deque(maxlen = capacity)
def push(self, *args):
"""Saves a transition."""
self.memory.append(Experience(*args))
def sample(self, batchSize):
return random.sample(self.memory, batchSize)
def sampleAvailable(self, batchSize):
return len(self.memory) >= batchSize
class epsilonGreedy():
...
class QValues():
#device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
device = "cuda"
@staticmethod
def get_current(policy_net, states, actions):
max = torch.max(policy_net(states), dim=2)[0][:,0].to(device)
return max.to(device)
#print(max)
#return max
#print(policy_net(states).max(dim=2)[0][:,0])
#return policy_net(states).max(dim=2)[0][:,0]#.gather(dim=1, index=actions.unsqueeze(-1))
@staticmethod
def get_next(target_net, next_states):
final_state_locations = next_states.flatten(start_dim=1) \
.max(dim=1)[0].eq(9).type(torch.bool).to(device)
non_final_state_locations = (final_state_locations == False)
non_final_states = next_states[non_final_state_locations]
batch_size = next_states.shape[0]
values = torch.zeros(batch_size).float().to(QValues.device)
values[non_final_state_locations] = target_net(non_final_states).max(dim=2)[0][:,0]
return values.to(device)
def extract_tensors(experiences):
# Convert batch of Experiences to Experience of batches
batch = Experience(*zip(*experiences))
t1 = torch.stack(batch.state).to(device)
t2 = torch.stack(batch.action).to(device)
t3 = torch.stack(batch.reward).to(device)
t4 = torch.stack(batch.nextState).to(device)
return (t1,t2,t3,t4)
class Agent():
def __init__(self,numActions, device):
self.numActions = numActions
self.device = device
def select_action(self, state, network):
epsilon = 0.1
if epsilon > random.random():
#choose to explore
action = random.randint(0, 3)
return torch.tensor([action]).to(self.device)
else:
#choose to exploit
with torch.no_grad():
return torch.argmax(network.forward(state)).item()
#return network.forward(state).argmax(dim=1).item()
class DQN(nn.Module):
def __init__(self, mapHeight, mapWidth):
super().__init__()
self.fc1 = nn.Linear(in_features=((mapHeight)*mapWidth), out_features=256)
self.fc2 = nn.Linear(in_features=256, out_features=256)
self.out = nn.Linear(in_features=256, out_features=4)
def forward(self, agentState):
#print(agentState.shape) #10. 1, 65, 64
layerOutput = torch.flatten(agentState, start_dim=-2, end_dim=-1).float()
#print(layerOutput.shape)
#print(self.fc1)
layerOutput = F.relu(self.fc1(layerOutput))
layerOutput = F.relu(self.fc2(layerOutput))
layerOutput = self.out(layerOutput)
return layerOutput
env = gym.make('Explore2D-v0')
agent = Agent(env.numActionsAvailable(), device)
memory = ReplayMemory(ReplayBufSize)
policy_net = DQN(env.getEnvSize()[0],env.getEnvSize()[1]).to(device)
target_net = DQN(env.getEnvSize()[0],env.getEnvSize()[1]).to(device)
target_net.load_state_dict(policy_net.state_dict())
target_net.eval()
optimizer = optim.Adam(params=policy_net.parameters(), lr=LearningRate)
env.setStepLimit(300) #max episode length
totalReward = 0
for episode in range(NumOfEpisodes):
state = env.reset()
done = False
i = 0
while not done:
i += 1
state = state.unsqueeze(0).unsqueeze(0)
action = agent.select_action(state, policy_net) + 1
#print(action)
nextState, reward, done, info = env.step(action)
memory.push(state, torch.tensor([action]), nextState, torch.tensor([reward]))
state = nextState
if memory.sampleAvailable(BatchSize):
experiences = memory.sample(BatchSize)
states, actions, rewards, nextStates = extract_tensors(experiences)
current_q_values = QValues.get_current(policy_net, states.squeeze(1), actions)
next_q_values = QValues.get_next(target_net, nextStates.unsqueeze(1))
target_q_values = (next_q_values * Gamma) + rewards
loss = F.mse_loss(current_q_values, target_q_values[:,0])
#print(rewards)
optimizer.zero_grad()
loss.backward()
optimizer.step()
if episode % TargetUpdate == 0:
target_net.load_state_dict(policy_net.state_dict())
print("Episode finished after {} timesteps".format(i+1))
state = env.reset()
done = False
i = 0
while not done:
state = state.unsqueeze(0).unsqueeze(0)
#print('state for greed action',state.shape)
action = agent.select_action(state, policy_net) + 1
nextState, reward, done, info = env.step(action)
state = nextState
i += 1
env.render()
print("Episode finished after {} timesteps".format(i+1))
# randomMove = np.random.randint(low =1, high = 5) #RL agent takes observation and selects a move. RNG in placeholder of agent
# observation, reward, done, info = env.step(randomMove)
# totalReward += reward
# if done:
# print("{} steps remaining".format(observation[1]))
# print("Episode finished after {} timesteps".format(i+1))
# print("total reward for episode: " + str(totalReward))
# break
#env.reset()
| 32.367876
| 129
| 0.653434
|
eee9902fce12cb28e805db262311152d58fec576
| 2,779
|
py
|
Python
|
data/data_split/data_split_potsdam.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | 1
|
2022-03-23T11:54:33.000Z
|
2022-03-23T11:54:33.000Z
|
data/data_split/data_split_potsdam.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | null | null | null |
data/data_split/data_split_potsdam.py
|
z1021190674/GMAUResNeXt_RS
|
a8a7444bf30e509cefc01b3be4b0587d367cda2e
|
[
"MIT"
] | null | null | null |
"""
Split the dataset according to the id of the train/val/test split
"""
import sys
print(sys.path)
from osgeo import gdal_array
from data_split_utils import get_path_list, get_data_split
if __name__ == '__main__':
### Potsdam ###
# the id of the split datasets
train_id = '2_10, 3_10, 3_11, 3_12, 4_11, 4_12, 5_10, 5_12, 6_8, 6_9, 6_10, 6_11, 6_12, 7_7, 7_9, 7_11, 7_12'
val_id = '2_11, 2_12, 4_10, 5_11, 6_7, 7_8, 7_10'
# train_id = '2_10, 3_10, 3_11, 3_12, 4_11, 4_12, 5_10, 5_12, 6_8, 6_9, 6_10, 6_11, 6_12, 7_7, 7_9, 7_11, 7_12, 2_11, 2_12, 4_10, 5_11, 6_7, 7_8, 7_10'
# val_id = ''
train_id = train_id.split(', ')
val_id = val_id.split(', ')
### get data split ###
# get the data list
dic_data = get_path_list(path = r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\4_Ortho_RGBIR',
prefix=r'top_potsdam_',
suffix=r'_RGBIR.tif',
train_id = train_id,
val_id = val_id,)
train_list = dic_data['train_list']
test_list = dic_data['test_list']
val_list = dic_data['val_list']
get_data_split(train_list, r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\data_split\1200\train',
size_samp=(1200, 1200), overlap=600)
# get_data_split(test_list, r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\data_split\test',
# size_samp=(1920, 1920), overlap=960)
get_data_split(val_list, r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\data_split\1200\val',
size_samp=(1200, 1200), overlap=600)
# ### get colored_label split ###
# get the colored_label list
dic_label = get_path_list(path = r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\5_Labels_all',
prefix=r'top_potsdam_',
suffix=r'_label.tif',
train_id = train_id,
val_id = val_id,)
train_label_list = dic_label['train_list']
test_label_list = dic_label['test_list']
val_label_list = dic_label['val_list']
# split the colored label image
get_data_split(train_label_list, r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\colored_label_split\1200\train',
size_samp=(1200, 1200), overlap=600)
# get_data_split(test_label_list, r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\colored_label_split\test',
# size_samp=(1920, 1920), overlap=960)
get_data_split(val_label_list, r'D:\ISPRS\ISPRS 2D Semantic Labeling Contest\potsdam\colored_label_split\1200\val',
size_samp=(1200, 1200), overlap=600)
    # The resulting values are uint8 -- save them as a numpy array??
x= ''
    # Read the path list of the dataset
#
###
| 40.275362
| 155
| 0.630802
|
40edc9a849a7d5d3af2a748ce33637e3597c2916
| 3,915
|
py
|
Python
|
auth0/v2/test/management/test_connections.py
|
maronnax/auth0-python
|
855e275da1f9fddc851f34df4a6b304eed8abb96
|
[
"MIT"
] | null | null | null |
auth0/v2/test/management/test_connections.py
|
maronnax/auth0-python
|
855e275da1f9fddc851f34df4a6b304eed8abb96
|
[
"MIT"
] | null | null | null |
auth0/v2/test/management/test_connections.py
|
maronnax/auth0-python
|
855e275da1f9fddc851f34df4a6b304eed8abb96
|
[
"MIT"
] | null | null | null |
import unittest
import mock
from ...management.connections import Connections
class TestConnection(unittest.TestCase):
@mock.patch('auth0.v2.management.connections.RestClient')
def test_all(self, mock_rc):
mock_instance = mock_rc.return_value
mock_instance.get.return_value = {}
c = Connections(domain='domain', token='jwttoken')
c.all()
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/connections', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'strategy': None,
'include_fields': 'true'})
c.all(fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/connections', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'strategy': None,
'include_fields': 'false'})
c.all(fields=['a', 'b'], strategy='strategy', include_fields=True)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/connections', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'strategy': 'strategy',
'include_fields': 'true'})
@mock.patch('auth0.v2.management.connections.RestClient')
def test_get(self, mock_rc):
mock_instance = mock_rc.return_value
mock_instance.get.return_value = {}
c = Connections(domain='domain', token='jwttoken')
c.get('an-id')
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/connections/an-id', args[0])
self.assertEqual(kwargs['params'], {'fields': None,
'include_fields': 'true'})
c.get('an-id', fields=['a', 'b'])
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/connections/an-id', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'true'})
c.get('an-id', fields=['a', 'b'], include_fields=False)
args, kwargs = mock_instance.get.call_args
self.assertEqual('https://domain/api/v2/connections/an-id', args[0])
self.assertEqual(kwargs['params'], {'fields': 'a,b',
'include_fields': 'false'})
@mock.patch('auth0.v2.management.connections.RestClient')
def test_delete(self, mock_rc):
mock_instance = mock_rc.return_value
mock_instance.delete.return_value = {}
c = Connections(domain='domain', token='jwttoken')
c.delete('this-id')
mock_instance.delete.assert_called_with(
'https://domain/api/v2/connections/this-id'
)
@mock.patch('auth0.v2.management.connections.RestClient')
def test_update(self, mock_rc):
mock_instance = mock_rc.return_value
mock_instance.patch.return_value = {}
c = Connections(domain='domain', token='jwttoken')
c.update('that-id', {'a': 'b', 'c': 'd'})
mock_instance.patch.assert_called_with(
'https://domain/api/v2/connections/that-id',
data={'a': 'b', 'c': 'd'}
)
@mock.patch('auth0.v2.management.connections.RestClient')
def test_create(self, mock_rc):
mock_instance = mock_rc.return_value
mock_instance.post.return_value = {}
c = Connections(domain='domain', token='jwttoken')
c.create({'a': 'b', 'c': 'd'})
mock_instance.post.assert_called_with(
'https://domain/api/v2/connections',
data={'a': 'b', 'c': 'd'}
)
| 37.285714
| 76
| 0.565006
|
930361dd5c40b41e73d01dd22ff326af57517ecc
| 513
|
py
|
Python
|
2014/03/django-rest-framework/src/demo3/serializers.py
|
stephanpoetschner/django-vienna
|
6045832e2ebf095896c62db2f1341b65f2acfa73
|
[
"MIT"
] | 2
|
2017-12-11T03:56:46.000Z
|
2018-03-23T20:39:11.000Z
|
2014/03/django-rest-framework/src/demo3/serializers.py
|
stephanpoetschner/django-vienna
|
6045832e2ebf095896c62db2f1341b65f2acfa73
|
[
"MIT"
] | null | null | null |
2014/03/django-rest-framework/src/demo3/serializers.py
|
stephanpoetschner/django-vienna
|
6045832e2ebf095896c62db2f1341b65f2acfa73
|
[
"MIT"
] | null | null | null |
from .models import Book
from rest_framework import serializers
class BookSerializer(serializers.HyperlinkedModelSerializer):
title = serializers.CharField(source='title')
authors = serializers.Field(source='get_author_names')
created_by = serializers.Field(source='created_by.username')
class Meta:
model = Book
fields = ('url',
'title',
'abstract',
'price_net',
'authors',
'created_by')
| 30.176471
| 64
| 0.60039
|
3f99a5c3f145e85f0bbbf4e883b6b1f8a3251dc5
| 119,539
|
py
|
Python
|
pyemf.py
|
6tudent/pyemf
|
830e61008af8acf1830e5c91dc876313a771afdb
|
[
"BSD-3-Clause-No-Nuclear-Warranty"
] | 16
|
2016-01-22T01:40:10.000Z
|
2021-07-22T02:19:24.000Z
|
pyemf.py
|
6tudent/pyemf
|
830e61008af8acf1830e5c91dc876313a771afdb
|
[
"BSD-3-Clause-No-Nuclear-Warranty"
] | 1
|
2017-11-07T11:53:14.000Z
|
2017-11-07T11:53:14.000Z
|
pyemf.py
|
jeremysanders/pyemf
|
830e61008af8acf1830e5c91dc876313a771afdb
|
[
"BSD-3-Clause-No-Nuclear-Warranty"
] | 11
|
2017-02-28T21:02:26.000Z
|
2020-10-21T19:23:29.000Z
|
#!/usr/bin/env python
"""
pyemf is a pure python module that provides a cross-platform ability
to generate enhanced metafiles (.emf files), a vector graphics format
defined by the ECMA-234 standard. Enhanced metafiles are a natively
supported image and scalable clip-art format in the OpenOffice suite
of tools and in Windows applications.
U{ECMA-234<http://www.ecma-international.org/publications/standards/Ecma-234.htm>}
is the published interface for enhanced metafiles, which is also a
file-based representation of the Windows graphics device interface.
This API follows most of the naming conventions of ECMA-234, and most
of the parameter lists of methods are the same as their ECMA-234
equivalents. The primary difference is that pyemf has extended the
API to be object-oriented based on the class L{EMF}. So, while in
ECMA-234 the first argument is generally the device context, here in
pyemf it is implicit in the class instance.
ECMA-234 defines a lot of constants (mostly integers that are used as
flags to various functions) that pyemf defines as module level
variables. So, rather than pollute your global namespace, it is
recommended that you use C{import pyemf} rather than C{from pyemf
import *}.
Introduction
============
To use pyemf in your programs, you L{instantiate<EMF.__init__>} an
L{EMF} object, draw some stuff using its methods, and save the file.
An example::
#!/usr/bin/env python
import pyemf
width=8.0
height=6.0
dpi=300
emf=pyemf.EMF(width,height,dpi)
thin=emf.CreatePen(pyemf.PS_SOLID,1,(0x01,0x02,0x03))
emf.SelectObject(thin)
emf.Polyline([(0,0),(width*dpi,height*dpi)])
emf.Polyline([(0,height*dpi),(width*dpi,0)])
emf.save("test-1.emf")
This small program creates a 8in x 6in EMF at 300 dots per inch, and
draws two lines connecting the opposite corners. This simple test is
available as C{test-1.py} in the C{examples} directory of the pyemf
distribution. There are many other small test programs to demonstrate
other features of the EMF class.
Naming Conventions in pyemf
===========================
Methods that belong to ECMA-234 are C{CamelCased} starting with a
capital letter. Methods that apply to the operation of the L{EMF}
class itself (i.e. L{load<EMF.load>} and L{save<EMF.save>}) are
C{lower} cased. Constants described in L{pyemf} that are used as
parameters are C{ALL_UPPER_CASE}.
Coordinate System
=================
Coordinates are addressed in a coordinate system called B{page space}, by
integer pixels in a horizontal range (increasing to the right) from C{0}
to C{width*density}, and vertically (from the top down) from C{0} to
C{height*density}. Density is either dots per inch if working in
English units, or dots per millimeter if working in metric.
World and Page Space
--------------------
Note that there are four coordinate spaces used by GDI: world, page,
device, and physical device. World and page are the same, unless a
world transform (L{SetWorldTransform<EMF.SetWorldTransform>},
L{ModifyWorldTransform<EMF.ModifyWorldTransform>}) is used. In that
case, you operate in world space (that is transformed into page space
by multiplying by the transformation matrix), and it could be scaled
differently.
Experimental Coordinate System
------------------------------
Experimental support for device coordinates is available through
L{SetMapMode<EMF.SetMapMode>} and the various Window and Viewport
methods. Device coordinates are referenced by physical dimensions
corresponding to the mapping mode currently used. [The methods work
correctly (in the sense that they generate the correct records in the
metafile) and the API won't change, but it's not clear to me what the
parameters should do.]
Drawing Characteristics
=======================
GDI has a concept of the B{current object} for the each of the three
drawing characteristics: line style, fill style, and font. Once a
characteristic is made current using
L{SelectObject<EMF.SelectObject>}, it remains current until it is
replaced by another call to SelectObject. Note that a call to
SelectObject only affects that characteristic, and not the other two,
so changing the line style doesn't affect the fill style or the font.
Additionally, there is a set of B{stock objects} retrievable with
L{GetStockObject<EMF.GetStockObject>} that should be available on any
system capable of rendering an EMF.
Colors
------
A quick note about color. Colors in pyemf are specified one of three
ways:
- (r,g,b) tuple, where each component is a integer between 0 and 255 inclusive.
- (r,g,b) tuple, where each component is a float between 0.0 and 1.0 inclusive.
- packed integer created by a call to L{RGB}
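Any of the three forms can be used wherever a color is expected. As a small
illustrative sketch (assuming C{emf} is an L{EMF} instance as in the
introduction)::

    red_tuple  = (255, 0, 0)           # 8-bit integer components
    red_float  = (1.0, 0.0, 0.0)       # floating point components
    red_packed = pyemf.RGB(255, 0, 0)  # packed integer, here 0x0000ff

    thin = emf.CreatePen(pyemf.PS_SOLID, 1, red_tuple)  # any form is accepted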
Line Styles
-----------
Line styles are created by L{CreatePen<EMF.CreatePen>} and specify the
style, width, and color.
Note that there is a NULL_PEN stock object if you don't actually want
to draw a line with a drawing primitive.
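For example, a sketch of creating and selecting a dashed, two-pixel-wide
blue pen (again assuming an L{EMF} instance C{emf})::

    dashed = emf.CreatePen(pyemf.PS_DASH, 2, (0x00, 0x00, 0xff))
    emf.SelectObject(dashed)
    # ... outlines drawn from here on use the dashed blue pen ...
    emf.SelectObject(emf.GetStockObject(pyemf.NULL_PEN))  # no outline at all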
Fill Styles
-----------
Polygon fill styles are created by
L{CreateSolidBrush<EMF.CreateSolidBrush>} and theoretically
L{CreateHatchBrush<EMF.CreateHatchBrush>}, although the latter doesn't
seem to be supported currently in OpenOffice. So, reliably we can only
use CreateSolidBrush and thus can only specify a fill color and not a
fill pattern.
Note that there is a stock object NULL_BRUSH that doesn't fill, useful
if you want to only draw an outline of a primitive that is normally
filled.
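A minimal sketch of switching between a solid fill and no fill at all
(assuming an L{EMF} instance C{emf})::

    green = emf.CreateSolidBrush((0x00, 0x80, 0x00))
    emf.SelectObject(green)       # closed figures are now filled green
    emf.SelectObject(emf.GetStockObject(pyemf.NULL_BRUSH))  # no fill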
An interesting side-note is that there is no direct support for
gradients in EMF. Examining some .emfs that do have gradients shows
that Windows produces them using clipping regions and subdividing the
object into areas of a single color and drawing slices of the
individual color. Better support for clipping regions is the subject
of a future release of pyemf, but they also don't seem to work well in
OpenOffice, so it hasn't been a high priority.
Fonts
-----
L{CreateFont<EMF.CreateFont>} requires a large number of parameters,
the most important being the height, the rotation, and the name. Note
that the height can either be specified as a positive or negative
integer, where negative means use that value as the average I{glyph}
height and positive means use the value as the average I{cell} height.
Since a glyph is contained within a cell, the negative value will
yield a slightly larger font when rendered on screen.
Note that the two rotation values must specify the same angle.
Also note that font color is not part of a
L{SelectObject<EMF.SelectObject>} characteristic. It is specified
using the separate method L{SetTextColor<EMF.SetTextColor>}.
L{SetBkMode<EMF.SetBkMode>} and L{SetBkColor<EMF.SetBkColor>} are
supposed to work with text, but in my testing with OpenOffice it hasn't been
consistent. I tend to just C{SetBkMode(pyemf.TRANSPARENT)} and leave
it at that.
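The sketch below follows the GDI C{LOGFONT} argument order (height, width,
escapement, orientation, weight, italic, underline, strikeout, charset,
output precision, clip precision, quality, pitch/family, face name); see
L{CreateFont<EMF.CreateFont>} for the authoritative parameter list::

    font = emf.CreateFont(-24, 0, 0, 0, pyemf.FW_NORMAL, 0, 0, 0,
                          pyemf.ANSI_CHARSET, pyemf.OUT_DEFAULT_PRECIS,
                          pyemf.CLIP_DEFAULT_PRECIS, pyemf.DEFAULT_QUALITY,
                          pyemf.DEFAULT_PITCH | pyemf.FF_DONTCARE, "Arial")
    emf.SelectObject(font)
    emf.SetTextColor((0x00, 0x00, 0x00))
    emf.SetBkMode(pyemf.TRANSPARENT)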
Drawing
=======
The methods listed under B{Drawing Primitives} below use either the
current line style or the current fill style (or both). Any primitive
that creates a closed figure (L{Polygon<EMF.Polygon>},
L{PolyPolygon<EMF.PolyPolygon>}, L{Rectangle<EMF.Rectangle>},
L{RoundRect<EMF.RoundRect>}, L{Ellipse<EMF.Ellipse>},
L{Chord<EMF.Chord>}, and L{Pie<EMF.Pie>}) will use both the line and
fill style. Others (L{Polyline<EMF.Polyline>},
L{PolyPolyline<EMF.PolyPolyline>} and L{Arc<EMF.Arc>}) will only use
the line style, excepting L{SetPixel<EMF.SetPixel>} which doesn't use either.
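A short sketch contrasting the filled primitives with the outline-only ones
(assuming GDI-style C{(left, top, right, bottom)} arguments for the
bounding box)::

    emf.Rectangle(100, 100, 400, 300)       # outlined and filled
    emf.Ellipse(100, 100, 400, 300)         # inscribed in the same box
    emf.Polyline([(100, 100), (400, 300)])  # outlined only, never filled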
Paths
=====
To create more complicated shapes, the B{Path Primitives} are used. A
path is started with a call to L{BeginPath<EMF.BeginPath>} and the
initial point should be set with L{MoveTo<EMF.MoveTo>}. Calls to
L{LineTo<EMF.LineTo>}, L{PolylineTo<EMF.PolylineTo>},
L{ArcTo<EMF.ArcTo>}, and L{PolyBezierTo<EMF.PolyBezierTo>} extend the
path. L{CloseFigure<EMF.CloseFigure>} should be used to connect the
final point to the starting point, otherwise the path may be filled
incorrectly. L{EndPath<EMF.EndPath>} then completes the path, and it
may be outlined with L{StrokePath<EMF.StrokePath>}, filled with
L{FillPath<EMF.FillPath>} or both with
L{StrokeAndFillPath<EMF.StrokeAndFillPath>}.
Note that OpenOffice ignores L{ArcTo<EMF.ArcTo>} in terms of path
continuity -- the arc is drawn, but it is not connected to the path.
Note that L{SelectClipPath<EMF.SelectClipPath>} is broken in OpenOffice.
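A minimal path sketch that outlines and fills a triangle::

    emf.BeginPath()
    emf.MoveTo(100, 100)
    emf.LineTo(300, 100)
    emf.LineTo(200, 300)
    emf.CloseFigure()          # connect back to the starting point
    emf.EndPath()
    emf.StrokeAndFillPath()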
Coordinate System Transformation
================================
You might have noticed that methods like L{Ellipse<EMF.Ellipse>} and
L{Rectangle<EMF.Rectangle>} can only create objects that are aligned
with the X-Y axis. This would be a real limitation without some way
to rotate the figures. L{SetWorldTransform<EMF.SetWorldTransform>}
and L{ModifyWorldTransform<EMF.ModifyWorldTransform>} provide this.
These methods provide a generalized linear transformation that can
translate, rotate, scale and shear subsequent graphics operations.
These methods aren't required by the ECMA-234 spec, which may explain
why their support in OpenOffice is mixed. Drawing primitives and
paths seem to be supported and are transformed, but text is not
(though it should be).
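As an illustrative sketch, assuming the method arguments mirror the record
ordering (C{eM11}, C{eM12}, C{eM21}, C{eM22}, C{eDx}, C{eDy})::

    from math import cos, sin, radians
    a = radians(30)
    emf.SetWorldTransform(cos(a), sin(a), -sin(a), cos(a), 0.0, 0.0)
    emf.Rectangle(100, 100, 400, 300)      # drawn rotated by 30 degrees
    emf.SetWorldTransform(1.0, 0.0, 0.0, 1.0, 0.0, 0.0)  # back to identity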
@author: $author
@version: $version
"""
from __future__ import print_function, division
__extra_epydoc_fields__ = [
('gdi', 'GDI Command', 'GDI Commands'),
('oo', 'OpenOffice Support'),
]
import os
import sys
import re
import struct
import copy
import inspect
# python 2/3 compatibility helpers
is_py3 = sys.version_info[0] == 3
if is_py3:
from io import BytesIO, StringIO
cunicode = str
else:
from cStringIO import StringIO
BytesIO = StringIO
cunicode = unicode
# setup.py requires that these be defined, and the OnceAndOnlyOnce
# principle is used here. This is the only place where these values
# are defined in the source distribution, and everything else that
# needs this should grab it from here.
__version__ = "2.0.0"
__author__ = "Rob McMullen"
__author_email__ = "robm@users.sourceforge.net"
__url__ = "http://pyemf.sourceforge.net"
__download_url__ = "http://sourceforge.net/project/showfiles.php?group_id=148144"
__description__ = "Pure Python Enhanced Metafile Library"
__keywords__ = "graphics, scalable, vector, image, clipart, emf"
__license__ = "LGPL"
# Reference: libemf.h
# and also wine: http://cvs.winehq.org/cvsweb/wine/include/wingdi.h
# Brush styles
BS_SOLID = 0
BS_NULL = 1
BS_HOLLOW = 1
BS_HATCHED = 2
BS_PATTERN = 3
BS_INDEXED = 4
BS_DIBPATTERN = 5
BS_DIBPATTERNPT = 6
BS_PATTERN8X8 = 7
BS_DIBPATTERN8X8 = 8
BS_MONOPATTERN = 9
# Hatch styles
HS_HORIZONTAL = 0
HS_VERTICAL = 1
HS_FDIAGONAL = 2
HS_BDIAGONAL = 3
HS_CROSS = 4
HS_DIAGCROSS = 5
# mapping modes
MM_TEXT = 1
MM_LOMETRIC = 2
MM_HIMETRIC = 3
MM_LOENGLISH = 4
MM_HIENGLISH = 5
MM_TWIPS = 6
MM_ISOTROPIC = 7
MM_ANISOTROPIC = 8
MM_MAX = MM_ANISOTROPIC
# background modes
TRANSPARENT = 1
OPAQUE = 2
BKMODE_LAST = 2
# polyfill modes
ALTERNATE = 1
WINDING = 2
POLYFILL_LAST = 2
# line styles and options
PS_SOLID = 0x00000000
PS_DASH = 0x00000001
PS_DOT = 0x00000002
PS_DASHDOT = 0x00000003
PS_DASHDOTDOT = 0x00000004
PS_NULL = 0x00000005
PS_INSIDEFRAME = 0x00000006
PS_USERSTYLE = 0x00000007
PS_ALTERNATE = 0x00000008
PS_STYLE_MASK = 0x0000000f
PS_ENDCAP_ROUND = 0x00000000
PS_ENDCAP_SQUARE = 0x00000100
PS_ENDCAP_FLAT = 0x00000200
PS_ENDCAP_MASK = 0x00000f00
PS_JOIN_ROUND = 0x00000000
PS_JOIN_BEVEL = 0x00001000
PS_JOIN_MITER = 0x00002000
PS_JOIN_MASK = 0x0000f000
PS_COSMETIC = 0x00000000
PS_GEOMETRIC = 0x00010000
PS_TYPE_MASK = 0x000f0000
# Stock GDI objects for GetStockObject()
WHITE_BRUSH = 0
LTGRAY_BRUSH = 1
GRAY_BRUSH = 2
DKGRAY_BRUSH = 3
BLACK_BRUSH = 4
NULL_BRUSH = 5
HOLLOW_BRUSH = 5
WHITE_PEN = 6
BLACK_PEN = 7
NULL_PEN = 8
OEM_FIXED_FONT = 10
ANSI_FIXED_FONT = 11
ANSI_VAR_FONT = 12
SYSTEM_FONT = 13
DEVICE_DEFAULT_FONT = 14
DEFAULT_PALETTE = 15
SYSTEM_FIXED_FONT = 16
DEFAULT_GUI_FONT = 17
STOCK_LAST = 17
# Text alignment
TA_NOUPDATECP = 0x00
TA_UPDATECP = 0x01
TA_LEFT = 0x00
TA_RIGHT = 0x02
TA_CENTER = 0x06
TA_TOP = 0x00
TA_BOTTOM = 0x08
TA_BASELINE = 0x18
TA_RTLREADING = 0x100
TA_MASK = TA_BASELINE+TA_CENTER+TA_UPDATECP+TA_RTLREADING
# lfWeight values
FW_DONTCARE = 0
FW_THIN = 100
FW_EXTRALIGHT = 200
FW_ULTRALIGHT = 200
FW_LIGHT = 300
FW_NORMAL = 400
FW_REGULAR = 400
FW_MEDIUM = 500
FW_SEMIBOLD = 600
FW_DEMIBOLD = 600
FW_BOLD = 700
FW_EXTRABOLD = 800
FW_ULTRABOLD = 800
FW_HEAVY = 900
FW_BLACK = 900
# lfCharSet values
ANSI_CHARSET = 0 # CP1252, ansi-0, iso8859-{1,15}
DEFAULT_CHARSET = 1
SYMBOL_CHARSET = 2
SHIFTJIS_CHARSET = 128 # CP932
HANGEUL_CHARSET = 129 # CP949, ksc5601.1987-0
HANGUL_CHARSET = HANGEUL_CHARSET
GB2312_CHARSET = 134 # CP936, gb2312.1980-0
CHINESEBIG5_CHARSET = 136 # CP950, big5.et-0
GREEK_CHARSET = 161 # CP1253
TURKISH_CHARSET = 162 # CP1254, -iso8859-9
HEBREW_CHARSET = 177 # CP1255, -iso8859-8
ARABIC_CHARSET = 178 # CP1256, -iso8859-6
BALTIC_CHARSET = 186 # CP1257, -iso8859-13
RUSSIAN_CHARSET = 204 # CP1251, -iso8859-5
EE_CHARSET = 238 # CP1250, -iso8859-2
EASTEUROPE_CHARSET = EE_CHARSET
THAI_CHARSET = 222 # CP874, iso8859-11, tis620
JOHAB_CHARSET = 130 # korean (johab) CP1361
MAC_CHARSET = 77
OEM_CHARSET = 255
VISCII_CHARSET = 240 # viscii1.1-1
TCVN_CHARSET = 241 # tcvn-0
KOI8_CHARSET = 242 # koi8-{r,u,ru}
ISO3_CHARSET = 243 # iso8859-3
ISO4_CHARSET = 244 # iso8859-4
ISO10_CHARSET = 245 # iso8859-10
CELTIC_CHARSET = 246 # iso8859-14
FS_LATIN1 = 0x00000001
FS_LATIN2 = 0x00000002
FS_CYRILLIC = 0x00000004
FS_GREEK = 0x00000008
FS_TURKISH = 0x00000010
FS_HEBREW = 0x00000020
FS_ARABIC = 0x00000040
FS_BALTIC = 0x00000080
FS_VIETNAMESE = 0x00000100
FS_THAI = 0x00010000
FS_JISJAPAN = 0x00020000
FS_CHINESESIMP = 0x00040000
FS_WANSUNG = 0x00080000
FS_CHINESETRAD = 0x00100000
FS_JOHAB = 0x00200000
FS_SYMBOL = 0x80000000
# lfOutPrecision values
OUT_DEFAULT_PRECIS = 0
OUT_STRING_PRECIS = 1
OUT_CHARACTER_PRECIS = 2
OUT_STROKE_PRECIS = 3
OUT_TT_PRECIS = 4
OUT_DEVICE_PRECIS = 5
OUT_RASTER_PRECIS = 6
OUT_TT_ONLY_PRECIS = 7
OUT_OUTLINE_PRECIS = 8
# lfClipPrecision values
CLIP_DEFAULT_PRECIS = 0x00
CLIP_CHARACTER_PRECIS = 0x01
CLIP_STROKE_PRECIS = 0x02
CLIP_MASK = 0x0F
CLIP_LH_ANGLES = 0x10
CLIP_TT_ALWAYS = 0x20
CLIP_EMBEDDED = 0x80
# lfQuality values
DEFAULT_QUALITY = 0
DRAFT_QUALITY = 1
PROOF_QUALITY = 2
NONANTIALIASED_QUALITY = 3
ANTIALIASED_QUALITY = 4
# lfPitchAndFamily pitch values
DEFAULT_PITCH = 0x00
FIXED_PITCH = 0x01
VARIABLE_PITCH = 0x02
MONO_FONT = 0x08
FF_DONTCARE = 0x00
FF_ROMAN = 0x10
FF_SWISS = 0x20
FF_MODERN = 0x30
FF_SCRIPT = 0x40
FF_DECORATIVE = 0x50
# Graphics Modes
GM_COMPATIBLE = 1
GM_ADVANCED = 2
GM_LAST = 2
# Arc direction modes
AD_COUNTERCLOCKWISE = 1
AD_CLOCKWISE = 2
# Clipping paths
RGN_ERROR = 0
RGN_AND = 1
RGN_OR = 2
RGN_XOR = 3
RGN_DIFF = 4
RGN_COPY = 5
RGN_MIN = RGN_AND
RGN_MAX = RGN_COPY
# Color management
ICM_OFF = 1
ICM_ON = 2
ICM_QUERY = 3
ICM_MIN = 1
ICM_MAX = 3
# World coordinate system transformation
MWT_IDENTITY = 1
MWT_LEFTMULTIPLY = 2
MWT_RIGHTMULTIPLY = 3
def _round4(num):
"""Round to the nearest multiple of 4 greater than or equal to the
given number. EMF records are required to be aligned to 4 byte
boundaries."""
return ((num+3)//4)*4
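# Quick illustration (not executed): _round4(0) == 0, _round4(13) == 16,
# _round4(16) == 16 -- record sizes are padded up to the next 4-byte boundary.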
def RGB(r,g,b):
"""
Pack integer color values into a 32-bit integer format.
@param r: 0 - 255 or 0.0 - 1.0 specifying red
@param g: 0 - 255 or 0.0 - 1.0 specifying green
@param b: 0 - 255 or 0.0 - 1.0 specifying blue
@return: single integer that should be used when any function needs a color value
@rtype: int
@type r: int or float
@type g: int or float
@type b: int or float
"""
if isinstance(r,float):
r=int(255*r)
if r>255: r=255
elif r<0: r=0
if isinstance(g,float):
g=int(255*g)
if g>255: g=255
elif g<0: g=0
if isinstance(b,float):
b=int(255*b)
if b>255: b=255
elif b<0: b=0
return ((b<<16)|(g<<8)|r)
def _normalizeColor(c):
"""
Normalize the input into a packed integer. If the input is a tuple,
pass it through L{RGB} to generate the color value.
@param c: color
@type c: int or (r,g,b) tuple
@return: packed integer color from L{RGB}
@rtype: int
"""
if isinstance(c,int):
return c
if isinstance(c,tuple) or isinstance(c,list):
return RGB(*c)
raise TypeError("Color must be specified as packed integer or 3-tuple (r,g,b)")
# FIXME: do I need DPtoLP and LPtoDP?
class _DC:
"""Device Context state machine. This is used to simulate the
state of the GDI buffer so that some user commands can return
information. In a real GDI implementation, there'd be lots of
error checking done, but here we can't do a whole bunch because
we're outputting to a metafile. So, in general, we assume
success.
Here's Microsoft's explanation of units: http://msdn.microsoft.com/library/default.asp?url=/library/en-us/gdi/cordspac_3qsz.asp
Window <=> Logical units <=> page space or user addressable
integer pixel units.
Viewport <=> Physical units <=> device units and are measured in actual
dimensions, like .01 mm units.
There are four coordinate spaces used by GDI: world, page, device,
and physical device. World and page are the same, unless a world
transform is used. These are addressed by integer pixels. Device
coordinates are referenced by physical dimensions corresponding to
the mapping mode currently used.
"""
def __init__(self,width='6.0',height='4.0',density='72',units='in'):
self.x=0
self.y=0
# list of objects that can be referenced by their index
# number, called "handle"
self.objects=[]
self.objects.append(None) # handle 0 is reserved
# Maintain a stack that contains list of empty slots in object
# list resulting from deletes
self.objectholes=[]
# Reference device size in logical units (pixels)
self.ref_pixelwidth=1024
self.ref_pixelheight=768
# Reference device size in mm
self.ref_width=320
self.ref_height=240
# physical dimensions are in .01 mm units
self.width=0
self.height=0
if units=='mm':
self.setPhysicalSize([[0,0],[int(width*100),int(height*100)]])
else:
self.setPhysicalSize([[0,0],[int(width*2540),int(height*2540)]])
# addressable pixel sizes
self.pixelwidth=0
self.pixelheight=0
self.setPixelSize([[0,0],[int(width*density),int(height*density)]])
#self.text_alignment = TA_BASELINE;
self.text_color = RGB(0,0,0);
#self.bk_mode = OPAQUE;
#self.polyfill_mode = ALTERNATE;
#self.map_mode = MM_TEXT;
# Viewport origin. A pixel drawn at (x,y) after the viewport
# origin has been set to (xv,yv) will be displayed at
# (x+xv,y+yv).
self.viewport_x=0
self.viewport_y=0
# Viewport extents. Should density be replaced by
# self.ref_pixelwidth/self.ref_width?
self.viewport_ext_x=self.width/100*density
self.viewport_ext_y=self.height/100*density
# Window origin. A pixel drawn at (x,y) after the window
# origin has been set to (xw,yw) will be displayed at
# (x-xw,y-yw).
# If both window and viewport origins are set, a pixel drawn
# at (x,y) will be displayed at (x-xw+xv,y-yw+yv)
self.window_x=0
self.window_y=0
# Window extents
self.window_ext_x=self.pixelwidth
self.window_ext_y=self.pixelheight
def getBounds(self,header):
"""Extract the dimensions from an _EMR._HEADER record."""
self.setPhysicalSize(header.rclFrame)
if header.szlMicrometers[0]>0:
self.ref_width=header.szlMicrometers[0]/10
self.ref_height=header.szlMicrometers[1]/10
else:
self.ref_width=header.szlMillimeters[0]*100
self.ref_height=header.szlMillimeters[1]*100
self.setPixelSize(header.rclBounds)
self.ref_pixelwidth=header.szlDevice[0]
self.ref_pixelheight=header.szlDevice[1]
def setPhysicalSize(self,points):
"""Set the physical (i.e. stuff you could measure with a
meterstick) dimensions."""
left=points[0][0]
top=points[0][1]
right=points[1][0]
bottom=points[1][1]
self.width=right-left
self.height=bottom-top
self.frame_left=left
self.frame_top=top
self.frame_right=right
self.frame_bottom=bottom
def setPixelSize(self,points):
"""Set the pixel-addressable dimensions."""
left=points[0][0]
top=points[0][1]
right=points[1][0]
bottom=points[1][1]
self.pixelwidth=right-left
self.pixelheight=bottom-top
self.bounds_left=left
self.bounds_top=top
self.bounds_right=right
self.bounds_bottom=bottom
def addObject(self,emr,handle=-1):
"""Add an object to the handle list, so it can be retrieved
later or deleted."""
count=len(self.objects)
if handle>0:
# print "Adding handle %s (%s)" % (handle,emr.__class__.__name__.lstrip('_'))
if handle>=count:
self.objects+=[None]*(handle-count+1)
self.objects[handle]=emr
elif self.objectholes:
handle=self.objectholes.pop()
self.objects[handle]=emr
else:
handle=count
self.objects.append(emr)
return handle
def removeObject(self,handle):
"""Remove an object by its handle. Handles can be reused, and
are reused from lowest available handle number."""
if handle<1 or handle>=len(self.objects):
raise IndexError("Invalid handle")
# print "removing handle %d (%s)" % (handle,self.objects[handle].__class__.__name__.lstrip('_'))
self.objects[handle]=None
found=False
# insert handle in objectholes list, but keep object holes
# list in sorted order
i=0
while i<len(self.objectholes):
if handle<self.objectholes[i]:
self.objectholes.insert(i,handle)
break
i+=1
else:
self.objectholes.append(handle)
# print self.objectholes
def popObject(self):
"""Remove last object. Used mainly in case of error."""
self.objects.pop()
##### - Field, Record, and related classes: a way to represent data
##### more advanced than using just import struct
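# Illustrative sketch (not executed): a Record subclass declares its wire format
# as a list of (typecode, name[, default]) entries, e.g.
#
#   class _EXAMPLE(Record):
#       typedef = [('i', 'count', 0),
#                  (Points(num='count'), 'points')]
#
# Plain struct typecodes are wrapped in shared StructFormat objects via
# FormatFactory, Field instances are used as-is, and Record.__getattr__ /
# __setattr__ expose the parsed values as plain attributes.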
# A structrecord class for EMF strings
class Field(object):
def __init__(self,fmt,size=1,num=1,offset=None):
# Format string, if applicable
self.fmt=fmt
# Size of record, in bytes
self.size=size
# Number of records. Could be an integer OR a text string
# that will be interpreted as a key in the object. That key
# will be referenced at unpack time to determine the number of
# records
self.num=num
# Relative offset to start of record, if supplied
self.offset=offset
self.debug=False
def getNumBytes(self,obj=None):
size=self.size*self.getNum(obj)
#if debug: print "size=%d" % size
return size
def calcNumBytes(self,obj,name):
if isinstance(obj.values[name],list) or isinstance(obj.values[name],tuple):
size=self.size*len(obj.values[name])
if self.debug: print(" calcNumBytes: size=%d len(obj.values[%s])=%d total=%d" % (self.size,name,len(obj.values[name]),size))
# also update the linked number, if applicable
else:
size=self.size*self.getNum(obj)
return size
# Get number of elements of this object (i.e. number of chars in a
# string, number of items in a list, etc.)
def getNum(self,obj=None):
num=0
if isinstance(self.num,int):
num=self.num
elif obj:
num=getattr(obj,self.num) # find obj."num"
#if debug: print "getting number for obj=%s, self.num=%s => num=%d" % (obj.__class__.__name__,self.num,num)
return num
def hasNumReference(self):
# If this format uses a reference to get the number, return
# that reference name
if isinstance(self.num,str):
return self.num
return False
def calcNum(self,obj,name):
if isinstance(obj.values[name],list) or isinstance(obj.values[name],tuple):
num=len(obj.values[name])
##if debug: print "calcNumBytes: size=%d num=%d" % (size,len(obj.values[name]))
# also update the linked number, if applicable
else:
num=self.getNum(obj)
return num
def getOffset(self,obj):
if self.offset==None: return None
offset=0
if isinstance(self.offset,int):
offset+=self.offset
elif obj:
offset+=getattr(obj,self.offset) # find obj."offset"
if self.debug: print("getting offset for obj=%s, self.offset=%s => offset=%d" % (obj.__class__.__name__,self.offset,offset))
return offset
def hasOffsetReference(self):
# If this format uses a reference to get the offset, return
# that reference name
        if isinstance(self.offset,str):
return self.offset
return False
def unpack(self,obj,name,data,ptr):
raise NotImplementedError()
def pack(self,obj,name,value):
raise NotImplementedError()
def getDefault(self):
return None
def setDefault(self):
pass
def getString(self,name,val):
return val
class StructFormat(Field):
def __init__(self,fmt):
Field.__init__(self,fmt,struct.calcsize(fmt))
def unpack(self,obj,name,data,ptr):
value=struct.unpack(self.fmt,data[ptr:ptr+self.size])[0]
return (value,self.size)
def pack(self,obj,name,value):
return struct.pack(self.fmt,value)
def str_color(self,val):
return "red=0x%02x green=0x%02x blue=0x%02x" % ((val&0xff),((val&0xff00)>>8),((val&0xff0000)>>16))
def getString(self,name,val):
if name.endswith("olor"):
val=self.str_color(val)
elif self.fmt.endswith("s"):
val=val.decode('utf-16le')
return val
class String(Field):
def __init__(self,default=None,size=1,num=1,offset=None):
# Note the two bytes per unicode char
Field.__init__(self,None,size=size,num=num,offset=offset)
self.setDefault(default)
def calcNumBytes(self,obj,name):
if self.hasNumReference():
# If this is a dynamic string, calculate the size required
txt=obj.values[name]
if self.size==2:
# it's unicode, so get the number of actual bytes required
# to store it
txt=txt.encode('utf-16')
# EMF requires that strings be stored as multiples of 4 bytes
return len(txt)
else:
# this is a fixed length string, so we know the length already.
return Field.calcNumBytes(self,obj,name)
def calcNum(self,obj,name):
if self.hasNumReference():
return len(obj.values[name])
else:
return Field.calcNumBytes(self,obj,name)
def unpack(self,obj,name,data,ptr):
offset=self.getOffset(obj)
if offset==None:
pass
elif offset>0:
ptr=offset
else:
return ('',0)
size=self.getNumBytes(obj)
txt=data[ptr:ptr+size]
if self.size==2:
txt=txt.decode('utf-16') # Now is a unicode string
if self.debug:
try:
print("str: '%s'" % str(txt))
except UnicodeEncodeError:
print("<<<BAD UNICODE STRING>>>: '%s'" % repr(txt))
return (txt,size)
def pack(self,obj,name,value):
txt=value
if self.size==2:
txt=txt.encode('utf-16')
maxlen=self.getNumBytes(obj)
if len(txt)>maxlen:
txt=txt[0:maxlen]
else:
txt+='\0'*(maxlen-len(txt))
return txt
def getDefault(self):
# FIXME: need to take account of number
return self.default
def setDefault(self,default):
if default is None:
if self.size==2:
default=u''
else:
default=''
self.default=default
class CString(String):
def __init__(self,default=None,num=1,offset=None):
String.__init__(self,None,size=1,num=num,offset=offset)
def unpack(self,obj,name,data,ptr):
(txt,size)=String.unpack(self,obj,name,data,ptr)
i=0
while i<size:
if txt[i]=='\0': break
i+=1
return (txt[0:i],i)
class List(Field):
def __init__(self,default=None,num=1,fmt='i',offset=None):
Field.__init__(self,fmt,struct.calcsize(fmt),num,offset=offset)
self.setDefault(default)
def unpack(self,obj,name,data,ptr):
values=[]
offset=self.getOffset(obj)
if offset==None:
pass
elif offset>0:
ptr=offset
else:
return (values,0)
num=self.getNum(obj)
while num>0:
values.append(struct.unpack(self.fmt,data[ptr:ptr+self.size])[0])
ptr+=self.size
num-=1
return (values,self.getNumBytes(obj))
def pack(self,obj,name,value):
fh=BytesIO()
size=0
for val in value:
fh.write(struct.pack(self.fmt,val))
return fh.getvalue()
def getDefault(self):
return self.default
def setDefault(self,default):
if default is not None:
default=[0]*self.getNum()
self.default=default
class Tuples(Field):
def __init__(self,default=None,rank=2,num=1,fmt='i',offset=None):
if fmt[0] in "<>@!=":
fmt=fmt[0]+fmt[1]*rank
else:
fmt=fmt*rank
Field.__init__(self,fmt,struct.calcsize(fmt),num,offset=offset)
if self.debug: print("Tuples:%s self.size=%d" % (self.__class__.__name__,self.size))
self.rank=rank
self.setDefault(default)
# always create a list of lists
def unpack(self,obj,name,data,ptr):
values=[]
offset=self.getOffset(obj)
if offset==None:
pass
elif offset>0:
ptr=offset
else:
return (values,0)
num=self.getNum(obj)
if self.debug: print("unpack: name=%s num=%d ptr=%d datasize=%d" % (name,num,ptr,len(data)))
while num>0:
values.append(list(struct.unpack(self.fmt,data[ptr:ptr+self.size])))
ptr+=self.size
num-=1
return (values,self.getNumBytes(obj))
# assuming a list of lists
def pack(self,obj,name,value):
fh=BytesIO()
size=0
if self.debug: print("pack: value=%s" % (str(value)))
for val in value:
fh.write(struct.pack(self.fmt,*val))
return fh.getvalue()
def getDefault(self):
# FIXME: need to take account of number
return self.default
def setDefault(self,default):
if default is None:
default=[[0]*self.rank]*self.getNum()
self.default=default
# Special case of two-tuples
class Points(Tuples):
def __init__(self,default=None,num=1,fmt='i',offset=None):
Tuples.__init__(self,rank=2,num=num,fmt=fmt,default=default,offset=offset)
# Factory for a bunch of flyweight Struct objects
fmtfactory={}
def FormatFactory(fmt):
if fmt in fmtfactory:
return fmtfactory[fmt]
fmtobj=StructFormat(fmt)
fmtfactory[fmt]=fmtobj
return fmtobj
class RecordFormat:
default_endian="<"
def __init__(self,typedef):
self.typedef=typedef
self.minstructsize=0 # minimum structure size (variable entries not counted)
self.endian=self.default_endian
self.fmtmap={} # map of name to typecode object
self.default={}
self.names=[] # order of names in record
self.debug=0
self.fmt=''
self.setFormat(typedef)
def getDefaults(self):
values={}
for name in self.names:
fmt=self.fmtmap[name]
values[name]=self.default[name]
return values
def setFormat(self,typedef,default=None):
if self.debug: print("typedef=%s" % str(typedef))
if isinstance(typedef,list) or isinstance(typedef,tuple):
for item in typedef:
if len(item)==3:
typecode,name,default=item
else:
typecode,name=item
self.appendFormat(typecode,name,default)
elif typedef:
raise AttributeError("format must be a list")
if self.debug: print("current struct=%s size=%d\n names=%s" % (self.fmt,self.minstructsize,self.names))
def appendFormat(self,typecode,name,defaultvalue):
if isinstance(typecode,str):
if typecode[0] not in "<>@!=":
typecode=self.endian+typecode
self.fmt+=typecode
fmtobj=FormatFactory(typecode)
self.fmtmap[name]=fmtobj
self.default[name]=defaultvalue
elif isinstance(typecode,Field):
self.fmt+='{'+typecode.__class__.__name__+'}'
if defaultvalue is not None:
typecode.setDefault(defaultvalue)
self.fmtmap[name]=typecode
self.default[name]=self.fmtmap[name].getDefault()
else:
self.fmt+='{'+typecode.__class__.__name__+'}'
self.fmtmap[name]=typecode(defaultvalue)
self.default[name]=self.fmtmap[name].getDefault()
self.minstructsize+=self.fmtmap[name].getNumBytes()
self.names.append(name)
def calcNumBytes(self,obj):
size=0
for name in self.names:
fmt=self.fmtmap[name]
bytes=fmt.calcNumBytes(obj,name)
if self.debug: print("calcNumBytes: %s=%d" % (name,bytes))
size+=bytes
return size
def unpack(self,data,obj,initptr=0):
ptr=initptr
obj.values={}
if self.minstructsize+ptr>0:
if self.minstructsize+ptr>len(data):
# we have a problem. More stuff to unparse than
# we have data. Hmmm. Fill with binary zeros
# till I think of a better idea.
                data+=b"\0"*(self.minstructsize+ptr-len(data))
for name in self.names:
fmt=self.fmtmap[name]
(value,size)=fmt.unpack(obj,name,data,ptr)
#if fmt.fmt=="<i": value=0
#if self.debug: print "name=%s fmt=%s value=%s" % (name,fmt.fmt,str(value))
obj.values[name]=value
ptr+=size
return ptr
def pack(self,values,obj,alreadypacked=0):
fh=BytesIO()
size=0
output={}
# First, create all the output bytes
for name in self.names:
fmt=self.fmtmap[name]
try:
output[name]=fmt.pack(obj,name,values[name])
except:
print("Exception while trying to pack %s for object:" % name)
print(obj)
raise
# check if the offset to this parameter needs to be
# adjusted. This is assuming that the offset occurs
# BEFORE the parameter, which isn't a bad assumption.
refname=fmt.hasOffsetReference()
#print output[name]
if refname and output[name]:
if self.debug: print("pack: %s has offset %s, was=%d now=%d" % (name,refname,values[refname],size+alreadypacked))
values[refname]=size+alreadypacked
output[refname]=self.fmtmap[refname].pack(obj,refname,values[refname])
# also need to check if a dependent length needs to be updated
refname=fmt.hasNumReference()
#if self.debug: print output[name]
if refname and output[name]:
newnum=fmt.calcNum(obj,name)
if self.debug: print("pack: %s has num %s, was=%d now=%d" % (name,refname,values[refname],newnum))
values[refname]=newnum
output[refname]=self.fmtmap[refname].pack(obj,refname,values[refname])
size+=len(output[name])
# Finally, write all the output bytes to the filehandle
for name in self.names:
fh.write(output[name])
return fh.getvalue()
def getString(self,obj):
txt=StringIO()
for name in self.names:
fmt=self.fmtmap[name]
val=fmt.getString(name,obj.values[name])
try:
txt.write("\t%-20s: %s\n" % (name,val))
except UnicodeEncodeError:
txt.write("\t%-20s: <<<BAD UNICODE STRING>>> %s\n" % (name,repr(val)))
return txt.getvalue()
class Record(object):
"""baseclass for binary records"""
format=None
typedef=()
def __init__(self):
# if we've never seen this class before, create a new format.
# Note that subclasses of classes that we have already seen
# pick up any undefined class attributes from their
# superclasses, so we have to check if this is a subclass with
# a different typedef
if self.__class__.format==None or self.__class__.typedef != self.format.typedef:
# if self.debug: print "creating format for %d" % id
self.__class__.format=RecordFormat(self.__class__.typedef)
# list of values parsed from the input stream
self.values=self.__class__.format.getDefaults()
def __getattr__(self,name):
"""Return EMR attribute if the name exists in the typedef list
of the object. This is only called when the standard
attribute lookup fails on this object, so we don't have to
handle the case where name is an actual attribute of self."""
f=Record.__getattribute__(self,'format')
try:
if name in f.names:
v=Record.__getattribute__(self,'values')
return v[name]
except IndexError:
            raise IndexError("name=%s values=%s" % (name,str(v)))
raise AttributeError("%s not defined in EMR object" % name)
def __setattr__(self,name,value):
"""Set a value in the object, propagating through to
self.values[] if the name is in the typedef list."""
f=Record.__getattribute__(self,'format')
try:
if f and name in f.names:
v=Record.__getattribute__(self,'values')
v[name]=value
else:
# it's not an automatically serializable item, so store it.
self.__dict__[name]=value
except IndexError:
            raise IndexError("name=%s values=%s" % (name,str(v)))
class EMFString(Field):
def __init__(self,default=None,size=2,num=1,offset=None):
# Note the two bytes per unicode char
Field.__init__(self,None,size=size,num=num,offset=offset)
self.setDefault(default)
def calcNumBytes(self,obj,name):
if self.hasNumReference():
# If this is a dynamic string, calculate the size required
txt=obj.values[name]
if self.size==2:
# it's unicode, so get the number of actual bytes required
# to store it
txt=txt.encode('utf-16le')
# EMF requires that strings be stored as multiples of 4 bytes
extra=_round4(len(txt))-len(txt)
return len(txt)+extra
else:
# this is a fixed length string, so we know the length already.
return Field.calcNumBytes(self,obj,name)
def calcNum(self,obj,name):
if self.hasNumReference():
return len(obj.values[name])
else:
return Field.calcNumBytes(self,obj,name)
def unpack(self,obj,name,data,ptr):
offset=self.getOffset(obj)
if offset==None:
pass
elif offset>0:
ptr=offset
else:
return ('',0)
size=self.getNumBytes(obj)
txt=data[ptr:ptr+size]
size=_round4(len(txt))
if self.size==2:
txt=txt.decode('utf-16le') # Now is a unicode string
if self.debug:
try:
print("str: '%s'" % str(txt))
except UnicodeEncodeError:
print("<<<BAD UNICODE STRING>>>: '%s'" % repr(txt))
return (txt,size)
def pack(self,obj,name,value):
txt=value
if isinstance(txt, cunicode):
txt = txt.encode('utf-16le')
if self.hasNumReference():
extra=_round4(len(txt))-len(txt) # must be multiple of 4
if extra>0:
txt+=b'\0'*extra
else:
maxlen=self.getNumBytes(obj)
if len(txt)>maxlen:
txt=txt[0:maxlen]
else:
txt+=b'\0'*(maxlen-len(txt))
return txt
def getDefault(self):
# FIXME: need to take account of number
return self.default
def setDefault(self,default):
if default is None:
if self.size==2:
default=u''
else:
default=''
self.default=default
class _EMR_UNKNOWN(Record): # extend from new-style class, or __getattr__ doesn't work
"""baseclass for EMR objects"""
emr_id=0
twobytepadding=b'\0'*2
def __init__(self):
Record.__init__(self)
self.iType=self.__class__.emr_id
self.nSize=0
self.verbose=False
self.datasize=0
self.data=None
self.unhandleddata=None
# error code. Currently just used as a boolean
self.error=0
def hasHandle(self):
"""Return true if this object has a handle that needs to be
saved in the object array for later recall by SelectObject."""
return False
def setBounds(self,bounds):
"""Set bounds of object. Depends on naming convention always
defining the bounding rectangle as
rclBounds_[left|top|right|bottom]."""
self.rclBounds=[[bounds[0][0],bounds[0][1]],[bounds[1][0],bounds[1][1]]]
def getBounds(self):
"""Return bounds of object, or None if not applicable."""
if 'rclBounds' in self.values:
return self.rclBounds
return None
def unserialize(self,fh,already_read,itype=-1,nsize=-1):
"""Read data from the file object and, using the format
structure defined by the subclass, parse the data and store it
in self.values[] list."""
prevlen=len(already_read)
if itype>0:
self.iType=itype
self.nSize=nsize
else:
(self.iType,self.nSize)=struct.unpack("<ii",already_read)
if self.nSize>prevlen:
self.data=already_read+fh.read(self.nSize-prevlen)
last=self.format.unpack(self.data,self,prevlen)
if self.nSize>last:
self.unserializeExtra(self.data[last:])
def unserializeExtra(self,data):
"""Hook for subclasses to handle extra data in the record that
isn't specified by the format statement."""
self.unhandleddata=data
pass
def serialize(self,fh):
try:
#print "packing!"
bytes=self.format.pack(self.values,self,8)
#fh.write(struct.pack(self.format.fmt,*self.values))
except struct.error:
print("!!!!!Struct error:", end=' ')
print(self)
raise
before=self.nSize
self.nSize=8+len(bytes)+self.sizeExtra()
if self.verbose and before!=self.nSize:
print("resize: before=%d after=%d" % (before,self.nSize), end=' ')
print(self)
if self.nSize%4 != 0:
print("size error--must be divisible by 4. before=%d after=%d calcNumBytes=%d extra=%d" % (before,self.nSize,len(bytes),self.sizeExtra()))
for name in self.format.names:
fmt=self.format.fmtmap[name]
size=fmt.calcNumBytes(self,name)
print(" name=%s size=%s" % (name,size))
print(self)
raise TypeError
fh.write(struct.pack("<ii",self.iType,self.nSize))
fh.write(bytes)
self.serializeExtra(fh)
def serializeExtra(self,fh):
"""This is for special cases, like writing text or lists. If
this is not overridden by a subclass method, it will write out
anything in the self.unhandleddata string."""
if self.unhandleddata:
fh.write(self.unhandleddata)
def resize(self):
before=self.nSize
self.nSize=8+self.format.calcNumBytes(self)+self.sizeExtra()
if self.verbose and before!=self.nSize:
print("resize: before=%d after=%d" % (before,self.nSize), end=' ')
print(self)
if self.nSize%4 != 0:
print("size error--must be divisible by 4. before=%d after=%d calcNumBytes=%d extra=%d" % (before,self.nSize,self.format.calcNumBytes(self),self.sizeExtra()))
for name in self.format.names:
fmt=self.format.fmtmap[name]
size=fmt.calcNumBytes(self,name)
print(" name=%s size=%s" % (name,size))
print(self)
raise TypeError
def sizeExtra(self):
"""Hook for subclasses before anything is serialized. This is
used to return the size of any extra components not in the
format string, and also provide the opportunity to recalculate
any changes in size that should be reflected in self.nSize
before the record is written out."""
if self.unhandleddata:
return len(self.unhandleddata)
return 0
def __str__(self):
ret=""
details=self.format.getString(self)
if details:
ret=os.linesep
return "**%s: iType=%s nSize=%s struct='%s' size=%d extra=%d\n%s%s" % (self.__class__.__name__.lstrip('_'),self.iType,self.nSize,self.format.fmt,self.format.minstructsize,self.sizeExtra(),details,ret)
return
# Collection of classes
class _EMR:
class _HEADER(_EMR_UNKNOWN):
"""Header has different fields depending on the version of
windows. Also note that if offDescription==0, there is no
description string."""
emr_id=1
typedef=[
(Points(num=2),'rclBounds'),
(Points(num=2),'rclFrame'),
('i','dSignature',1179469088),
('i','nVersion',0x10000),
('i','nBytes',0),
('i','nRecords',0),
('h','nHandles',0),
('h','sReserved',0),
('i','nDescription',0),
('i','offDescription',0),
('i','nPalEntries',0),
(List(num=2),'szlDevice',[1024,768]),
(List(num=2),'szlMillimeters',[320,240]),
('i','cbPixelFormat',0),
('i','offPixelFormat',0),
('i','bOpenGL',0),
(List(num=2),'szlMicrometers'),
(EMFString(num='nDescription',offset='offDescription'),'description'),
]
def __init__(self,description=''):
_EMR_UNKNOWN.__init__(self)
print(self)
print(self.__class__.format.default)
# NOTE: rclBounds and rclFrame will be determined at
# serialize time
self.description = description
if len(description)>0:
self.description=u'pyemf '+__version__+u'\0'+description+u'\0\0'
self.nDescription=len(self.description)
def setBounds(self,dc,scaleheader):
self.rclBounds=[[dc.bounds_left,dc.bounds_top],
[dc.bounds_right,dc.bounds_bottom]]
self.rclFrame=[[dc.frame_left,dc.frame_top],
[dc.frame_right,dc.frame_bottom]]
print(self)
if scaleheader:
self.szlDevice[0]=dc.pixelwidth
self.szlDevice[1]=dc.pixelheight
self.szlMicrometers[0]=dc.width*10
self.szlMicrometers[1]=dc.height*10
else:
self.szlDevice[0]=dc.ref_pixelwidth
self.szlDevice[1]=dc.ref_pixelheight
self.szlMicrometers[0]=dc.ref_width*10
self.szlMicrometers[1]=dc.ref_height*10
self.szlMillimeters[0]=self.szlMicrometers[0]//1000
self.szlMillimeters[1]=self.szlMicrometers[1]//1000
class _POLYBEZIER(_EMR_UNKNOWN):
emr_id=2
typedef=[
(Points(num=2),'rclBounds'),
('i','cptl'),
(Points(num='cptl',fmt='i'),'aptl'),
]
def __init__(self,points=[],bounds=((0,0),(0,0))):
_EMR_UNKNOWN.__init__(self)
self.setBounds(bounds)
self.cptl=len(points)
self.aptl=points
class _POLYGON(_POLYBEZIER):
emr_id=3
pass
class _POLYLINE(_POLYBEZIER):
emr_id=4
pass
class _POLYBEZIERTO(_POLYBEZIER):
emr_id=5
pass
class _POLYLINETO(_POLYBEZIERTO):
emr_id=6
pass
class _POLYPOLYLINE(_EMR_UNKNOWN):
emr_id=7
typedef=[
(Points(num=2),'rclBounds'),
('i','nPolys'),
('i','cptl'),
(List(num='nPolys',fmt='i'),'aPolyCounts'),
(Points(num='cptl',fmt='i'),'aptl'),
]
def __init__(self,points=[],polycounts=[],bounds=((0,0),(0,0))):
_EMR_UNKNOWN.__init__(self)
self.setBounds(bounds)
self.cptl=len(points)
self.aptl=points
self.nPolys=len(polycounts)
self.aPolyCounts=polycounts
class _POLYPOLYGON(_POLYPOLYLINE):
emr_id=8
pass
class _SETWINDOWEXTEX(_EMR_UNKNOWN):
emr_id=9
typedef=[
('i','szlExtent_cx'),
('i','szlExtent_cy'),
]
def __init__(self,cx=0,cy=0):
_EMR_UNKNOWN.__init__(self)
self.szlExtent_cx=cx
self.szlExtent_cy=cy
class _SETWINDOWORGEX(_EMR_UNKNOWN):
emr_id=10
typedef=[
('i','ptlOrigin_x'),
('i','ptlOrigin_y'),
]
def __init__(self,x=0,y=0):
_EMR_UNKNOWN.__init__(self)
self.ptlOrigin_x=x
self.ptlOrigin_y=y
class _SETVIEWPORTEXTEX(_SETWINDOWEXTEX):
emr_id=11
pass
class _SETVIEWPORTORGEX(_SETWINDOWORGEX):
emr_id=12
pass
class _SETBRUSHORGEX(_SETWINDOWORGEX):
emr_id=13
pass
class _EOF(_EMR_UNKNOWN):
"""End of file marker. Usually 20 bytes long, but I have a
Windows generated .emf file that only has a 12 byte long EOF
record. I don't know if that's a broken example or what, but
both Windows progs and OpenOffice seem to handle it."""
emr_id=14
typedef=[
('i','nPalEntries',0),
('i','offPalEntries',0),
('i','nSizeLast',0)
]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
class _SETPIXELV(_EMR_UNKNOWN):
emr_id=15
typedef=[
('i','ptlPixel_x'),
('i','ptlPixel_y'),
('i','crColor')
]
def __init__(self,x=0,y=0,color=0):
_EMR_UNKNOWN.__init__(self)
self.ptlPixel_x=x
self.ptlPixel_y=y
self.crColor=color
class _SETMAPPERFLAGS(_EMR_UNKNOWN):
emr_id=16
emr_format=[('i','dwFlags',0)]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
class _SETMAPMODE(_EMR_UNKNOWN):
emr_id=17
typedef=[('i','iMode',MM_ANISOTROPIC)]
def __init__(self,mode=MM_ANISOTROPIC,first=0,last=MM_MAX):
_EMR_UNKNOWN.__init__(self)
if mode<first or mode>last:
self.error=1
else:
self.iMode=mode
class _SETBKMODE(_SETMAPMODE):
emr_id=18
def __init__(self,mode=OPAQUE):
_EMR._SETMAPMODE.__init__(self,mode,last=BKMODE_LAST)
class _SETPOLYFILLMODE(_SETMAPMODE):
emr_id=19
def __init__(self,mode=ALTERNATE):
_EMR._SETMAPMODE.__init__(self,mode,last=POLYFILL_LAST)
class _SETROP2(_SETMAPMODE):
emr_id=20
pass
class _SETSTRETCHBLTMODE(_SETMAPMODE):
emr_id=21
pass
class _SETTEXTALIGN(_SETMAPMODE):
emr_id=22
def __init__(self,mode=TA_BASELINE):
_EMR._SETMAPMODE.__init__(self,mode,last=TA_MASK)
#define EMR_SETCOLORADJUSTMENT 23
class _SETTEXTCOLOR(_EMR_UNKNOWN):
emr_id=24
typedef=[('i','crColor',0)]
def __init__(self,color=0):
_EMR_UNKNOWN.__init__(self)
self.crColor=color
class _SETBKCOLOR(_SETTEXTCOLOR):
emr_id=25
pass
#define EMR_OFFSETCLIPRGN 26
class _MOVETOEX(_EMR_UNKNOWN):
emr_id=27
typedef=[
('i','ptl_x'),
('i','ptl_y'),
]
def __init__(self,x=0,y=0):
_EMR_UNKNOWN.__init__(self)
self.ptl_x=x
self.ptl_y=y
def getBounds(self):
return ((self.ptl_x,self.ptl_y),(self.ptl_x,self.ptl_y))
#define EMR_SETMETARGN 28
#define EMR_EXCLUDECLIPRECT 29
#define EMR_INTERSECTCLIPRECT 30
class _SCALEVIEWPORTEXTEX(_EMR_UNKNOWN):
emr_id=31
typedef=[
('i','xNum',1),
('i','xDenom',1),
('i','yNum',1),
('i','yDenom',1),
]
def __init__(self,xn=1,xd=1,yn=1,yd=1):
_EMR_UNKNOWN.__init__(self)
self.xNum=xn
self.xDenom=xd
self.yNum=yn
self.yDenom=yd
class _SCALEWINDOWEXTEX(_SCALEVIEWPORTEXTEX):
emr_id=32
pass
class _SAVEDC(_EMR_UNKNOWN):
emr_id=33
pass
class _RESTOREDC(_EMR_UNKNOWN):
emr_id=34
typedef=[('i','iRelative')]
def __init__(self,rel=-1):
_EMR_UNKNOWN.__init__(self)
self.iRelative=rel
class _SETWORLDTRANSFORM(_EMR_UNKNOWN):
emr_id=35
typedef=[
('f','eM11'),
('f','eM12'),
('f','eM21'),
('f','eM22'),
('f','eDx'),
('f','eDy'),
]
def __init__(self,em11=1.0,em12=0.0,em21=0.0,em22=1.0,edx=0.0,edy=0.0):
_EMR_UNKNOWN.__init__(self)
self.eM11=em11
self.eM12=em12
self.eM21=em21
self.eM22=em22
self.eDx=edx
self.eDy=edy
class _MODIFYWORLDTRANSFORM(_EMR_UNKNOWN):
emr_id=36
typedef=[
('f','eM11'),
('f','eM12'),
('f','eM21'),
('f','eM22'),
('f','eDx'),
('f','eDy'),
('i','iMode'),
]
def __init__(self,em11=1.0,em12=0.0,em21=0.0,em22=1.0,edx=0.0,edy=0.0,mode=MWT_IDENTITY):
_EMR_UNKNOWN.__init__(self)
self.eM11=em11
self.eM12=em12
self.eM21=em21
self.eM22=em22
self.eDx=edx
self.eDy=edy
self.iMode=mode
class _SELECTOBJECT(_EMR_UNKNOWN):
"""Select a brush, pen, font (or bitmap or region but there is
no current user interface for those) object to be current and
replace the previous item of that class. Note that stock
objects have their high order bit set, so the handle must be
an unsigned int."""
emr_id=37
typedef=[('I','handle')]
def __init__(self,dc=None,handle=0):
_EMR_UNKNOWN.__init__(self)
self.handle=handle
# Note: a line will still be drawn when the linewidth==0. To force an
# invisible line, use style=PS_NULL
class _CREATEPEN(_EMR_UNKNOWN):
emr_id=38
typedef=[
('i','handle',0),
('i','lopn_style'),
('i','lopn_width'),
('i','lopn_unused',0),
('i','lopn_color'),
]
def __init__(self,style=PS_SOLID,width=1,color=0):
_EMR_UNKNOWN.__init__(self)
self.lopn_style=style
self.lopn_width=width
self.lopn_color=color
def hasHandle(self):
return True
class _CREATEBRUSHINDIRECT(_EMR_UNKNOWN):
emr_id=39
typedef=[
('i','handle',0),
('I','lbStyle'),
('i','lbColor'),
('I','lbHatch'),
]
def __init__(self,style=BS_SOLID,hatch=HS_HORIZONTAL,color=0):
_EMR_UNKNOWN.__init__(self)
self.lbStyle = style
self.lbColor = color
self.lbHatch = hatch
def hasHandle(self):
return True
class _DELETEOBJECT(_SELECTOBJECT):
emr_id=40
pass
class _ANGLEARC(_EMR_UNKNOWN):
emr_id=41
typedef=[
('i','ptlCenter_x'),
('i','ptlCenter_y'),
('i','nRadius'),
('f','eStartAngle'),
('f','eSweepAngle'),
]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
class _ELLIPSE(_EMR_UNKNOWN):
emr_id=42
typedef=[
(Points(num=2),'rclBox'),
]
def __init__(self,box=((0,0),(0,0))):
_EMR_UNKNOWN.__init__(self)
self.rclBox=[[box[0][0],box[0][1]],[box[1][0],box[1][1]]]
class _RECTANGLE(_ELLIPSE):
emr_id=43
pass
class _ROUNDRECT(_EMR_UNKNOWN):
emr_id=44
typedef=[
(Points(num=2),'rclBox'),
('i','szlCorner_cx'),
('i','szlCorner_cy')
]
def __init__(self,box=((0,0),(0,0)),cx=0,cy=0):
_EMR_UNKNOWN.__init__(self)
self.rclBox=[[box[0][0],box[0][1]],[box[1][0],box[1][1]]]
self.szlCorner_cx=cx
self.szlCorner_cy=cy
class _ARC(_EMR_UNKNOWN):
emr_id=45
typedef=[
(Points(num=2),'rclBox'),
('i','ptlStart_x'),
('i','ptlStart_y'),
('i','ptlEnd_x'),
('i','ptlEnd_y')]
def __init__(self,box=((0,0),(0,0)),
xstart=0,ystart=0,xend=0,yend=0):
_EMR_UNKNOWN.__init__(self)
self.rclBox=[[box[0][0],box[0][1]],[box[1][0],box[1][1]]]
self.ptlStart_x=xstart
self.ptlStart_y=ystart
self.ptlEnd_x=xend
self.ptlEnd_y=yend
class _CHORD(_ARC):
emr_id=46
pass
class _PIE(_ARC):
emr_id=47
pass
class _SELECTPALETTE(_EMR_UNKNOWN):
emr_id=48
typedef=[('i','handle')]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
# Stub class for palette
class _CREATEPALETTE(_EMR_UNKNOWN):
emr_id=49
typedef=[('i','handle',0)]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
def hasHandle(self):
return True
#define EMR_SETPALETTEENTRIES 50
#define EMR_RESIZEPALETTE 51
#define EMR_REALIZEPALETTE 52
#define EMR_EXTFLOODFILL 53
class _LINETO(_MOVETOEX):
emr_id=54
pass
class _ARCTO(_ARC):
emr_id=55
def getBounds(self):
# not exactly the bounds, because the arc may actually use
# less of the ellipse than is specified by the bounds.
# But at least the actual bounds aren't outside these
# bounds.
return self.rclBox
#define EMR_POLYDRAW 56
class _SETARCDIRECTION(_EMR_UNKNOWN):
emr_id=57
typedef=[('i','iArcDirection')]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
#define EMR_SETMITERLIMIT 58
class _BEGINPATH(_EMR_UNKNOWN):
emr_id=59
pass
class _ENDPATH(_EMR_UNKNOWN):
emr_id=60
pass
class _CLOSEFIGURE(_EMR_UNKNOWN):
emr_id=61
pass
class _FILLPATH(_EMR_UNKNOWN):
emr_id=62
typedef=[(Points(num=2),'rclBounds')]
def __init__(self,bounds=((0,0),(0,0))):
_EMR_UNKNOWN.__init__(self)
self.setBounds(bounds)
class _STROKEANDFILLPATH(_FILLPATH):
emr_id=63
pass
class _STROKEPATH(_FILLPATH):
emr_id=64
pass
class _FLATTENPATH(_EMR_UNKNOWN):
emr_id=65
pass
class _WIDENPATH(_EMR_UNKNOWN):
emr_id=66
pass
class _SELECTCLIPPATH(_SETMAPMODE):
"""Select the current path and make it the clipping region.
Must be a closed path.
@gdi: SelectClipPath
"""
emr_id=67
def __init__(self,mode=RGN_COPY):
_EMR._SETMAPMODE.__init__(self,mode,first=RGN_MIN,last=RGN_MAX)
class _ABORTPATH(_EMR_UNKNOWN):
"""Discards any current path, whether open or closed.
@gdi: AbortPath"""
emr_id=68
pass
#define EMR_GDICOMMENT 70
#define EMR_FILLRGN 71
#define EMR_FRAMERGN 72
#define EMR_INVERTRGN 73
#define EMR_PAINTRGN 74
#define EMR_EXTSELECTCLIPRGN 75
#define EMR_BITBLT 76
#define EMR_STRETCHBLT 77
#define EMR_MASKBLT 78
#define EMR_PLGBLT 79
#define EMR_SETDIBITSTODEVICE 80
class _STRETCHDIBITS(_EMR_UNKNOWN):
"""Copies the image from the source image to the destination
image. DIB is currently an opaque format to me, but
apparently it has been extended recently to allow JPG and PNG
images...
@gdi: StretchDIBits
"""
emr_id=81
typedef=[
(Points(num=2),'rclBounds'),
('i','xDest'),
('i','yDest'),
('i','xSrc'),
('i','ySrc'),
('i','cxSrc'),
('i','cySrc'),
('i','offBmiSrc'),
('i','cbBmiSrc'),
('i','offBitsSrc'),
('i','cbBitsSrc'),
('i','iUsageSrc'),
('i','dwRop'),
('i','cxDest'),
('i','cyDest')]
def __init__(self):
_EMR_UNKNOWN.__init__(self)
class _EXTCREATEFONTINDIRECTW(_EMR_UNKNOWN):
# Note: all the strings here (font names, etc.) are unicode
# strings.
emr_id=82
typedef=[
('i','handle'),
('i','lfHeight'),
('i','lfWidth'),
('i','lfEscapement'),
('i','lfOrientation'),
('i','lfWeight'),
('B','lfItalic'),
('B','lfUnderline'),
('B','lfStrikeOut'),
('B','lfCharSet'),
('B','lfOutPrecision'),
('B','lfClipPrecision'),
('B','lfQuality'),
('B','lfPitchAndFamily'),
(EMFString(num=32,size=2),'lfFaceName'),
# ('64s','lfFaceName',), # really a 32 char unicode string
(EMFString(num=64,size=2),'elfFullName'),
# ('128s','elfFullName','\0'*128), # really 64 char unicode str
(EMFString(num=32,size=2),'elfStyle'),
# ('64s','elfStyle','\0'*64), # really 32 char unicode str
('i','elfVersion',0),
('i','elfStyleSize',0),
('i','elfMatch',0),
('i','elfReserved',0),
('i','elfVendorId',0),
('i','elfCulture',0),
('B','elfPanose_bFamilyType',1),
('B','elfPanose_bSerifStyle',1),
('B','elfPanose_bWeight',1),
('B','elfPanose_bProportion',1),
('B','elfPanose_bContrast',1),
('B','elfPanose_bStrokeVariation',1),
('B','elfPanose_bArmStyle',1),
('B','elfPanose_bLetterform',1),
('B','elfPanose_bMidline',1),
('B','elfPanose_bXHeight',1)]
def __init__(self,height=0,width=0,escapement=0,orientation=0,
weight=FW_NORMAL,italic=0,underline=0,strike_out=0,
charset=ANSI_CHARSET,out_precision=OUT_DEFAULT_PRECIS,
clip_precision=CLIP_DEFAULT_PRECIS,
quality=DEFAULT_QUALITY,
pitch_family=DEFAULT_PITCH|FF_DONTCARE,name='Times New Roman'):
_EMR_UNKNOWN.__init__(self)
self.lfHeight=height
self.lfWidth=width
self.lfEscapement=escapement
self.lfOrientation=orientation
self.lfWeight=weight
self.lfItalic=italic
self.lfUnderline=underline
self.lfStrikeOut=strike_out
self.lfCharSet=charset
self.lfOutPrecision=out_precision
self.lfClipPrecision=clip_precision
self.lfQuality=quality
self.lfPitchAndFamily=pitch_family
# pad the structure out to 4 byte boundary
self.unhandleddata=_EMR_UNKNOWN.twobytepadding
# truncate or pad to exactly 32 characters
if len(name)>32:
name=name[0:32]
else:
name+='\0'*(32-len(name))
self.lfFaceName=name.encode('utf-16le')
# print "lfFaceName=%s" % self.lfFaceName
def hasHandle(self):
return True
class _EXTTEXTOUTA(_EMR_UNKNOWN):
emr_id=83
typedef=[
(Points(num=2),'rclBounds',[[0,0],[-1,-1]]),
('i','iGraphicsMode',GM_COMPATIBLE),
('f','exScale',1.0),
('f','eyScale',1.0),
('i','ptlReference_x'),
('i','ptlReference_y'),
('i','nChars'),
('i','offString',0),
('i','fOptions',0),
(Points(num=2),'rcl',[[0,0],[-1,-1]]),
('i','offDx',0),
(List(num='nChars',fmt='i',offset='offDx'),'dx'),
(EMFString(num='nChars',size=1,offset='offString'),'string'),
]
def __init__(self,x=0,y=0,txt=""):
_EMR_UNKNOWN.__init__(self)
self.ptlReference_x=x
self.ptlReference_y=y
if isinstance(txt,cunicode):
self.string=txt.encode('utf-16le')
else:
self.string=txt
self.charsize=1
self.dx=[]
class _EXTTEXTOUTW(_EXTTEXTOUTA):
emr_id=84
typedef=[
(Points(num=2),'rclBounds',[[0,0],[-1,-1]]),
('i','iGraphicsMode',GM_COMPATIBLE),
('f','exScale',1.0),
('f','eyScale',1.0),
('i','ptlReference_x'),
('i','ptlReference_y'),
('i','nChars'),
('i','offString',0),
('i','fOptions',0),
(Points(num=2),'rcl',[[0,0],[-1,-1]]),
('i','offDx',0),
(List(num='nChars',fmt='i',offset='offDx'),'dx'),
(EMFString(num='nChars',size=2,offset='offString'),'string'),
]
def __init__(self,x=0,y=0,txt=u''):
_EMR._EXTTEXTOUTA.__init__(self,x,y,txt)
self.charsize=2
class _POLYBEZIER16(_POLYBEZIER):
emr_id=85
typedef=[
(Points(num=2),'rclBounds'),
('i','cptl'),
(Points(num='cptl',fmt='h'),'aptl'),
]
class _POLYGON16(_POLYBEZIER16):
emr_id=86
pass
class _POLYLINE16(_POLYBEZIER16):
emr_id=87
pass
class _POLYBEZIERTO16(_POLYBEZIERTO):
emr_id=88
typedef=[
(Points(num=2),'rclBounds'),
('i','cptl'),
(Points(num='cptl',fmt='h'),'aptl'),
]
pass
class _POLYLINETO16(_POLYBEZIERTO16):
emr_id=89
pass
class _POLYPOLYLINE16(_POLYPOLYLINE):
emr_id=90
typedef=[
(Points(num=2),'rclBounds'),
('i','nPolys'),
('i','cptl'),
(List(num='nPolys',fmt='i'),'aPolyCounts'),
(Points(num='cptl',fmt='h'),'aptl'),
]
pass
class _POLYPOLYGON16(_POLYPOLYLINE16):
emr_id=91
pass
#define EMR_POLYDRAW16 92
# Stub class for storage of brush with monochrome bitmap or DIB
class _CREATEMONOBRUSH(_CREATEPALETTE):
emr_id=93
pass
# Stub class for device independent bitmap brush
class _CREATEDIBPATTERNBRUSHPT(_CREATEPALETTE):
emr_id=94
pass
# Stub class for extended pen
class _EXTCREATEPEN(_CREATEPALETTE):
emr_id=95
pass
#define EMR_POLYTEXTOUTA 96
#define EMR_POLYTEXTOUTW 97
class _SETICMMODE(_SETMAPMODE):
"""Set or query the current color management mode.
@gdi: SetICMMode
"""
emr_id=98
def __init__(self,mode=ICM_OFF):
_EMR._SETMAPMODE.__init__(self,mode,first=ICM_MIN,last=ICM_MAX)
#define EMR_CREATECOLORSPACE 99
#define EMR_SETCOLORSPACE 100
#define EMR_DELETECOLORSPACE 101
#define EMR_GLSRECORD 102
#define EMR_GLSBOUNDEDRECORD 103
#define EMR_PIXELFORMAT 104
#define EMR_DRAWESCAPE 105
#define EMR_EXTESCAPE 106
#define EMR_STARTDOC 107
#define EMR_SMALLTEXTOUT 108
#define EMR_FORCEUFIMAPPING 109
#define EMR_NAMEDESCAPE 110
#define EMR_COLORCORRECTPALETTE 111
#define EMR_SETICMPROFILEA 112
#define EMR_SETICMPROFILEW 113
#define EMR_ALPHABLEND 114
#define EMR_SETLAYOUT 115
#define EMR_TRANSPARENTBLT 116
#define EMR_RESERVED_117 117
#define EMR_GRADIENTFILL 118
#define EMR_SETLINKEDUFI 119
#define EMR_SETTEXTJUSTIFICATION 120
#define EMR_COLORMATCHTOTARGETW 121
#define EMR_CREATECOLORSPACEW 122
# Set up the mapping of ids to classes for all of the record types in
# the EMR class.
_emrmap={}
for name in dir(_EMR):
#print name
cls=getattr(_EMR,name,None)
if cls and inspect.isclass(cls) and issubclass(cls,_EMR_UNKNOWN):
#print "subclass! id=%d %s" % (cls.emr_id,str(cls))
_emrmap[cls.emr_id]=cls
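# A brief illustration (not in the original source): after this loop runs,
# _emrmap maps each EMR record id to its class, e.g. _emrmap[27] is
# _EMR._MOVETOEX and _emrmap[54] is _EMR._LINETO, which is how
# EMF._unserialize() below reconstructs record objects when reading a file.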
class EMF:
"""
Reference page of the public API for enhanced metafile creation. See
L{pyemf} for an overview / mini tutorial.
@group Creating Metafiles: __init__, load, save
@group Drawing Parameters: GetStockObject, SelectObject, DeleteObject, CreatePen, CreateSolidBrush, CreateHatchBrush, SetBkColor, SetBkMode, SetPolyFillMode
@group Drawing Primitives: SetPixel, Polyline, PolyPolyline, Polygon, PolyPolygon, Rectangle, RoundRect, Ellipse, Arc, Chord, Pie, PolyBezier
@group Path Primitives: BeginPath, EndPath, MoveTo, LineTo, PolylineTo, ArcTo,
PolyBezierTo, CloseFigure, FillPath, StrokePath, StrokeAndFillPath
@group Clipping: SelectClipPath
@group Text: CreateFont, SetTextAlign, SetTextColor, TextOut
@group Coordinate System Transformation: SaveDC, RestoreDC, SetWorldTransform, ModifyWorldTransform
@group **Experimental** -- Viewport Manipulation: SetMapMode, SetViewportOrgEx, GetViewportOrgEx, SetWindowOrgEx, GetWindowOrgEx, SetViewportExtEx, ScaleViewportExtEx, GetViewportExtEx, SetWindowExtEx, ScaleWindowExtEx, GetWindowExtEx
"""
def __init__(self,width=6.0,height=4.0,density=300,units="in",
description="pyemf.sf.net",verbose=False):
"""
Create an EMF structure in memory. The size of the resulting image is
specified in either inches or millimeters depending on the value of
L{units}. Width and height are floating point values, but density
must be an integer because this becomes the basis for the coordinate
system in the image. Density is the number of individually
addressable pixels per unit measurement (dots per inch or dots per
millimeter, depending on the units system) in the image. A
consequence of this is that each pixel is specified by a pair of
integer coordinates.
@param width: width of EMF image in inches or millimeters
@param height: height of EMF image in inches or millimeters
@param density: dots (pixels) per unit measurement
@param units: string indicating the unit measurement, one of:
- 'in'
- 'mm'
@type width: float
@type height: float
@type density: int
@type units: string
@param description: optional string to specify a description of the image
@type description: string
"""
self.filename=None
self.dc=_DC(width,height,density,units)
self.records=[]
# path recordkeeping
self.pathstart=0
self.verbose=verbose
# if True, scale the image using only the header, and not
# using MapMode or SetWindow/SetViewport.
self.scaleheader=True
emr=_EMR._HEADER(description)
self._append(emr)
if not self.scaleheader:
self.SetMapMode(MM_ANISOTROPIC)
self.SetWindowExtEx(self.dc.pixelwidth,self.dc.pixelheight)
self.SetViewportExtEx(
int(self.dc.width/100.0*self.dc.ref_pixelwidth/self.dc.ref_width),
int(self.dc.height/100.0*self.dc.ref_pixelheight/self.dc.ref_height))
def loadmem(self,membuf=None):
"""
Read an existing buffer from a string of bytes. If any records exist
in the current object, they will be overwritten by the records from
this buffer.
@param membuf: buffer to load
@type membuf: string
@returns: True for success, False for failure.
@rtype: Boolean
"""
fh = BytesIO(membuf)
self._load(fh)
def load(self,filename=None):
"""
Read an existing EMF file. If any records exist in the current
object, they will be overwritten by the records from this file.
@param filename: filename to load
@type filename: string
@returns: True for success, False for failure.
@rtype: Boolean
"""
if filename:
self.filename=filename
if self.filename:
fh=open(self.filename,'rb')
self._load(fh)
def _load(self,fh):
self.records=[]
self._unserialize(fh)
self.scaleheader=False
# get DC from header record
self.dc.getBounds(self.records[0])
def _unserialize(self,fh):
try:
count=1
while count>0:
data=fh.read(8)
count=len(data)
if count>0:
(iType,nSize)=struct.unpack("<ii",data)
if self.verbose: print("EMF: iType=%d nSize=%d" % (iType,nSize))
if iType in _emrmap:
e=_emrmap[iType]()
else:
e=_EMR_UNKNOWN()
e.unserialize(fh,data,iType,nSize)
self.records.append(e)
if e.hasHandle():
self.dc.addObject(e,e.handle)
elif isinstance(e,_EMR._DELETEOBJECT):
self.dc.removeObject(e.handle)
if self.verbose:
print("Unserializing: ", end=' ')
print(e)
except EOFError:
pass
def _append(self,e):
"""Append an EMR to the record list, unless the record has
been flagged as having an error."""
if not e.error:
if self.verbose:
print("Appending: ", end=' ')
print(e)
self.records.append(e)
return 1
return 0
def _end(self):
"""
Append an EOF record and compute header information. The header needs
to know the number of records, number of handles, bounds, and size of
the entire metafile before it can be written out, so we have to march
through all the records and gather info.
"""
end=self.records[-1]
if not isinstance(end,_EMR._EOF):
if self.verbose: print("adding EOF record")
e=_EMR._EOF()
self._append(e)
header=self.records[0]
header.setBounds(self.dc,self.scaleheader)
header.nRecords=len(self.records)
header.nHandles=len(self.dc.objects)
size=0
for e in self.records:
e.resize()
size+=e.nSize
if self.verbose: print("size=%d total=%d" % (e.nSize,size))
if self.verbose: print("total: %s bytes" % size)
header.nBytes=size
def save(self,filename=None):
"""
Write the EMF to disk.
@param filename: filename to write
@type filename: string
@returns: True for success, False for failure.
@rtype: Boolean
"""
self._end()
if filename:
self.filename=filename
if self.filename:
try:
fh=open(self.filename,"wb")
self._serialize(fh)
fh.close()
return True
except:
raise
return False
return False
def _serialize(self,fh):
for e in self.records:
if self.verbose: print(e)
e.serialize(fh)
def _create(self,width,height,dots_per_unit,units):
pass
def _getBounds(self,points):
"""Get the bounding rectangle for this list of 2-tuples."""
left=points[0][0]
right=left
top=points[0][1]
bottom=top
for x,y in points[1:]:
if x<left:
left=x
elif x>right:
right=x
if y<top:
top=y
elif y>bottom:
bottom=y
return ((left,top),(right,bottom))
def _mergeBounds(self,bounds,itembounds):
if itembounds:
if itembounds[0][0]<bounds[0][0]: bounds[0][0]=itembounds[0][0]
if itembounds[0][1]<bounds[0][1]: bounds[0][1]=itembounds[0][1]
if itembounds[1][0]>bounds[1][0]: bounds[1][0]=itembounds[1][0]
if itembounds[1][1]>bounds[1][1]: bounds[1][1]=itembounds[1][1]
def _getPathBounds(self):
"""Get the bounding rectangle for the list of EMR records
starting from the last saved path start to the current record."""
# If there are no bounds supplied, default to the EMF standard
# of ((0,0),(-1,-1)) which means that the bounds aren't
# precomputed.
bounds=[[0,0],[-1,-1]]
# find the first bounds
for i in range(self.pathstart,len(self.records)):
#print "FIXME: checking initial bounds on record %d" % i
e=self.records[i]
# print e
# print "bounds=%s" % str(e.getBounds())
objbounds=e.getBounds()
if objbounds:
#print "bounds=%s" % str(objbounds)
# have to copy the object manually because we don't
# want to overwrite the object's bounds
bounds=[[objbounds[0][0],objbounds[0][1]],
[objbounds[1][0],objbounds[1][1]]]
break
# if there are more records with bounds, merge them
for j in range(i,len(self.records)):
#print "FIXME: checking bounds for more records: %d" % j
e=self.records[j]
# print e
# print "bounds=%s" % str(e.getBounds())
self._mergeBounds(bounds,e.getBounds())
return bounds
def _useShort(self,bounds):
"""Determine if we can use the shorter 16-bit EMR structures.
If all the numbers can fit within 16 bit integers, return
true. The bounds are given as ((left,top),(right,bottom))."""
SHRT_MIN=-32768
SHRT_MAX=32767
if bounds[0][0]>=SHRT_MIN and bounds[0][1]>=SHRT_MIN and bounds[1][0]<=SHRT_MAX and bounds[1][1]<=SHRT_MAX:
return True
return False
def _appendOptimize16(self,points,cls16,cls):
bounds=self._getBounds(points)
if self._useShort(bounds):
e=cls16(points,bounds)
else:
e=cls(points,bounds)
if not self._append(e):
return 0
return 1
def _appendOptimizePoly16(self,polylist,cls16,cls):
"""polylist is a list of lists of points, where each inner
list represents a single polygon or line. The number of
polygons is the size of the outer list."""
points=[]
polycounts=[]
for polygon in polylist:
count=0
for point in polygon:
points.append(point)
count+=1
polycounts.append(count)
bounds=self._getBounds(points)
if self._useShort(bounds):
e=cls16(points,polycounts,bounds)
else:
e=cls(points,polycounts,bounds)
if not self._append(e):
return 0
return 1
def _appendHandle(self,e):
handle=self.dc.addObject(e)
if not self._append(e):
self.dc.popObject()
return 0
e.handle=handle
return handle
def GetStockObject(self,obj):
"""
Retrieve the handle for a predefined graphics object. Stock objects
include (at least) the following:
- WHITE_BRUSH
- LTGRAY_BRUSH
- GRAY_BRUSH
- DKGRAY_BRUSH
- BLACK_BRUSH
- NULL_BRUSH
- HOLLOW_BRUSH
- WHITE_PEN
- BLACK_PEN
- NULL_PEN
- OEM_FIXED_FONT
- ANSI_FIXED_FONT
- ANSI_VAR_FONT
- SYSTEM_FONT
- DEVICE_DEFAULT_FONT
- DEFAULT_PALETTE
- SYSTEM_FIXED_FONT
- DEFAULT_GUI_FONT
@param obj: number of stock object.
@return: handle of stock graphics object.
@rtype: int
@type obj: int
"""
if obj>=0 and obj<=STOCK_LAST:
return obj|0x80000000
raise IndexError("Undefined stock object.")
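# A hedged usage sketch (not part of the original code): stock objects are
# selected just like handles returned from the Create* methods, e.g.
#   emf.SelectObject(emf.GetStockObject(NULL_PEN))
#   emf.SelectObject(emf.GetStockObject(GRAY_BRUSH))
# NULL_PEN and GRAY_BRUSH are module-level constants listed in the docstring above.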
def SelectObject(self,handle):
"""
Make the given graphics object current.
@param handle: handle of graphics object to make current.
@return:
the handle of the current graphics object which obj replaces.
@rtype: int
@type handle: int
"""
return self._append(_EMR._SELECTOBJECT(self.dc,handle))
def DeleteObject(self,handle):
"""
Delete the given graphics object. Note that only those device contexts
into which the object has been selected receive a delete-object
record.
@param handle: handle of graphics object to delete.
@return: true if the object was successfully deleted.
@rtype: int
@type handle: int
"""
e=_EMR._DELETEOBJECT(self.dc,handle)
self.dc.removeObject(handle)
return self._append(e)
def CreatePen(self,style,width,color):
"""
Create a pen, used to draw lines and path outlines.
@param style: the style of the new pen, one of:
- PS_SOLID
- PS_DASH
- PS_DOT
- PS_DASHDOT
- PS_DASHDOTDOT
- PS_NULL
- PS_INSIDEFRAME
- PS_USERSTYLE
- PS_ALTERNATE
@param width: the width of the new pen.
@param color: (r,g,b) tuple or the packed integer L{color<RGB>} of the new pen.
@return: handle to the new pen graphics object.
@rtype: int
@type style: int
@type width: int
@type color: int
"""
return self._appendHandle(_EMR._CREATEPEN(style,width,_normalizeColor(color)))
def CreateSolidBrush(self,color):
"""
Create a solid brush used to fill polygons.
@param color: the L{color<RGB>} of the solid brush.
@return: handle to brush graphics object.
@rtype: int
@type color: int
"""
return self._appendHandle(_EMR._CREATEBRUSHINDIRECT(color=_normalizeColor(color)))
def CreateHatchBrush(self,hatch,color):
"""
Create a hatched brush used to fill polygons.
B{Note:} Currently appears unsupported in OpenOffice.
@param hatch: integer representing type of fill:
- HS_HORIZONTAL
- HS_VERTICAL
- HS_FDIAGONAL
- HS_BDIAGONAL
- HS_CROSS
- HS_DIAGCROSS
@type hatch: int
@param color: the L{color<RGB>} of the 'on' pixels of the brush.
@return: handle to brush graphics object.
@rtype: int
@type color: int
"""
return self._appendHandle(_EMR._CREATEBRUSHINDIRECT(hatch=hatch,color=_normalizeColor(color)))
def SetBkColor(self,color):
"""
Set the background color used for any transparent regions in fills or
hatched brushes.
B{Note:} Currently appears sporadically supported in OpenOffice.
@param color: background L{color<RGB>}.
@return: previous background L{color<RGB>}.
@rtype: int
@type color: int
"""
e=_EMR._SETBKCOLOR(_normalizeColor(color))
if not self._append(e):
return 0
return 1
def SetBkMode(self,mode):
"""
Set the background mode for interaction between transparent areas in
the region to be drawn and the existing background.
The choices for mode are:
- TRANSPARENT
- OPAQUE
B{Note:} Currently appears sporadically supported in OpenOffice.
@param mode: background mode.
@return: previous background mode.
@rtype: int
@type mode: int
"""
e=_EMR._SETBKMODE(mode)
if not self._append(e):
return 0
return 1
def SetPolyFillMode(self,mode):
"""
Set the polygon fill mode. Generally these modes produce
different results only when the edges of the polygons overlap
other edges.
@param mode: fill mode with the following options:
- ALTERNATE - fills area between odd and even numbered sides
- WINDING - fills all area as long as a point is between any two sides
@return: previous fill mode.
@rtype: int
@type mode: int
"""
e=_EMR._SETPOLYFILLMODE(mode)
if not self._append(e):
return 0
return 1
def SetMapMode(self,mode):
"""
Set the window mapping mode. This is the mapping between pixels in page space to pixels in device space. Page space is the coordinate system that is used for all the drawing commands -- it is how pixels are identified and figures are placed in the metafile. They are integer units.
Device space is the coordinate system of the final output, measured in physical dimensions such as mm, inches, or twips. It is this coordinate system that provides the scaling that makes metafiles into a scalable graphics format.
- MM_TEXT: each unit in page space is mapped to one pixel
- MM_LOMETRIC: 1 page unit = .1 mm in device space
- MM_HIMETRIC: 1 page unit = .01 mm in device space
- MM_LOENGLISH: 1 page unit = .01 inch in device space
- MM_HIENGLISH: 1 page unit = .001 inch in device space
- MM_TWIPS: 1 page unit = 1/20 point (or 1/1440 inch)
- MM_ISOTROPIC: 1 page unit = user defined ratio, but axes equally scaled
- MM_ANISOTROPIC: 1 page unit = user defined ratio, axes may be independently scaled
@param mode: window mapping mode.
@return: previous window mapping mode, or zero if error.
@rtype: int
@type mode: int
"""
e=_EMR._SETMAPMODE(mode)
if not self._append(e):
return 0
return 1
def SetViewportOrgEx(self,xv,yv):
"""
Set the origin of the viewport, which translates the origin of the
coordinate system by (xv,yv). A pixel drawn at (x,y) in the new
coordinate system will be displayed at (x+xv,y+yv) in terms of the
previous coordinate system.
Contrast this with L{SetWindowOrgEx}, which seems to be the opposite
translation. So, if in addition, the window origin is set to (xw,yw)
using L{SetWindowOrgEx}, a pixel drawn at (x,y) will be displayed at
(x-xw+xv,y-yw+yv) in terms of the original coordinate system.
@param xv: new x position of the viewport origin.
@param yv: new y position of the viewport origin.
@return: previous viewport origin
@rtype: 2-tuple (x,y) if successful, or None if unsuccessful
@type xv: int
@type yv: int
"""
e=_EMR._SETVIEWPORTORGEX(xv,yv)
if not self._append(e):
return None
old=(self.dc.viewport_x,self.dc.viewport_y)
self.dc.viewport_x=xv
self.dc.viewport_y=yv
return old
def GetViewportOrgEx(self):
"""
Get the origin of the viewport.
@return: returns the current viewport origin.
@rtype: 2-tuple (x,y)
"""
return (self.dc.viewport_x,self.dc.viewport_y)
def SetWindowOrgEx(self,xw,yw):
"""
Set the origin of the window, which translates the origin of the
coordinate system by (-xw,-yw). A pixel drawn at (x,y) in the new
coordinate system will be displayed at (x-xw,y-yw) in terms of the
previous coordinate system.
Contrast this with L{SetViewportOrgEx}, which seems to be the opposite
translation. So, if in addition, the viewport origin is set to
(xv,yv) using L{SetViewportOrgEx}, a pixel drawn at (x,y) will be
displayed at (x-xw+xv,y-yw+yv) in terms of the original coordinate
system.
@param xw: new x position of the window origin.
@param yw: new y position of the window origin.
@return: previous window origin
@rtype: 2-tuple (x,y) if successful, or None if unsuccessful
@type xw: int
@type yw: int
"""
e=_EMR._SETWINDOWORGEX(xw,yw)
if not self._append(e):
return None
old=(self.dc.window_x,self.dc.window_y)
self.dc.window_x=xw
self.dc.window_y=yw
return old
def GetWindowOrgEx(self):
"""
Get the origin of the window.
@return: returns the current window origin.
@rtype: 2-tuple (x,y)
"""
return (self.dc.window_x,self.dc.window_y)
def SetViewportExtEx(self,x,y):
"""
Set the dimensions of the viewport in device units. Device units are
physical dimensions, in millimeters. The total extent is equal to the
width in millimeters multiplied by the density of pixels per
millimeter in that dimension.
Note: this is only usable when L{SetMapMode} has been set to
MM_ISOTROPIC or MM_ANISOTROPIC.
@param x: new width of the viewport.
@param y: new height of the viewport.
@return: returns the previous size of the viewport.
@rtype: 2-tuple (width,height) if successful, or None if unsuccessful
@type x: int
@type y: int
"""
e=_EMR._SETVIEWPORTEXTEX(x,y)
if not self._append(e):
return None
old=(self.dc.viewport_ext_x,self.dc.viewport_ext_y)
self.dc.viewport_ext_x=x
self.dc.viewport_ext_y=y
return old
def ScaleViewportExtEx(self,x_num,x_den,y_num,y_den):
"""
Scale the dimensions of the viewport.
@param x_num: numerator of x scale
@param x_den: denominator of x scale
@param y_num: numerator of y scale
@param y_den: denominator of y scale
@return: returns the previous size of the viewport.
@rtype: 2-tuple (width,height) if successful, or None if unsuccessful
@type x_num: int
@type x_den: int
@type y_num: int
@type y_den: int
"""
e=_EMR._SCALEVIEWPORTEXTEX(x_num,x_den,y_num,y_den)
if not self._append(e):
return None
old=(self.dc.viewport_ext_x,self.dc.viewport_ext_y)
self.dc.viewport_ext_x=old[0]*x_num/x_den
self.dc.viewport_ext_y=old[1]*y_num/y_den
return old
def GetViewportExtEx(self):
"""
Get the dimensions of the viewport in device units (i.e. physical dimensions).
@return: returns the size of the viewport.
@rtype: 2-tuple (width,height)
"""
old=(self.dc.viewport_ext_x,self.dc.viewport_ext_y)
return old
def SetWindowExtEx(self,x,y):
"""
Set the dimensions of the window. Window size is measured in integer
numbers of pixels (logical units).
Note: this is only usable when L{SetMapMode} has been set to
MM_ISOTROPIC or MM_ANISOTROPIC.
@param x: new width of the window.
@param y: new height of the window.
@return: returns the previous size of the window.
@rtype: 2-tuple (width,height) if successful, or None if unsuccessful
@type x: int
@type y: int
"""
e=_EMR._SETWINDOWEXTEX(x,y)
if not self._append(e):
return None
old=(self.dc.window_ext_x,self.dc.window_ext_y)
self.dc.window_ext_x=x
self.dc.window_ext_y=y
return old
def ScaleWindowExtEx(self,x_num,x_den,y_num,y_den):
"""
Scale the dimensions of the window.
@param x_num: numerator of x scale
@param x_den: denominator of x scale
@param y_num: numerator of y scale
@param y_den: denominator of y scale
@return: returns the previous size of the window.
@rtype: 2-tuple (width,height) if successful, or None if unsuccessful
@type x_num: int
@type x_den: int
@type y_num: int
@type y_den: int
"""
e=_EMR._SCALEWINDOWEXTEX(x_num,x_den,y_num,y_den)
if not self._append(e):
return None
old=(self.dc.window_ext_x,self.dc.window_ext_y)
self.dc.window_ext_x=old[0]*x_num/x_den
self.dc.window_ext_y=old[1]*y_num/y_den
return old
def GetWindowExtEx(self):
"""
Get the dimensions of the window in logical units (integer numbers of pixels).
@return: returns the size of the window.
@rtype: 2-tuple (width,height)
"""
old=(self.dc.window_ext_x,self.dc.window_ext_y)
return old
def SetWorldTransform(self,m11=1.0,m12=0.0,m21=0.0,m22=1.0,dx=0.0,dy=0.0):
"""
Set the world coordinate to logical coordinate linear transform for
subsequent operations. With this matrix operation, you can translate,
rotate, scale, shear, or a combination of all four. The matrix
operation is defined as follows where (x,y) are the original
coordinates and (x',y') are the transformed coordinates::
[ x y 1 ] * | m11 m12 0 | = [ x' y' 1 ]
            | m21 m22 0 |
            | dx  dy  1 |
or, the same thing defined as a system of linear equations::
x' = x*m11 + y*m21 + dx
y' = x*m12 + y*m22 + dy
http://msdn.microsoft.com/library/en-us/gdi/cordspac_0inn.asp
says that the offsets are in device coordinates, not pixel
coordinates.
B{Note:} Currently partially supported in OpenOffice.
@param m11: matrix entry
@type m11: float
@param m12: matrix entry
@type m12: float
@param m21: matrix entry
@type m21: float
@param m22: matrix entry
@type m22: float
@param dx: x shift
@type dx: float
@param dy: y shift
@type dy: float
@return: status
@rtype: boolean
"""
return self._append(_EMR._SETWORLDTRANSFORM(m11,m12,m21,m22,dx,dy))
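# A worked example (illustrative, not from the original source): with the
# equations above, a uniform scale by 2 combined with a shift of (10,20) is
#   emf.SetWorldTransform(2.0, 0.0, 0.0, 2.0, 10.0, 20.0)
# so a point drawn at (5,5) lands at x' = 5*2 + 5*0 + 10 = 20 and
# y' = 5*0 + 5*2 + 20 = 30 in the transformed coordinates.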
def ModifyWorldTransform(self,mode,m11=1.0,m12=0.0,m21=0.0,m22=1.0,dx=0.0,dy=0.0):
"""
Change the current linear transform. See L{SetWorldTransform} for a
description of the matrix parameters. The new transform may be
modified in one of three ways, set by the mode parameter:
- MWT_IDENTITY: reset the transform to the identity matrix (the matrix parameters are ignored).
- MWT_LEFTMULTIPLY: multiply the matrix represented by these parameters by the current world transform to get the new transform.
- MWT_RIGHTMULTIPLY: multiply the current world transform by the matrix represented here to get the new transform.
The reason that there are two different multiplication types is that
matrix multiplication is not commutative, which means the order of
multiplication makes a difference.
B{Note:} The parameter order was changed from GDI standard so that I
could make the matrix parameters optional in the case of MWT_IDENTITY.
B{Note:} Currently appears unsupported in OpenOffice.
@param mode: MWT_IDENTITY, MWT_LEFTMULTIPLY, or MWT_RIGHTMULTIPLY
@type mode: int
@param m11: matrix entry
@type m11: float
@param m12: matrix entry
@type m12: float
@param m21: matrix entry
@type m21: float
@param m22: matrix entry
@type m22: float
@param dx: x shift
@type dx: float
@param dy: y shift
@type dy: float
@return: status
@rtype: boolean
"""
return self._append(_EMR._MODIFYWORLDTRANSFORM(m11,m12,m21,m22,dx,dy,mode))
def SetPixel(self,x,y,color):
"""
Set the pixel to the given color.
@param x: the horizontal position.
@param y: the vertical position.
@param color: the L{color<RGB>} to set the pixel.
@type x: int
@type y: int
@type color: int or (r,g,b) tuple
"""
return self._append(_EMR._SETPIXELV(x,y,_normalizeColor(color)))
def Polyline(self,points):
"""
Draw a sequence of connected lines.
@param points: list of x,y tuples
@return: true if polyline is successfully rendered.
@rtype: int
@type points: tuple
"""
return self._appendOptimize16(points,_EMR._POLYLINE16,_EMR._POLYLINE)
def PolyPolyline(self,polylines):
"""
Draw multiple polylines. The polylines argument is a list of lists,
where each inner list represents a single polyline. Each polyline is
described by a list of x,y tuples as in L{Polyline}. For example::
lines=[[(100,100),(200,100)],
[(300,100),(400,100)]]
emf.PolyPolyline(lines)
draws two lines, one from 100,100 to 200,100, and another from 300,100
to 400,100.
@param polylines: list of lines, where each line is a list of x,y tuples
@type polylines: list
@return: true if polypolyline is successfully rendered.
@rtype: int
"""
return self._appendOptimizePoly16(polylines,_EMR._POLYPOLYLINE16,_EMR._POLYPOLYLINE)
def Polygon(self,points):
"""
Draw a closed figure bounded by straight line segments. A polygon is
defined by a list of points that define the endpoints for a series of
connected straight line segments. The end of the last line segment is
automatically connected to the beginning of the first line segment,
the border is drawn with the current pen, and the interior is filled
with the current brush. See L{SetPolyFillMode} for the fill effects
when an overlapping polygon is defined.
@param points: list of x,y tuples
@return: true if polygon is successfully rendered.
@rtype: int
@type points: tuple
"""
if len(points)==4:
if points[0][0]==points[1][0] and points[2][0]==points[3][0] and points[0][1]==points[3][1] and points[1][1]==points[2][1]:
if self.verbose: print("converting to rectangle, option 1:")
return self.Rectangle(points[0][0],points[0][1],points[2][0],points[2][1])
elif points[0][1]==points[1][1] and points[2][1]==points[3][1] and points[0][0]==points[3][0] and points[1][0]==points[2][0]:
if self.verbose: print("converting to rectangle, option 2:")
return self.Rectangle(points[0][0],points[0][1],points[2][0],points[2][1])
return self._appendOptimize16(points,_EMR._POLYGON16,_EMR._POLYGON)
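# An illustrative example (not in the original source): a filled right
# triangle with vertices at (100,100), (200,100) and (100,200); the last
# point is joined back to the first automatically.
#   emf.Polygon([(100,100), (200,100), (100,200)])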
def PolyPolygon(self,polygons):
"""
Draw multiple polygons. The polygons argument is a list of lists,
where each inner list represents a single polygon. Each polygon is
described by a list of x,y tuples as in L{Polygon}. For example::
lines=[[(100,100),(200,100),(200,200),(100,200)],
[(300,100),(400,100),(400,200),(300,200)]]
emf.PolyPolygon(lines)
draws two squares.
B{Note:} Currently partially supported in OpenOffice. The line width
is ignored and the polygon border is not closed (the final point is
not connected to the starting point in each polygon).
@param polygons: list of polygons, where each polygon is a list of x,y tuples
@type polygons: list
@return: true if polypolygon is successfully rendered.
@rtype: int
"""
return self._appendOptimizePoly16(polygons,_EMR._POLYPOLYGON16,_EMR._POLYPOLYGON)
def Ellipse(self,left,top,right,bottom):
"""
Draw an ellipse using the current pen.
@param left: x position of left side of ellipse bounding box.
@param top: y position of top side of ellipse bounding box.
@param right: x position of right edge of ellipse bounding box.
@param bottom: y position of bottom edge of ellipse bounding box.
@return: true if rectangle was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
"""
return self._append(_EMR._ELLIPSE(((left,top),(right,bottom))))
def Rectangle(self,left,top,right,bottom):
"""
Draw a rectangle using the current pen.
@param left: x position of left side of the rectangle.
@param top: y position of top side of the rectangle.
@param right: x position of right edge of the rectangle.
@param bottom: y position of bottom edge of the rectangle.
@return: true if rectangle was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
"""
return self._append(_EMR._RECTANGLE(((left,top),(right,bottom))))
def RoundRect(self,left,top,right,bottom,cornerwidth,cornerheight):
"""
Draw a rectangle with rounded corners using the current pen.
@param left: x position of left side of the rounded rectangle.
@param top: y position of top side of the rounded rectangle.
@param right: x position of right edge of the rounded rectangle.
@param bottom: y position of bottom edge of the rounded rectangle.
@param cornerwidth: width of the ellipse that defines the roundness of the corner.
@param cornerheight: height of the ellipse that defines the roundness of the corner.
@return: true if rectangle was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type cornerwidth: int
@type cornerheight: int
"""
return self._append(_EMR._ROUNDRECT(((left,top),(right,bottom)),
cornerwidth,cornerheight))
def Arc(self,left,top,right,bottom,xstart,ystart,xend,yend):
"""
Draw an arc of an ellipse. The ellipse is specified by its bounding
rectangle and two lines from its center to indicate the start and end
angles. left, top, right, bottom describe the bounding rectangle of
the ellipse. The start point given by xstart,ystart defines a ray
from the center of the ellipse through the point and out to infinity.
The point at which this ray intersects the ellipse is the starting
point of the arc. Similarly, the infinite radial ray from the center
through the end point defines the end point of the arc. The arc
is drawn in a counterclockwise direction, and if the start and end
rays are coincident, a complete ellipse is drawn.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
return self._append(_EMR._ARC(((left,top),(right,bottom)),
xstart,ystart,xend,yend))
def Chord(self,left,top,right,bottom,xstart,ystart,xend,yend):
"""
Draw a chord of an ellipse. A chord is a closed region bounded by an
arc and the straight line between the two points that define the arc
start and end. The arc start and end points are defined as in L{Arc}.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
return self._append(_EMR._CHORD(((left,top),(right,bottom)),
xstart,ystart,xend,yend))
def Pie(self,left,top,right,bottom,xstart,ystart,xend,yend):
"""
Draw a pie slice of an ellipse. The ellipse is specified as in
L{Arc}, and it is filled with the current brush.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
if xstart==xend and ystart==yend:
# Fix for OpenOffice: doesn't render a full ellipse when
# the start and end angles are the same
e=_EMR._ELLIPSE(((left,top),(right,bottom)))
else:
e=_EMR._PIE(((left,top),(right,bottom)),xstart,ystart,xend,yend)
return self._append(e)
def PolyBezier(self,points):
"""
Draw cubic Bezier curves using the list of points as both endpoints
and control points. The first point is used as the starting point,
the second and third points are control points, and the fourth point
is the end point of the first curve. Subsequent curves need three
points each: two control points and an end point, as the ending point
of the previous curve is used as the starting point for the next
curve.
@param points: list of x,y tuples that are either end points or control points
@return: true if bezier curve was successfully rendered.
@rtype: int
@type points: tuple
"""
return self._appendOptimize16(points,_EMR._POLYBEZIER16,_EMR._POLYBEZIER)
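# An illustrative example (not in the original source): one cubic curve needs
# four points -- start, two control points, and end -- so
#   emf.PolyBezier([(100,100), (150,50), (250,50), (300,100)])
# draws a single curve from (100,100) to (300,100); appending three more
# points would continue with a second curve starting at (300,100).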
def BeginPath(self):
"""
Begin defining a path. Any previous unclosed paths are discarded.
@return: true if successful.
@rtype: int
"""
# record next record number as first item in path
self.pathstart=len(self.records)
return self._append(_EMR._BEGINPATH())
def EndPath(self):
"""
End the path definition.
@return: true if successful.
@rtype: int
"""
return self._append(_EMR._ENDPATH())
def MoveTo(self,x,y):
"""
Move the current point to the given position and implicitly begin a
new figure or path.
@param x: new x position.
@param y: new y position.
@return: true if position successfully changed (can this fail?)
@rtype: int
@type x: int
@type y: int
"""
return self._append(_EMR._MOVETOEX(x,y))
def LineTo(self,x,y):
"""
Draw a straight line using the current pen from the current point to
the given position.
@param x: x position of line end.
@param y: y position of line end.
@return: true if line is drawn (can this fail?)
@rtype: int
@type x: int
@type y: int
"""
return self._append(_EMR._LINETO(x,y))
def PolylineTo(self,points):
"""
Draw a sequence of connected lines starting from the current
position and update the position to the final point in the list.
@param points: list of x,y tuples
@return: true if polyline is successfully rendered.
@rtype: int
@type points: tuple
"""
return self._appendOptimize16(points,_EMR._POLYLINETO16,_EMR._POLYLINETO)
def ArcTo(self,left,top,right,bottom,xstart,ystart,xend,yend):
"""
Draw an arc and update the current position. The arc is drawn as
described in L{Arc}, but in addition the start of the arc will be
connected to the previous position and the current position is updated
to the end of the arc so subsequent path operations such as L{LineTo},
L{PolylineTo}, etc. will connect to the end.
B{Note:} Currently appears unsupported in OpenOffice.
@param left: x position of left edge of arc box.
@param top: y position of top edge of arc box.
@param right: x position of right edge of arc box.
@param bottom: y position bottom edge of arc box.
@param xstart: x position of arc start.
@param ystart: y position of arc start.
@param xend: x position of arc end.
@param yend: y position of arc end.
@return: true if arc was successfully rendered.
@rtype: int
@type left: int
@type top: int
@type right: int
@type bottom: int
@type xstart: int
@type ystart: int
@type xend: int
@type yend: int
"""
return self._append(_EMR._ARCTO(((left,top),(right,bottom)),
xstart,ystart,xend,yend))
def PolyBezierTo(self,points):
"""
Draw cubic Bezier curves, as described in L{PolyBezier}, but in
addition draw a line from the previous position to the start of the
curve. If the arc is successfully rendered, the current position is
updated so that subsequent path operations such as L{LineTo},
L{PolylineTo}, etc. will follow from the end of the curve.
@param points: list of x,y tuples that are either end points or control points
@return: true if bezier curve was successfully rendered.
@rtype: int
@type points: tuple
"""
return self._appendOptimize16(points,_EMR._POLYBEZIERTO16,_EMR._POLYBEZIERTO)
def CloseFigure(self):
"""
Close a currently open path, which connects the current position to the starting position of a figure. Usually the starting position is the most recent call to L{MoveTo} after L{BeginPath}.
@return: true if successful
@rtype: int
"""
return self._append(_EMR._CLOSEFIGURE())
def FillPath(self):
"""
Close any currently open path and fills it using the currently
selected brush and polygon fill mode.
@return: true if successful.
@rtype: int
"""
bounds=self._getPathBounds()
return self._append(_EMR._FILLPATH(bounds))
def StrokePath(self):
"""
Close any currently open path and outlines it using the currently
selected pen.
@return: true if successful.
@rtype: int
"""
bounds=self._getPathBounds()
return self._append(_EMR._STROKEPATH(bounds))
def StrokeAndFillPath(self):
"""
Close any currently open path, outlines it using the currently
selected pen, and fills it using the current brush. Same as stroking
and filling using both the L{FillPath} and L{StrokePath} options,
except that the pixels that would be in the overlap region to be both
stroked and filled are optimized to be only stroked.
B{Note:} Supported in OpenOffice 2.*, unsupported in OpenOffice 1.*.
@return: true if successful.
@rtype: int
"""
bounds=self._getPathBounds()
return self._append(_EMR._STROKEANDFILLPATH(bounds))
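# A hedged usage sketch (not part of the original code) of the path calls
# defined above -- a filled and outlined triangle built as a path:
#   emf.BeginPath()
#   emf.MoveTo(100, 100)
#   emf.LineTo(200, 100)
#   emf.LineTo(150, 200)
#   emf.CloseFigure()
#   emf.EndPath()
#   emf.StrokeAndFillPath()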
def SelectClipPath(self,mode=RGN_COPY):
"""
Use the current path as the clipping path. The current path must be a
closed path (i.e. with L{CloseFigure} and L{EndPath})
B{Note:} Currently unsupported in OpenOffice -- it apparently uses the
bounding rectangle of the path as the clip area, not the path itself.
@param mode: one of the following values that specifies how to modify the clipping path
- RGN_AND: the new clipping path becomes the intersection of the old path and the current path
- RGN_OR: the new clipping path becomes the union of the old path and the current path
- RGN_XOR: the new clipping path becomes the union of the old path and the current path minus the intersection of the old and current path
- RGN_DIFF: the new clipping path becomes the old path where any overlapping region of the current path is removed
- RGN_COPY: the new clipping path is set to the current path and the old path is thrown away
@return: true if successful.
@rtype: int
"""
return self._append(_EMR._SELECTCLIPPATH(mode))
def SaveDC(self):
"""
Saves the current state of the graphics mode (such as line and fill
styles, font, clipping path, drawing mode and any transformations) to
a stack. This state can be restored by L{RestoreDC}.
B{Note:} Currently appears unsupported in OpenOffice.
@return: value of the saved state.
@rtype: int
"""
return self._append(_EMR._SAVEDC())
def RestoreDC(self,stackid):
"""
Restores the state of the graphics mode from the stack. The L{stackid}
parameter is either a value returned by L{SaveDC}, or if negative, is
the number of states relative to the top of the save stack. For
example, C{stackid == -1} is the most recently saved state.
B{Note:} If the retrieved state is not at the top of the stack, any
saved states above it are thrown away.
B{Note:} Currently appears unsupported in OpenOffice.
@param stackid: stack id number from L{SaveDC} or negative number for relative stack location
@type stackid: int
@return: nonzero for success
@rtype: int
"""
return self._append(_EMR._RESTOREDC(stackid))
def SetTextAlign(self,alignment):
"""
Set the subsequent alignment of drawn text. You can also pass a flag
indicating whether or not to update the current point to the end of the
text. Alignment may have the (sum of) values:
- TA_NOUPDATECP
- TA_UPDATECP
- TA_LEFT
- TA_RIGHT
- TA_CENTER
- TA_TOP
- TA_BOTTOM
- TA_BASELINE
- TA_RTLREADING
@param alignment: new text alignment.
@return: previous text alignment value.
@rtype: int
@type alignment: int
"""
return self._append(_EMR._SETTEXTALIGN(alignment))
def SetTextColor(self,color):
"""
Set the text foreground color.
@param color: text foreground L{color<RGB>}.
@return: previous text foreground L{color<RGB>}.
@rtype: int
@type color: int
"""
e=_EMR._SETTEXTCOLOR(_normalizeColor(color))
if not self._append(e):
return 0
return 1
def CreateFont(self,height,width=0,escapement=0,orientation=0,weight=FW_NORMAL,italic=0,underline=0,strike_out=0,charset=ANSI_CHARSET,out_precision=OUT_DEFAULT_PRECIS,clip_precision=CLIP_DEFAULT_PRECIS,quality=DEFAULT_QUALITY,pitch_family=DEFAULT_PITCH|FF_DONTCARE,name='Times New Roman'):
"""
Create a new font object. Presumably, when rendering the EMF the
system tries to find a reasonable approximation to all the requested
attributes.
@param height: specified one of two ways:
- if height>0: locate the font using the specified height as the typical cell height
- if height<0: use the absolute value of the height as the typical glyph height.
@param width: typical glyph width. If zero, the typical aspect ratio of the font is used.
@param escapement: angle, in degrees*10, of rendered string rotation. Note that escapement and orientation must be the same.
@param orientation: angle, in degrees*10, of rendered string rotation. Note that escapement and orientation must be the same.
@param weight: weight has (at least) the following values:
- FW_DONTCARE
- FW_THIN
- FW_EXTRALIGHT
- FW_ULTRALIGHT
- FW_LIGHT
- FW_NORMAL
- FW_REGULAR
- FW_MEDIUM
- FW_SEMIBOLD
- FW_DEMIBOLD
- FW_BOLD
- FW_EXTRABOLD
- FW_ULTRABOLD
- FW_HEAVY
- FW_BLACK
@param italic: non-zero means try to find an italic version of the face.
@param underline: non-zero means to underline the glyphs.
@param strike_out: non-zero means to strike-out the glyphs.
@param charset: select the character set from the following list:
- ANSI_CHARSET
- DEFAULT_CHARSET
- SYMBOL_CHARSET
- SHIFTJIS_CHARSET
- HANGEUL_CHARSET
- HANGUL_CHARSET
- GB2312_CHARSET
- CHINESEBIG5_CHARSET
- GREEK_CHARSET
- TURKISH_CHARSET
- HEBREW_CHARSET
- ARABIC_CHARSET
- BALTIC_CHARSET
- RUSSIAN_CHARSET
- EE_CHARSET
- EASTEUROPE_CHARSET
- THAI_CHARSET
- JOHAB_CHARSET
- MAC_CHARSET
- OEM_CHARSET
@param out_precision: the precision of the face may have one of the
following values:
- OUT_DEFAULT_PRECIS
- OUT_STRING_PRECIS
- OUT_CHARACTER_PRECIS
- OUT_STROKE_PRECIS
- OUT_TT_PRECIS
- OUT_DEVICE_PRECIS
- OUT_RASTER_PRECIS
- OUT_TT_ONLY_PRECIS
- OUT_OUTLINE_PRECIS
@param clip_precision: the precision of glyph clipping may have one of the
following values:
- CLIP_DEFAULT_PRECIS
- CLIP_CHARACTER_PRECIS
- CLIP_STROKE_PRECIS
- CLIP_MASK
- CLIP_LH_ANGLES
- CLIP_TT_ALWAYS
- CLIP_EMBEDDED
@param quality: (subjective) quality of the font. Choose from the following
values:
- DEFAULT_QUALITY
- DRAFT_QUALITY
- PROOF_QUALITY
- NONANTIALIASED_QUALITY
- ANTIALIASED_QUALITY
@param pitch_family: the pitch and family of the font face if the named font can't be found. Combine the pitch and style using a binary or.
- Pitch:
- DEFAULT_PITCH
- FIXED_PITCH
- VARIABLE_PITCH
- MONO_FONT
- Style:
- FF_DONTCARE
- FF_ROMAN
- FF_SWISS
- FF_MODERN
- FF_SCRIPT
- FF_DECORATIVE
@param name: ASCII string containing the name of the font face.
@return: handle of font.
@rtype: int
@type height: int
@type width: int
@type escapement: int
@type orientation: int
@type weight: int
@type italic: int
@type underline: int
@type strike_out: int
@type charset: int
@type out_precision: int
@type clip_precision: int
@type quality: int
@type pitch_family: int
@type name: string
"""
return self._appendHandle(_EMR._EXTCREATEFONTINDIRECTW(height,width,escapement,orientation,weight,italic,underline,strike_out,charset,out_precision,clip_precision,quality,pitch_family,name))
def TextOut(self,x,y,text):
"""
Draw a string of text at the given position using the current FONT and
other text attributes.
@param x: x position of text.
@param y: y position of text.
@param text: ASCII text string to render.
@return: true if the string is successfully drawn.
@rtype: int
@type x: int
@type y: int
@type text: string
"""
e=_EMR._EXTTEXTOUTA(x,y,text)
if not self._append(e):
return 0
return 1
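# A hedged usage sketch, not part of the original library: the helper below
# exercises the public EMF API defined above end to end. The output filename,
# colors and coordinates are illustrative assumptions only.
def _example_usage(filename="example.emf"):
    """Create a small metafile with a rectangle, a line and some text."""
    emf = EMF(width=3.0, height=2.0, density=300)
    pen = emf.CreatePen(PS_SOLID, 2, (0x00, 0x00, 0xff))
    emf.SelectObject(pen)
    brush = emf.CreateSolidBrush((0xff, 0xcc, 0x00))
    emf.SelectObject(brush)
    emf.Rectangle(100, 100, 500, 400)
    emf.Polyline([(100, 100), (500, 400)])
    font = emf.CreateFont(-48, name="Times New Roman")
    emf.SelectObject(font)
    emf.TextOut(120, 120, "pyemf")
    return emf.save(filename)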
if __name__ == "__main__":
from optparse import OptionParser
parser=OptionParser(usage="usage: %prog [options] emf-files...")
parser.add_option("-v", action="store_true", dest="verbose", default=False)
parser.add_option("-s", action="store_true", dest="save", default=False)
parser.add_option("-o", action="store", dest="outputfile", default=None)
(options, args) = parser.parse_args()
if len(args)>0:
for filename in args:
e=EMF(verbose=options.verbose)
e.load(filename)
if options.save:
if not options.outputfile:
options.outputfile=filename+".out.emf"
print("Saving %s..." % options.outputfile)
ret=e.save(options.outputfile)
if ret:
print("%s saved successfully." % options.outputfile)
else:
print("problem saving %s!" % options.outputfile)
else:
e=EMF(verbose=options.verbose)
e.save("new.emf")
| 30.816963
| 293
| 0.631334
|
3aae620eb618e6867f9f27ee50d0029674e076a4
| 564
|
py
|
Python
|
configs/_base_/models/lsgan/lsgan_128x128.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | 718
|
2021-04-15T11:26:20.000Z
|
2022-03-31T03:11:56.000Z
|
configs/_base_/models/lsgan/lsgan_128x128.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | 191
|
2021-04-15T12:13:34.000Z
|
2022-03-31T16:04:36.000Z
|
configs/_base_/models/lsgan/lsgan_128x128.py
|
plutoyuxie/mmgeneration
|
0a7f5d16c970de1766ebf049d7a0264fe506504b
|
[
"Apache-2.0"
] | 107
|
2021-04-15T12:38:41.000Z
|
2022-03-27T02:47:16.000Z
|
# define GAN model
model = dict(
type='StaticUnconditionalGAN',
generator=dict(
type='LSGANGenerator',
output_scale=128,
base_channels=256,
noise_size=1024),
discriminator=dict(
type='LSGANDiscriminator', input_scale=128, base_channels=64),
gan_loss=dict(type='GANLoss', gan_type='lsgan'))
train_cfg = dict(disc_steps=1)
test_cfg = None
# define optimizer
optimizer = dict(
generator=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)),
discriminator=dict(type='Adam', lr=0.0002, betas=(0.5, 0.999)))
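# A hedged note (not part of the original config): in MMCV-style projects this
# base file is normally pulled into a full training config through the _base_
# inheritance mechanism rather than used directly, e.g.
#   _base_ = ['../_base_/models/lsgan/lsgan_128x128.py']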
| 28.2
| 70
| 0.664894
|
3bea37906b16ba8d27ce8c651239eb7db3c5afaf
| 13,106
|
py
|
Python
|
lib/xos-migrate/xosmigrate/main.py
|
iecedge/xos
|
566617f676fedcb2602266191c755d191b37018a
|
[
"Apache-2.0"
] | null | null | null |
lib/xos-migrate/xosmigrate/main.py
|
iecedge/xos
|
566617f676fedcb2602266191c755d191b37018a
|
[
"Apache-2.0"
] | null | null | null |
lib/xos-migrate/xosmigrate/main.py
|
iecedge/xos
|
566617f676fedcb2602266191c755d191b37018a
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# TO RUN
# source scripts/setup_venv.sh
# xos-migrate [-s <service-name>] [-r ~/cord]
# eg: xos-migrate -r ~/Sites/cord -s core -s fabric
# TODO
# - add support to specify a name to be given to the generated migration (--name parameter in django makemigrations)
# - add support to generate empty migrations (needed for data-only migrations)
import os
import sys
import argparse
import yaml
import shutil
from xosgenx.generator import XOSProcessor, XOSProcessorArgs
from xosconfig import Config
from multistructlog import create_logger
def get_abs_path(dir_):
if os.path.isabs(dir_):
return os.path.realpath(dir_)
if dir_[0] == "~" and not os.path.exists(dir_):
dir_ = os.path.expanduser(dir_)
return os.path.abspath(dir_)
return os.path.dirname(os.path.realpath(__file__)) + "/" + dir_
def print_banner(root):
log.info(r"---------------------------------------------------------------")
log.info(r" _ __ ")
log.info(r" _ ______ _____ ____ ___ (_)___ __________ _/ /____ ")
log.info(r" | |/_/ __ \/ ___/_____/ __ `__ \/ / __ `/ ___/ __ `/ __/ _ \ ")
log.info(r" _> </ /_/ (__ )_____/ / / / / / / /_/ / / / /_/ / /_/ __/ ")
log.info(r"/_/|_|\____/____/ /_/ /_/ /_/_/\__, /_/ \__,_/\__/\___/ ")
log.info(r" /____/ ")
log.info(r"---------------------------------------------------------------")
log.debug("CORD repo root", root=root)
log.debug("Storing logs in: %s" % os.environ["LOG_FILE"])
log.debug(r"---------------------------------------------------------------")
def generate_core_models(core_dir):
core_xproto = os.path.join(core_dir, "core.xproto")
args = XOSProcessorArgs(
output=core_dir,
target="django.xtarget",
dest_extension="py",
write_to_file="model",
files=[core_xproto],
)
XOSProcessor.process(args)
security_args = XOSProcessorArgs(
output=core_dir,
target="django-security.xtarget",
dest_file="security.py",
write_to_file="single",
files=[core_xproto],
)
XOSProcessor.process(security_args)
init_args = XOSProcessorArgs(
output=core_dir,
target="init.xtarget",
dest_file="__init__.py",
write_to_file="single",
files=[core_xproto],
)
XOSProcessor.process(init_args)
def find_xproto_in_folder(path):
"""
Recursively iterate a folder tree to look for any xProto file.
We use this function because the name of the xProto file may differ from the name of the folder (eg: olt-service)
:param path: the root folder to start the search
:return: [string]
"""
xprotos = []
for fn in os.listdir(path):
# skip hidden files and folders. plus other useless things
if fn.startswith(".") or fn == "venv-xos" or fn == "htmlcov":
continue
full_path = os.path.join(path, fn)
if fn.endswith(".xproto"):
xprotos.append(full_path)
elif os.path.isdir(full_path):
xprotos = xprotos + find_xproto_in_folder(full_path)
return xprotos
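# Illustration (hypothetical layout, not part of this tool): for a service checkout such as
#     olt-service/xos/synchronizer/models/volt.xproto
# the recursion above returns the full path to volt.xproto even though the file name does
# not match the folder name, which is exactly the case mentioned in the docstring.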
def find_decls_models(path):
"""
Recursively iterate a folder tree to look for any models.py file.
    These files contain the base models for the _decl generated models.
:param path: the root folder to start the search
:return: [string]
"""
decls = []
for fn in os.listdir(path):
# skip hidden files and folders. plus other useless things
if fn.startswith(".") or fn == "venv-xos" or fn == "htmlcov":
continue
full_path = os.path.join(path, fn)
if fn == "models.py":
decls.append(full_path)
elif os.path.isdir(full_path):
decls = decls + find_decls_models(full_path)
return decls
def get_service_name_from_config(path):
"""
Given a service folder look for the config.yaml file and find the name
:param path: the root folder to start the search
:return: string
"""
config = os.path.join(path, "xos/synchronizer/config.yaml")
if not os.path.isfile(config):
raise Exception("Config file not found at: %s" % config)
cfg_file = open(config)
cfg = yaml.load(cfg_file)
return cfg["name"]
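# Rough illustration (hypothetical values): given a xos/synchronizer/config.yaml whose
# contents are just
#     name: exampleservice
# get_service_name_from_config("<repo>/xos_services/exampleservice") would return
# "exampleservice"; only the top-level "name" key is read here.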
def generate_service_models(service_dir, service_dest_dir, service_name):
"""
Generate the django code starting from xProto for a given service.
:param service_dir: string (path to the folder)
:param service_name: string (name of the service)
:return: void
"""
xprotos = find_xproto_in_folder(service_dir)
decls = find_decls_models(service_dir)
log.debug("Generating models for %s from files %s" % (service_name, ", ".join(xprotos)))
out_dir = os.path.join(service_dest_dir, service_name)
if not os.path.isdir(out_dir):
os.mkdir(out_dir)
args = XOSProcessorArgs(
output=out_dir,
files=xprotos,
target="service.xtarget",
write_to_file="target",
)
XOSProcessor.process(args)
security_args = XOSProcessorArgs(
output=out_dir,
target="django-security.xtarget",
dest_file="security.py",
write_to_file="single",
files=xprotos,
)
XOSProcessor.process(security_args)
init_py_filename = os.path.join(out_dir, "__init__.py")
if not os.path.exists(init_py_filename):
open(init_py_filename, "w").write("# created by dynamicbuild")
# copy over models.py files from the service
if len(decls) > 0:
for file in decls:
fn = os.path.basename(file)
src_fn = file
dest_fn = os.path.join(out_dir, fn)
log.debug("Copying models.py from %s to %s" % (src_fn, dest_fn))
shutil.copyfile(src_fn, dest_fn)
# copy existing migrations from the service, otherwise they won't be incremental
src_dir = os.path.join(service_dir, "xos", "synchronizer", "migrations")
if os.path.isdir(src_dir):
dest_dir = os.path.join(out_dir, "migrations")
if os.path.isdir(dest_dir):
shutil.rmtree(dest_dir) # empty the folder, we'll copy everything again
shutil.copytree(src_dir, dest_dir)
def copy_service_migrations(service_dir, service_dest_dir, service_name):
"""
    Once the migrations are generated, copy them to the correct location
:param service_dir: string (path to the folder)
:param service_name: string (name of the service)
:return: void
"""
log.debug("Copying %s migrations to %s" % (service_name, service_dir))
migration_dir = os.path.join(service_dest_dir, service_name, "migrations")
dest_dir = os.path.join(service_dir, "xos", "synchronizer", "migrations")
if os.path.isdir(dest_dir):
shutil.rmtree(dest_dir) # empty the folder, we'll copy everything again
shutil.copytree(migration_dir, dest_dir)
    # clean up after the tool; the generated migrations have been moved into the service repo
shutil.rmtree(get_abs_path(os.path.join(migration_dir, "../")))
def monkey_patch_migration_template():
import django
django.setup()
import django.db.migrations.writer as dj
dj.MIGRATION_TEMPLATE = """\
# Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -*- coding: utf-8 -*-
# Generated by Django %(version)s on %(timestamp)s
from __future__ import unicode_literals
%(imports)s
class Migration(migrations.Migration):
%(replaces_str)s%(initial_str)s
dependencies = [
%(dependencies)s\
]
operations = [
%(operations)s\
]
"""
def configure_logging(verbose):
global log
# INITIALIZING LOGGER
Config.init()
cfg = Config().get("logging")
if verbose:
cfg["handlers"]["console"]["level"] = "DEBUG"
log = create_logger(cfg)
# SETTING ENV
os.environ["LOG_FILE"] = get_abs_path("django.log")
os.environ["XOS_CONFIG_SCHEMA"] = get_abs_path("migration_cfg_schema.yaml")
os.environ["XOS_CONFIG_FILE"] = get_abs_path("migration_cfg.yaml")
os.environ["MIGRATIONS"] = "true"
# this is populated in case we generate migrations for services and it's used in settings.py
os.environ["INSTALLED_APPS"] = ""
# PARAMS
parser = argparse.ArgumentParser(description="XOS Migrations")
required = parser.add_argument_group("required arguments")
required.add_argument(
"-s",
"--service",
action="append",
required=True,
dest="service_names",
help="The name of the folder containing the service in cord/orchestration/xos_services"
)
parser.add_argument(
"-r",
"--repo",
default=get_abs_path("~/cord"),
dest="repo_root",
help="The location of the folder containing the CORD repo root (default to ~/cord)"
)
parser.add_argument(
"--check",
default=False,
action="store_true",
dest="check",
help="Check if the migrations are generated for a given service. Does not apply any change."
)
parser.add_argument(
"-v",
"--verbose",
help="increase log verbosity",
dest="verbose",
action="store_true"
)
def run():
# cleaning up from possible incorrect states
if "INSTALLED_APPS" in os.environ:
del os.environ["INSTALLED_APPS"]
args = parser.parse_args()
configure_logging(args.verbose)
print_banner(args.repo_root)
# find absolute path to the code
xos_path = get_abs_path(os.path.join(args.repo_root, "orchestration/xos/xos/"))
django_path = get_abs_path(os.path.join(xos_path, "manage.py"))
core_dir = get_abs_path(os.path.join(xos_path, "core/models/"))
service_base_dir = get_abs_path(os.path.join(xos_path, "../../xos_services/"))
service_dest_dir = get_abs_path(os.path.join(xos_path, "services/"))
# we need to append the xos folder to sys.path
original_sys_path = sys.path
sys.path.append(xos_path)
log.info("Services: %s" % ", ".join(args.service_names))
django_cli_args = ['xos-migrate.py', "makemigrations"]
# generate the code for each service and create a list of parameters to pass to django
app_list = []
for service in args.service_names:
# NOTE we need core models to be there as all the services depend on them
generate_core_models(core_dir)
if service == "core":
django_cli_args.append("core")
else:
service_dir = os.path.join(service_base_dir, service)
service_name = get_service_name_from_config(service_dir)
generate_service_models(service_dir, service_dest_dir, service_name)
app_list.append("services.%s" % service_name)
django_cli_args.append(service_name)
if len(app_list) > 0:
os.environ["INSTALLED_APPS"] = ",".join(app_list)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "xos.settings")
monkey_patch_migration_template()
if args.check:
django_cli_args.append("--check")
django_cli_args.append("--dry-run")
from django.core.management import execute_from_command_line
try:
log.debug("Django CLI Args", args=django_cli_args)
execute_from_command_line(django_cli_args)
returncode = 0
except SystemExit as e:
returncode = e.message
if returncode != 0:
if args.check:
log.error("Migrations are not up to date with the service changes!")
else:
log.error("An error occurred")
sys.exit(returncode)
# copying migrations back to the service
for service in args.service_names:
if service == "core":
# we don't need to copy migrations for the core
continue
else:
service_dir = os.path.join(service_base_dir, service)
service_name = get_service_name_from_config(service_dir)
copy_service_migrations(service_dir, service_dest_dir, service_name)
    # restore original sys.path
sys.path = original_sys_path
| 33.348601
| 119
| 0.654815
|
ca9cc5293211043fd6fd80e0c29c22f6b295a48f
| 91
|
py
|
Python
|
gwkenProject/gwkenApp/apps.py
|
cs-fullstack-2019-spring/django-formclassv2-cw-rsalcido
|
13877b7dd1a76bd7c81744ad525d2ef1abe0a328
|
[
"Apache-2.0"
] | null | null | null |
gwkenProject/gwkenApp/apps.py
|
cs-fullstack-2019-spring/django-formclassv2-cw-rsalcido
|
13877b7dd1a76bd7c81744ad525d2ef1abe0a328
|
[
"Apache-2.0"
] | null | null | null |
gwkenProject/gwkenApp/apps.py
|
cs-fullstack-2019-spring/django-formclassv2-cw-rsalcido
|
13877b7dd1a76bd7c81744ad525d2ef1abe0a328
|
[
"Apache-2.0"
] | null | null | null |
from django.apps import AppConfig
class GwkenappConfig(AppConfig):
name = 'gwkenApp'
| 15.166667
| 33
| 0.758242
|
04c0fb21659a8a103feb46aacf68524153911d0e
| 727
|
py
|
Python
|
spinup/pddm_envs/__init__.py
|
RamiSketcher/spinningupMPAC
|
543a3587ef88cd098ec2f060f183d0f9969e92e3
|
[
"MIT"
] | null | null | null |
spinup/pddm_envs/__init__.py
|
RamiSketcher/spinningupMPAC
|
543a3587ef88cd098ec2f060f183d0f9969e92e3
|
[
"MIT"
] | null | null | null |
spinup/pddm_envs/__init__.py
|
RamiSketcher/spinningupMPAC
|
543a3587ef88cd098ec2f060f183d0f9969e92e3
|
[
"MIT"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import spinup.pddm_envs.cheetah
import spinup.pddm_envs.ant
import spinup.pddm_envs.dclaw
import spinup.pddm_envs.baoding
import spinup.pddm_envs.cube
| 36.35
| 74
| 0.782669
|
f487772bd46dc8b11c990781ca97fe4b9b4104ae
| 652
|
py
|
Python
|
airbyte-integrations/connectors/source-helium-account-hotspots/setup.py
|
afredojala/airbyte
|
b5e713e0443180d1e4a6a81b0b4baa08e1cec67f
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-helium-account-hotspots/setup.py
|
afredojala/airbyte
|
b5e713e0443180d1e4a6a81b0b4baa08e1cec67f
|
[
"MIT"
] | null | null | null |
airbyte-integrations/connectors/source-helium-account-hotspots/setup.py
|
afredojala/airbyte
|
b5e713e0443180d1e4a6a81b0b4baa08e1cec67f
|
[
"MIT"
] | null | null | null |
#
# Copyright (c) 2021 Airbyte, Inc., all rights reserved.
#
from setuptools import find_packages, setup
MAIN_REQUIREMENTS = [
"airbyte-cdk~=0.1",
]
TEST_REQUIREMENTS = [
"pytest~=6.1",
"pytest-mock~=3.6.1",
"source-acceptance-test",
]
setup(
name="source_helium_account_hotspots",
description="Source implementation for Helium Account Hotspots.",
author="Airbyte",
author_email="contact@airbyte.io",
packages=find_packages(),
install_requires=MAIN_REQUIREMENTS,
package_data={"": ["*.json", "schemas/*.json", "schemas/shared/*.json"]},
extras_require={
"tests": TEST_REQUIREMENTS,
},
)
| 21.733333
| 77
| 0.667178
|
d222adb0526d85207f77db9545cf70792d5cee9e
| 5,435
|
py
|
Python
|
meeting/gate/migrations/0001_initial.py
|
mauler/meeting-gate
|
75e66834e4090da09a71d93e166d030daf438f21
|
[
"MIT"
] | 1
|
2018-01-17T13:03:08.000Z
|
2018-01-17T13:03:08.000Z
|
meeting/gate/migrations/0001_initial.py
|
mauler/meeting-gate
|
75e66834e4090da09a71d93e166d030daf438f21
|
[
"MIT"
] | null | null | null |
meeting/gate/migrations/0001_initial.py
|
mauler/meeting-gate
|
75e66834e4090da09a71d93e166d030daf438f21
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.8 on 2017-12-24 17:04
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Wristband',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_on', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified_on', models.DateTimeField(auto_now=True, verbose_name='Modificado em ')),
('wristband_code', models.CharField(blank=True, db_index=True, max_length=10, null=True, unique=True, verbose_name='Pulseira')),
('entry_on', models.DateTimeField(null=True, verbose_name='Entrou em')),
('wallet_id', models.PositiveIntegerField(blank=True, null=True, verbose_name='ID Carteira')),
('shop_created_on', models.DateTimeField(blank=True, null=True, verbose_name='Comprado em')),
],
options={
'verbose_name': 'Pulseira',
'verbose_name_plural': 'Pulseiras',
'ordering': ('-entry_on', 'shop_created_on'),
},
),
migrations.CreateModel(
name='LocalTicket',
fields=[
('wristband_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='gate.Wristband')),
],
options={
'verbose_name': 'Ticket Local',
'verbose_name_plural': 'Tickets Locais',
'ordering': ('-entry_on',),
},
bases=('gate.wristband',),
),
migrations.CreateModel(
name='QRCode',
fields=[
('wristband_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='gate.Wristband')),
('uuid', models.CharField(blank=True, db_index=True, editable=False, max_length=36, null=True, unique=True, verbose_name='QRCode')),
('qrcode_requires_identification', models.BooleanField(default=False)),
],
options={
'ordering': ('created_on',),
'abstract': False,
},
bases=('gate.wristband', models.Model),
),
migrations.CreateModel(
name='GuestTicket',
fields=[
('qrcode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='gate.QRCode')),
('person_name', models.CharField(blank=True, max_length=100, verbose_name='Nome da Pessoa')),
('person_document', models.CharField(blank=True, max_length=14, verbose_name='Documento da Pessoa')),
('list_name', models.CharField(max_length=100, verbose_name='Nome da Lista de Convidados')),
],
options={
'verbose_name': 'Ticket de Convidado',
'verbose_name_plural': 'Tickets de Convidados',
'ordering': ('-entry_on', 'list_name'),
},
bases=('gate.qrcode', models.Model),
),
migrations.CreateModel(
name='PaperTicket',
fields=[
('qrcode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='gate.QRCode')),
('batch_name', models.CharField(max_length=100, verbose_name='Nome do Lote')),
('batch_line', models.PositiveIntegerField(verbose_name='Linha do Lote')),
],
options={
'verbose_name': 'Ticket de Papel',
'verbose_name_plural': 'Tickets de Papel',
'ordering': ('-entry_on', 'shop_created_on'),
},
bases=('gate.qrcode',),
),
migrations.CreateModel(
name='WebTicket',
fields=[
('qrcode_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='gate.QRCode')),
('person_name', models.CharField(blank=True, max_length=100, verbose_name='Nome da Pessoa')),
('person_document', models.CharField(blank=True, max_length=14, verbose_name='Documento da Pessoa')),
('product_id', models.PositiveIntegerField(blank=True, null=True, verbose_name='ID do Produto')),
('product_name', models.CharField(max_length=100, verbose_name='Nome do Produto')),
('buyer_name', models.CharField(max_length=100, verbose_name='Nome do Comprador')),
('buyer_email', models.EmailField(max_length=254, verbose_name='Email do Comprador')),
],
options={
'verbose_name': 'Ticket Web',
'verbose_name_plural': 'Tickets Web',
'ordering': ('-entry_on', 'shop_created_on'),
},
bases=('gate.qrcode', models.Model),
),
]
| 50.794393
| 194
| 0.588224
|
85760511a16164d9673734ae07fe6697794a9930
| 2,403
|
py
|
Python
|
Advanced workshops/reInvent2019-400/src/markov/rewards/reward_function.py
|
espittle/aws-deepracer-workshops
|
aa3679f98d83fac7239e939ad593ca2876559519
|
[
"MIT-0"
] | 1,116
|
2018-11-28T19:50:34.000Z
|
2022-03-31T08:24:16.000Z
|
Advanced workshops/reInvent2019-400/src/markov/rewards/reward_function.py
|
espittle/aws-deepracer-workshops
|
aa3679f98d83fac7239e939ad593ca2876559519
|
[
"MIT-0"
] | 31
|
2018-11-29T20:34:11.000Z
|
2021-09-23T16:48:44.000Z
|
Advanced workshops/reInvent2019-400/src/markov/rewards/reward_function.py
|
espittle/aws-deepracer-workshops
|
aa3679f98d83fac7239e939ad593ca2876559519
|
[
"MIT-0"
] | 619
|
2018-11-28T19:04:02.000Z
|
2022-03-21T13:42:21.000Z
|
""" Instead of the bubble penalty, we consider both the
distance between the learner car and the closest bot car,
as well as if the learner car is within the wedge area
apexed at the bot car. The assumption is that a crash
is more likely within the wedge. """
def reward_function(params):
reward = 1e-3
distance_from_center = params['distance_from_center']
track_width = params['track_width']
bot_car_progress_delta = params['bot_car_progress']
bot_car_lane_match = params['bot_car_lane_match']
speed = params['speed']
steering = abs(params['steering_angle'])
flag_unsafe = params['flag_unsafe']
dist_closest_bot_car = params['dist_closest_bot']
is_bot_in_camera = params['is_bot_in_camera']
reward = 1e-3 # likely crashed / close to off track
# # wide centerline
# marker_1 = 0.4 * track_width
# if distance_from_center <= marker_1:
# reward = 1
    if distance_from_center <= (0.3 * track_width):
        reward_lane = 1.0
        # check the tightest band first so the stricter penalties are actually reachable
        if distance_from_center <= (0.05 * track_width):
            # too close
            reward_lane = 1e-3
        elif distance_from_center <= (0.1 * track_width):
            # getting closer
            reward_lane *= 0.2
        elif distance_from_center <= (0.2 * track_width):
            # getting close to the center
            reward_lane *= 0.8
    else:
        reward_lane = 1e-3
# avoid closest bot car
reward_avoid = 1.0
# penalize if distance too close
if 0.8 <= dist_closest_bot_car < 1.0 and flag_unsafe:
reward_avoid *= 0.8
elif 0.5 <= dist_closest_bot_car < 0.8 and flag_unsafe:
reward_avoid *= 0.5
elif 0.3 < dist_closest_bot_car < 0.5 and flag_unsafe:
reward_avoid *= 0.01
# on the different lane of the closest ahead bot car
if bot_car_lane_match and is_bot_in_camera:
reward_lane = 0.0
else:
reward_lane = 1.0
    reward = 2.0*reward_avoid + 2.0*reward_lane
# speed penalty
if speed < 3.0:
reward *= 0.5
# # don't wiggle as much
# # Steering penality threshold, change the number based on your action space setting
# ABS_STEERING_THRESHOLD = 15
# # Penalize reward if the agent is steering too much
# if steering > ABS_STEERING_THRESHOLD:
# reward *= 0.8
return float(reward)
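# Rough usage sketch (hypothetical parameter values, illustration only):
#
#     params = {"distance_from_center": 0.25, "track_width": 1.0,
#               "bot_car_progress": 0.0, "bot_car_lane_match": False,
#               "speed": 3.5, "steering_angle": 5.0, "flag_unsafe": False,
#               "dist_closest_bot": 1.2, "is_bot_in_camera": False}
#     reward_function(params)  # -> 4.0 here: full lane and avoidance rewards, no speed penalty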
| 32.917808
| 89
| 0.632127
|
d77e44a77ea33c16a428165aaccfea7a86e20259
| 1,696
|
py
|
Python
|
examples/synthetic/tension/eval_speed.py
|
Argonne-National-Laboratory/pyoptmat
|
a6e5e8d0b93c77374d4ccbc65a86262eec5df77b
|
[
"MIT"
] | null | null | null |
examples/synthetic/tension/eval_speed.py
|
Argonne-National-Laboratory/pyoptmat
|
a6e5e8d0b93c77374d4ccbc65a86262eec5df77b
|
[
"MIT"
] | 1
|
2022-03-30T22:20:38.000Z
|
2022-03-31T15:02:22.000Z
|
examples/synthetic/tension/eval_speed.py
|
Argonne-National-Laboratory/pyoptmat
|
a6e5e8d0b93c77374d4ccbc65a86262eec5df77b
|
[
"MIT"
] | 2
|
2021-11-16T15:13:54.000Z
|
2022-01-06T21:35:42.000Z
|
#!/usr/bin/env python3
import sys
sys.path.append('../../..')
sys.path.append('.')
import numpy as np
import numpy.random as ra
import xarray as xr
import torch
from maker import make_model, load_data, sf
from pyoptmat import optimize
from tqdm import tqdm
import matplotlib.pyplot as plt
import time
import warnings
warnings.filterwarnings("ignore")
# Use doubles
torch.set_default_tensor_type(torch.DoubleTensor)
# Run on GPU!
if torch.cuda.is_available():
dev = "cuda:0"
else:
dev = "cpu"
# Run on CPU (home machine GPU is eh)
# On that machine 2 threads is optimal
dev = "cpu"
device = torch.device(dev)
# Don't try to optimize for the Young's modulus
def make(n, eta, s0, R, d, **kwargs):
return make_model(torch.tensor(0.5), n, eta, s0, R, d, use_adjoint = True,
device = device, **kwargs).to(device)
if __name__ == "__main__":
# 1) Load the data for the variance of interest,
# cut down to some number of samples, and flatten
scale = 0.05
nsamples = 20 # at each strain rate
times, strains, temps, true_stresses = load_data(scale, nsamples, device = device)
# 2) Setup names for each parameter and the initial conditions
names = ["n", "eta", "s0", "R", "d"]
ics = [ra.uniform(0,1) for i in range(len(names))]
# 3) Create the actual model
model = optimize.DeterministicModel(make, names, ics)
# 4) Run some number of times
loss = torch.nn.MSELoss(reduction = 'sum')
niter = 2
t1 = time.time()
t = tqdm(range(niter), total = niter)
for i in t:
model.zero_grad()
pred = model(times, strains, temps)
lossv = loss(pred, true_stresses)
lossv.backward()
te = time.time() - t1
print("Elapsed time: %f s" % te)
| 24.228571
| 84
| 0.680425
|
4b20554a9a60a2e58f01260faddea52b742415e1
| 20,422
|
py
|
Python
|
test/functional/test_framework/test_framework.py
|
leviathan-help/LVT-MasternodeCoin
|
201329113c9e7ab79c193abe262bdfba6b540885
|
[
"MIT"
] | 1
|
2019-11-13T09:17:16.000Z
|
2019-11-13T09:17:16.000Z
|
test/functional/test_framework/test_framework.py
|
leviathan-help/LVT-MasternodeCoin
|
201329113c9e7ab79c193abe262bdfba6b540885
|
[
"MIT"
] | null | null | null |
test/functional/test_framework/test_framework.py
|
leviathan-help/LVT-MasternodeCoin
|
201329113c9e7ab79c193abe262bdfba6b540885
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2014-2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Base class for RPC testing."""
from enum import Enum
import logging
import optparse
import os
import pdb
import shutil
import sys
import tempfile
import time
from .authproxy import JSONRPCException
from . import coverage
from .test_node import TestNode
from .util import (
MAX_NODES,
PortSeed,
assert_equal,
check_json_precision,
connect_nodes_bi,
disconnect_nodes,
get_datadir_path,
initialize_datadir,
p2p_port,
set_node_times,
sync_blocks,
sync_mempools,
)
class TestStatus(Enum):
PASSED = 1
FAILED = 2
SKIPPED = 3
TEST_EXIT_PASSED = 0
TEST_EXIT_FAILED = 1
TEST_EXIT_SKIPPED = 77
class BitcoinTestFramework():
"""Base class for a leviathan test script.
Individual leviathan test scripts should subclass this class and override the set_test_params() and run_test() methods.
Individual tests can also override the following methods to customize the test setup:
- add_options()
- setup_chain()
- setup_network()
- setup_nodes()
The __init__() and main() methods should not be overridden.
This class also contains various public and private helper methods."""
def __init__(self):
"""Sets test framework defaults. Do not override this method. Instead, override the set_test_params() method"""
self.setup_clean_chain = False
self.nodes = []
self.mocktime = 0
self.supports_cli = False
self.set_test_params()
assert hasattr(self, "num_nodes"), "Test must set self.num_nodes in set_test_params()"
def main(self):
"""Main function. This should not be overridden by the subclass test scripts."""
parser = optparse.OptionParser(usage="%prog [options]")
parser.add_option("--nocleanup", dest="nocleanup", default=False, action="store_true",
help="Leave leviathands and test.* datadir on exit or error")
parser.add_option("--noshutdown", dest="noshutdown", default=False, action="store_true",
help="Don't stop leviathands after the test execution")
parser.add_option("--srcdir", dest="srcdir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__))+"/../../../src"),
help="Source directory containing leviathand/leviathan-cli (default: %default)")
parser.add_option("--cachedir", dest="cachedir", default=os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../../cache"),
help="Directory for caching pregenerated datadirs")
parser.add_option("--tmpdir", dest="tmpdir", help="Root directory for datadirs")
parser.add_option("-l", "--loglevel", dest="loglevel", default="INFO",
help="log events at this level and higher to the console. Can be set to DEBUG, INFO, WARNING, ERROR or CRITICAL. Passing --loglevel DEBUG will output all logs to console. Note that logs at all levels are always written to the test_framework.log file in the temporary test directory.")
parser.add_option("--tracerpc", dest="trace_rpc", default=False, action="store_true",
help="Print out all RPC calls as they are made")
parser.add_option("--portseed", dest="port_seed", default=os.getpid(), type='int',
help="The seed to use for assigning port numbers (default: current process id)")
parser.add_option("--coveragedir", dest="coveragedir",
help="Write tested RPC commands into this directory")
parser.add_option("--configfile", dest="configfile",
help="Location of the test framework config file")
parser.add_option("--pdbonfailure", dest="pdbonfailure", default=False, action="store_true",
help="Attach a python debugger if test fails")
parser.add_option("--usecli", dest="usecli", default=False, action="store_true",
help="use bitcoin-cli instead of RPC for all commands")
self.add_options(parser)
(self.options, self.args) = parser.parse_args()
PortSeed.n = self.options.port_seed
os.environ['PATH'] = self.options.srcdir + ":" + self.options.srcdir + "/qt:" + os.environ['PATH']
check_json_precision()
self.options.cachedir = os.path.abspath(self.options.cachedir)
# Set up temp directory and start logging
if self.options.tmpdir:
self.options.tmpdir = os.path.abspath(self.options.tmpdir)
os.makedirs(self.options.tmpdir, exist_ok=False)
else:
self.options.tmpdir = tempfile.mkdtemp(prefix="test")
self._start_logging()
success = TestStatus.FAILED
try:
if self.options.usecli and not self.supports_cli:
raise SkipTest("--usecli specified but test does not support using CLI")
self.setup_chain()
self.setup_network()
time.sleep(5)
self.run_test()
success = TestStatus.PASSED
except JSONRPCException as e:
self.log.exception("JSONRPC error")
except SkipTest as e:
self.log.warning("Test Skipped: %s" % e.message)
success = TestStatus.SKIPPED
except AssertionError as e:
self.log.exception("Assertion failed")
except KeyError as e:
self.log.exception("Key error")
except Exception as e:
self.log.exception("Unexpected exception caught during testing")
except KeyboardInterrupt as e:
self.log.warning("Exiting after keyboard interrupt")
if success == TestStatus.FAILED and self.options.pdbonfailure:
print("Testcase failed. Attaching python debugger. Enter ? for help")
pdb.set_trace()
if not self.options.noshutdown:
self.log.info("Stopping nodes")
if self.nodes:
self.stop_nodes()
else:
for node in self.nodes:
node.cleanup_on_exit = False
self.log.info("Note: leviathands were not stopped and may still be running")
if not self.options.nocleanup and not self.options.noshutdown and success != TestStatus.FAILED:
self.log.info("Cleaning up")
shutil.rmtree(self.options.tmpdir)
else:
self.log.warning("Not cleaning up dir %s" % self.options.tmpdir)
if success == TestStatus.PASSED:
self.log.info("Tests successful")
exit_code = TEST_EXIT_PASSED
elif success == TestStatus.SKIPPED:
self.log.info("Test skipped")
exit_code = TEST_EXIT_SKIPPED
else:
self.log.error("Test failed. Test logging available at %s/test_framework.log", self.options.tmpdir)
self.log.error("Hint: Call {} '{}' to consolidate all logs".format(os.path.normpath(os.path.dirname(os.path.realpath(__file__)) + "/../combine_logs.py"), self.options.tmpdir))
exit_code = TEST_EXIT_FAILED
logging.shutdown()
sys.exit(exit_code)
# Methods to override in subclass test scripts.
def set_test_params(self):
"""Tests must this method to change default values for number of nodes, topology, etc"""
raise NotImplementedError
def add_options(self, parser):
"""Override this method to add command-line options to the test"""
pass
def setup_chain(self):
"""Override this method to customize blockchain setup"""
self.log.info("Initializing test directory " + self.options.tmpdir)
if self.setup_clean_chain:
self._initialize_chain_clean()
else:
self._initialize_chain()
def setup_network(self):
"""Override this method to customize test network topology"""
self.setup_nodes()
# Connect the nodes as a "chain". This allows us
# to split the network between nodes 1 and 2 to get
# two halves that can work on competing chains.
for i in range(self.num_nodes - 1):
connect_nodes_bi(self.nodes, i, i + 1)
self.sync_all()
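        # Resulting default topology (illustration): node0 <-> node1 <-> node2 <-> ... so that
        # cutting the single link between nodes 1 and 2 splits the chain into two halves.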
def setup_nodes(self):
"""Override this method to customize test node setup"""
extra_args = None
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args)
self.start_nodes()
def run_test(self):
"""Tests must override this method to define test logic"""
raise NotImplementedError
# Public helper methods. These can be accessed by the subclass test scripts.
def add_nodes(self, num_nodes, extra_args=None, rpchost=None, timewait=None, binary=None):
"""Instantiate TestNode objects"""
if extra_args is None:
extra_args = [[]] * num_nodes
if binary is None:
binary = [None] * num_nodes
assert_equal(len(extra_args), num_nodes)
assert_equal(len(binary), num_nodes)
for i in range(num_nodes):
self.nodes.append(TestNode(i, self.options.tmpdir, extra_args[i], rpchost, timewait=timewait, binary=binary[i], stderr=None, mocktime=self.mocktime, coverage_dir=self.options.coveragedir, use_cli=self.options.usecli))
def start_node(self, i, *args, **kwargs):
"""Start a leviathand"""
node = self.nodes[i]
node.start(*args, **kwargs)
node.wait_for_rpc_connection()
time.sleep(10)
if self.options.coveragedir is not None:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def start_nodes(self, extra_args=None, *args, **kwargs):
"""Start multiple leviathands"""
if extra_args is None:
extra_args = [None] * self.num_nodes
assert_equal(len(extra_args), self.num_nodes)
try:
for i, node in enumerate(self.nodes):
node.start(extra_args[i], *args, **kwargs)
for node in self.nodes:
node.wait_for_rpc_connection()
except:
# If one node failed to start, stop the others
self.stop_nodes()
raise
time.sleep(10)
if self.options.coveragedir is not None:
for node in self.nodes:
coverage.write_all_rpc_commands(self.options.coveragedir, node.rpc)
def stop_node(self, i):
"""Stop a leviathand test node"""
self.nodes[i].stop_node()
self.nodes[i].wait_until_stopped()
def stop_nodes(self):
"""Stop multiple leviathand test nodes"""
for node in self.nodes:
# Issue RPC to stop nodes
node.stop_node()
for node in self.nodes:
# Wait for nodes to stop
time.sleep(5)
node.wait_until_stopped()
def restart_node(self, i, extra_args=None):
"""Stop and start a test node"""
self.stop_node(i)
self.start_node(i, extra_args)
def assert_start_raises_init_error(self, i, extra_args=None, expected_msg=None, *args, **kwargs):
with tempfile.SpooledTemporaryFile(max_size=2**16) as log_stderr:
try:
self.start_node(i, extra_args, stderr=log_stderr, *args, **kwargs)
self.stop_node(i)
except Exception as e:
assert 'leviathand exited' in str(e) # node must have shutdown
self.nodes[i].running = False
self.nodes[i].process = None
if expected_msg is not None:
log_stderr.seek(0)
stderr = log_stderr.read().decode('utf-8')
if expected_msg not in stderr:
raise AssertionError("Expected error \"" + expected_msg + "\" not found in:\n" + stderr)
else:
if expected_msg is None:
assert_msg = "leviathand should have exited with an error"
else:
assert_msg = "leviathand should have exited with expected error " + expected_msg
raise AssertionError(assert_msg)
def wait_for_node_exit(self, i, timeout):
self.nodes[i].process.wait(timeout)
def split_network(self):
"""
Split the network of four nodes into nodes 0/1 and 2/3.
"""
disconnect_nodes(self.nodes[1], 2)
disconnect_nodes(self.nodes[2], 1)
self.sync_all([self.nodes[:2], self.nodes[2:]])
def join_network(self):
"""
Join the (previously split) network halves together.
"""
connect_nodes_bi(self.nodes, 1, 2)
self.sync_all()
def sync_all(self, node_groups=None):
if not node_groups:
node_groups = [self.nodes]
for group in node_groups:
sync_blocks(group)
sync_mempools(group)
def enable_mocktime(self):
"""Enable mocktime for the script.
mocktime may be needed for scripts that use the cached version of the
blockchain. If the cached version of the blockchain is used without
mocktime then the mempools will not sync due to IBD.
        For backward compatibility of the python scripts with previous
versions of the cache, this helper function sets mocktime to Jan 1,
2014 + (201 * 10 * 60)"""
self.mocktime = 1454124732 + (201 * 10 * 60)
def disable_mocktime(self):
self.mocktime = 0
# Private helper methods. These should not be accessed by the subclass test scripts.
def _start_logging(self):
# Add logger and logging handlers
self.log = logging.getLogger('TestFramework')
self.log.setLevel(logging.DEBUG)
# Create file handler to log all messages
fh = logging.FileHandler(self.options.tmpdir + '/test_framework.log')
fh.setLevel(logging.DEBUG)
# Create console handler to log messages to stderr. By default this logs only error messages, but can be configured with --loglevel.
ch = logging.StreamHandler(sys.stdout)
# User can provide log level as a number or string (eg DEBUG). loglevel was caught as a string, so try to convert it to an int
ll = int(self.options.loglevel) if self.options.loglevel.isdigit() else self.options.loglevel.upper()
ch.setLevel(ll)
# Format logs the same as leviathand's debug.log with microprecision (so log files can be concatenated and sorted)
formatter = logging.Formatter(fmt='%(asctime)s.%(msecs)03d000 %(name)s (%(levelname)s): %(message)s', datefmt='%Y-%m-%d %H:%M:%S')
formatter.converter = time.gmtime
fh.setFormatter(formatter)
ch.setFormatter(formatter)
# add the handlers to the logger
self.log.addHandler(fh)
self.log.addHandler(ch)
if self.options.trace_rpc:
rpc_logger = logging.getLogger("BitcoinRPC")
rpc_logger.setLevel(logging.DEBUG)
rpc_handler = logging.StreamHandler(sys.stdout)
rpc_handler.setLevel(logging.DEBUG)
rpc_logger.addHandler(rpc_handler)
def _initialize_chain(self):
"""Initialize a pre-mined blockchain for use by the test.
Create a cache of a 200-block-long chain (with wallet) for MAX_NODES
Afterward, create num_nodes copies from the cache."""
assert self.num_nodes <= MAX_NODES
create_cache = False
for i in range(MAX_NODES):
if not os.path.isdir(get_datadir_path(self.options.cachedir, i)):
create_cache = True
break
if create_cache:
self.log.debug("Creating data directories from cached datadir")
# find and delete old cache directories if any exist
for i in range(MAX_NODES):
if os.path.isdir(get_datadir_path(self.options.cachedir, i)):
shutil.rmtree(get_datadir_path(self.options.cachedir, i))
# Create cache directories, run bitcoinds:
for i in range(MAX_NODES):
datadir = initialize_datadir(self.options.cachedir, i)
args = [os.getenv("BITCOIND", "leviathand"), "-spendzeroconfchange=1", "-server", "-keypool=1", "-datadir=" + datadir, "-discover=0"]
if i > 0:
args.append("-connect=127.0.0.1:" + str(p2p_port(0)))
self.nodes.append(TestNode(i, self.options.cachedir, extra_args=[], rpchost=None, timewait=None, binary=None, stderr=None, mocktime=self.mocktime, coverage_dir=None))
self.nodes[i].args = args
self.start_node(i)
# Wait for RPC connections to be ready
for node in self.nodes:
node.wait_for_rpc_connection()
# Create a 200-block-long chain; each of the 4 first nodes
# gets 25 mature blocks and 25 immature.
# Note: To preserve compatibility with older versions of
# initialize_chain, only 4 nodes will generate coins.
#
# blocks are created with timestamps 10 minutes apart
# starting from 2010 minutes in the past
self.enable_mocktime()
block_time = self.mocktime - (201 * 60)
for i in range(2):
for peer in range(4):
for j in range(25):
set_node_times(self.nodes, block_time)
self.nodes[peer].generate(1)
block_time += 60
# Must sync before next peer starts generating blocks
sync_blocks(self.nodes)
# Shut them down, and clean up cache directories:
self.stop_nodes()
self.nodes = []
self.disable_mocktime()
def cache_path(n, *paths):
return os.path.join(get_datadir_path(self.options.cachedir, n), "regtest", *paths)
for i in range(MAX_NODES):
for entry in os.listdir(cache_path(i)):
if entry not in ['wallet.dat', 'chainstate', 'blocks', 'sporks', 'zerocoin', 'backups']:
os.remove(cache_path(i, entry))
for i in range(self.num_nodes):
from_dir = get_datadir_path(self.options.cachedir, i)
to_dir = get_datadir_path(self.options.tmpdir, i)
shutil.copytree(from_dir, to_dir)
initialize_datadir(self.options.tmpdir, i) # Overwrite port/rpcport in bitcoin.conf
def _initialize_chain_clean(self):
"""Initialize empty blockchain for use by the test.
Create an empty blockchain and num_nodes wallets.
Useful if a test case wants complete control over initialization."""
for i in range(self.num_nodes):
initialize_datadir(self.options.tmpdir, i)
class ComparisonTestFramework(BitcoinTestFramework):
"""Test framework for doing p2p comparison testing
Sets up some leviathand binaries:
- 1 binary: test binary
- 2 binaries: 1 test binary, 1 ref binary
- n>2 binaries: 1 test binary, n-1 ref binaries"""
def set_test_params(self):
self.num_nodes = 2
self.setup_clean_chain = True
def add_options(self, parser):
parser.add_option("--testbinary", dest="testbinary",
default=os.getenv("BITCOIND", "leviathand"),
help="leviathand binary to test")
parser.add_option("--refbinary", dest="refbinary",
default=os.getenv("BITCOIND", "leviathand"),
help="leviathand binary to use for reference nodes (if any)")
def setup_network(self):
extra_args = [['-whitelist=127.0.0.1']] * self.num_nodes
if hasattr(self, "extra_args"):
extra_args = self.extra_args
self.add_nodes(self.num_nodes, extra_args,
binary=[self.options.testbinary] +
[self.options.refbinary] * (self.num_nodes - 1))
self.start_nodes()
class SkipTest(Exception):
"""This exception is raised to skip a test"""
def __init__(self, message):
self.message = message
| 41.934292
| 310
| 0.621438
|
e1bda1d6cae40bc99636884978a3ffbe3a868d22
| 970
|
py
|
Python
|
grr/server/data_server/constants.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | 5
|
2017-03-17T08:25:09.000Z
|
2022-02-22T05:28:14.000Z
|
grr/server/data_server/constants.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | null | null | null |
grr/server/data_server/constants.py
|
mikecb/grr
|
52fdd977729af2a09a147301c55b8b7f1eccfa67
|
[
"Apache-2.0"
] | 3
|
2018-12-07T07:04:37.000Z
|
2022-02-22T05:28:16.000Z
|
#!/usr/bin/env python
"""Constants for the data servers."""
# Range for consistent hashing.
MAX_RANGE = 2**64
# Important file names for rebalancing.
REBALANCE_DIRECTORY = ".GRR_REBALANCE"
TRANSACTION_FILENAME = ".TRANSACTION"
REMOVE_FILENAME = ".TRANSACTION_REMOVE"
# HTTP status codes.
RESPONSE_OK = 200
RESPONSE_SERVER_NOT_AUTHORIZED = 401
RESPONSE_SERVER_NOT_ALLOWED = 403
RESPONSE_SERVER_NOT_REGISTERED = 401
RESPONSE_NOT_MASTER_SERVER = 404
RESPONSE_NOT_A_CLIENT = 400
RESPONSE_MAPPING_NOT_FOUND = 404
RESPONSE_NOT_FOUND = 404
RESPONSE_MASTER_IS_REBALANCING = 401
RESPONSE_DATA_SERVERS_UNREACHABLE = 400
RESPONSE_FILE_NOT_SAVED = 500
RESPONSE_WRONG_TRANSACTION = 401
RESPONSE_FILES_NOT_COPIED = 500
RESPONSE_NOT_COMMITED = 500
RESPONSE_FILES_NOT_MOVED = 500
RESPONSE_TRANSACTION_NOT_FOUND = 400
RESPONSE_EQUAL_DATA_SERVER = 401
RESPONSE_IS_MASTER_SERVER = 404
RESPONSE_INCOMPLETE_SYNC = 503
RESPONSE_DATA_SERVER_NOT_FOUND = 409
RESPONSE_RANGE_NOT_EMPTY = 402
| 26.944444
| 39
| 0.839175
|
598f57250a3c9b5c9bd8d27442c94a07e0e0e09e
| 8,979
|
py
|
Python
|
dist/ba_data/python/ba/macmusicapp.py
|
Bartixxx32/Bombsquad-Ballistica-Modded-Server
|
26d36f07a5b96702e4fbdf172c0d66671f1ee0bd
|
[
"MIT"
] | 317
|
2020-04-04T00:33:10.000Z
|
2022-03-28T01:07:09.000Z
|
assets/src/ba_data/python/ba/macmusicapp.py
|
Alshahriah/ballistica
|
326f6677a0118667e93ce9034849622ebef706fa
|
[
"MIT"
] | 315
|
2020-04-04T22:33:10.000Z
|
2022-03-31T22:50:02.000Z
|
assets/src/ba_data/python/ba/macmusicapp.py
|
Alshahriah/ballistica
|
326f6677a0118667e93ce9034849622ebef706fa
|
[
"MIT"
] | 97
|
2020-04-04T01:32:17.000Z
|
2022-03-16T19:02:59.000Z
|
# Released under the MIT License. See LICENSE for details.
#
"""Music playback functionality using the Mac Music (formerly iTunes) app."""
from __future__ import annotations
import threading
from typing import TYPE_CHECKING
import _ba
from ba._music import MusicPlayer
if TYPE_CHECKING:
from typing import Optional, Callable, Any
class MacMusicAppMusicPlayer(MusicPlayer):
"""A music-player that utilizes the macOS Music.app for playback.
Allows selecting playlists as entries.
"""
def __init__(self) -> None:
super().__init__()
self._thread = _MacMusicAppThread()
self._thread.start()
def on_select_entry(self, callback: Callable[[Any], None],
current_entry: Any, selection_target_name: str) -> Any:
# pylint: disable=cyclic-import
from bastd.ui.soundtrack import entrytypeselect as etsel
return etsel.SoundtrackEntryTypeSelectWindow(callback, current_entry,
selection_target_name)
def on_set_volume(self, volume: float) -> None:
self._thread.set_volume(volume)
def get_playlists(self, callback: Callable) -> None:
"""Asynchronously fetch the list of available iTunes playlists."""
self._thread.get_playlists(callback)
def on_play(self, entry: Any) -> None:
music = _ba.app.music
entry_type = music.get_soundtrack_entry_type(entry)
if entry_type == 'iTunesPlaylist':
self._thread.play_playlist(music.get_soundtrack_entry_name(entry))
else:
print('MacMusicAppMusicPlayer passed unrecognized entry type:',
entry_type)
def on_stop(self) -> None:
self._thread.play_playlist(None)
def on_app_shutdown(self) -> None:
self._thread.shutdown()
class _MacMusicAppThread(threading.Thread):
"""Thread which wrangles Music.app playback"""
def __init__(self) -> None:
super().__init__()
self._commands_available = threading.Event()
self._commands: list[list] = []
self._volume = 1.0
self._current_playlist: Optional[str] = None
self._orig_volume: Optional[int] = None
def run(self) -> None:
"""Run the Music.app thread."""
from ba._general import Call
from ba._language import Lstr
from ba._generated.enums import TimeType
_ba.set_thread_name('BA_MacMusicAppThread')
_ba.mac_music_app_init()
# Let's mention to the user we're launching Music.app in case
# it causes any funny business (this used to background the app
# sometimes, though I think that is fixed now)
def do_print() -> None:
_ba.timer(1.0,
Call(_ba.screenmessage, Lstr(resource='usingItunesText'),
(0, 1, 0)),
timetype=TimeType.REAL)
_ba.pushcall(do_print, from_other_thread=True)
# Here we grab this to force the actual launch.
_ba.mac_music_app_get_volume()
_ba.mac_music_app_get_library_source()
done = False
while not done:
self._commands_available.wait()
self._commands_available.clear()
# We're not protecting this list with a mutex but we're
# just using it as a simple queue so it should be fine.
while self._commands:
cmd = self._commands.pop(0)
if cmd[0] == 'DIE':
self._handle_die_command()
done = True
break
if cmd[0] == 'PLAY':
self._handle_play_command(target=cmd[1])
elif cmd[0] == 'GET_PLAYLISTS':
self._handle_get_playlists_command(target=cmd[1])
del cmd # Allows the command data/callback/etc to be freed.
def set_volume(self, volume: float) -> None:
"""Set volume to a value between 0 and 1."""
old_volume = self._volume
self._volume = volume
# If we've got nothing we're supposed to be playing,
# don't touch itunes/music.
if self._current_playlist is None:
return
# If volume is going to zero, stop actually playing
# but don't clear playlist.
if old_volume > 0.0 and volume == 0.0:
try:
assert self._orig_volume is not None
_ba.mac_music_app_stop()
_ba.mac_music_app_set_volume(self._orig_volume)
except Exception as exc:
print('Error stopping iTunes music:', exc)
elif self._volume > 0:
# If volume was zero, store pre-playing volume and start
# playing.
if old_volume == 0.0:
self._orig_volume = _ba.mac_music_app_get_volume()
self._update_mac_music_app_volume()
if old_volume == 0.0:
self._play_current_playlist()
def play_playlist(self, musictype: Optional[str]) -> None:
"""Play the given playlist."""
self._commands.append(['PLAY', musictype])
self._commands_available.set()
def shutdown(self) -> None:
"""Request that the player shuts down."""
self._commands.append(['DIE'])
self._commands_available.set()
self.join()
def get_playlists(self, callback: Callable[[Any], None]) -> None:
"""Request the list of playlists."""
self._commands.append(['GET_PLAYLISTS', callback])
self._commands_available.set()
def _handle_get_playlists_command(
self, target: Callable[[list[str]], None]) -> None:
from ba._general import Call
try:
playlists = _ba.mac_music_app_get_playlists()
playlists = [
p for p in playlists if p not in [
'Music', 'Movies', 'TV Shows', 'Podcasts', 'iTunes\xa0U',
'Books', 'Genius', 'iTunes DJ', 'Music Videos',
'Home Videos', 'Voice Memos', 'Audiobooks'
]
]
playlists.sort(key=lambda x: x.lower())
except Exception as exc:
print('Error getting iTunes playlists:', exc)
playlists = []
_ba.pushcall(Call(target, playlists), from_other_thread=True)
def _handle_play_command(self, target: Optional[str]) -> None:
if target is None:
if self._current_playlist is not None and self._volume > 0:
try:
assert self._orig_volume is not None
_ba.mac_music_app_stop()
_ba.mac_music_app_set_volume(self._orig_volume)
except Exception as exc:
print('Error stopping iTunes music:', exc)
self._current_playlist = None
else:
# If we've got something playing with positive
# volume, stop it.
if self._current_playlist is not None and self._volume > 0:
try:
assert self._orig_volume is not None
_ba.mac_music_app_stop()
_ba.mac_music_app_set_volume(self._orig_volume)
except Exception as exc:
print('Error stopping iTunes music:', exc)
# Set our playlist and play it if our volume is up.
self._current_playlist = target
if self._volume > 0:
self._orig_volume = (_ba.mac_music_app_get_volume())
self._update_mac_music_app_volume()
self._play_current_playlist()
def _handle_die_command(self) -> None:
# Only stop if we've actually played something
# (we don't want to kill music the user has playing).
if self._current_playlist is not None and self._volume > 0:
try:
assert self._orig_volume is not None
_ba.mac_music_app_stop()
_ba.mac_music_app_set_volume(self._orig_volume)
except Exception as exc:
print('Error stopping iTunes music:', exc)
def _play_current_playlist(self) -> None:
try:
from ba._general import Call
assert self._current_playlist is not None
if _ba.mac_music_app_play_playlist(self._current_playlist):
pass
else:
_ba.pushcall(Call(
_ba.screenmessage,
_ba.app.lang.get_resource('playlistNotFoundText') +
': \'' + self._current_playlist + '\'', (1, 0, 0)),
from_other_thread=True)
except Exception:
from ba import _error
_error.print_exception(
f'error playing playlist {self._current_playlist}')
def _update_mac_music_app_volume(self) -> None:
_ba.mac_music_app_set_volume(
max(0, min(100, int(100.0 * self._volume))))
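        # e.g. self._volume == 0.25 maps to a Music.app volume of 25 (clamped to the 0-100 range).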
| 38.536481
| 79
| 0.588484
|
ac3e0a83dc394f64c0099b7427feb164a99daf26
| 4,645
|
py
|
Python
|
librivox.py
|
dendisuhubdy/parallel_wavenet_vocoder
|
8f2bd7c0bd30cb90cc7ff8438ce78545c409227b
|
[
"MIT"
] | 155
|
2018-08-02T09:08:08.000Z
|
2022-01-03T22:14:52.000Z
|
librivox.py
|
popo0293/DTT_wavenet_pytorch
|
151c3f13ed62b133cc42bdfa25cf62c7bcf9e5a3
|
[
"MIT"
] | 1
|
2018-10-09T01:38:23.000Z
|
2018-10-09T03:46:05.000Z
|
librivox.py
|
popo0293/DTT_wavenet_pytorch
|
151c3f13ed62b133cc42bdfa25cf62c7bcf9e5a3
|
[
"MIT"
] | 34
|
2018-08-06T02:46:34.000Z
|
2021-03-15T02:18:20.000Z
|
from concurrent.futures import ProcessPoolExecutor
from functools import partial
import numpy as np
import os
import audio
from nnmnkwii import preprocessing as P
from hparams import hparams
from os.path import exists
import librosa
from wavenet_vocoder.util import is_mulaw_quantize, is_mulaw, is_raw
def build_from_path(in_dir, out_dir, num_workers=1, tqdm=lambda x: x):
executor = ProcessPoolExecutor(max_workers=num_workers)
futures = []
index = 1
# with open(os.path.join(in_dir, 'metadata.csv'), encoding='utf-8') as f:
# for line in f:
# parts = line.strip().split('|')
# wav_path = os.path.join(in_dir, 'wavs', '%s.wav' % parts[0])
# text = parts[2]
# futures.append(executor.submit(
# partial(_process_utterance, out_dir, index, wav_path, text)))
# index += 1
valid_ext = '.ogg .wav .mp3'.split()
for f in sorted(os.listdir(in_dir)):
valid = sum([f.endswith(ext) for ext in valid_ext])
if valid < 1:
continue
audio_filepath = os.path.join(in_dir, f)
text = audio_filepath # Not very informative
futures.append(executor.submit(
partial(_process_utterance, out_dir, index, audio_filepath, text)))
index += 1
return [tup for future in tqdm(futures) for tup in future.result()]
def _process_utterance(out_dir, index, audio_filepath, text):
# Load the audio to a numpy array:
wav_whole = audio.load_wav(audio_filepath)
if hparams.rescaling:
wav_whole = wav_whole / np.abs(wav_whole).max() * hparams.rescaling_max
    # This is a librivox source, so the audio files are going to be very long
    # compared to a typical 'utterance'; so split the wav into chunks
tup_results = []
n_samples = int(8.0 * hparams.sample_rate) # All 8 second utterances
n_chunks = wav_whole.shape[0] // n_samples
for chunk_idx in range(n_chunks):
chunk_start, chunk_end = chunk_idx * n_samples, (chunk_idx + 1) * n_samples
if chunk_idx == n_chunks - 1: # This is the last chunk - allow it to extend to the end of the file
chunk_end = None
wav = wav_whole[chunk_start: chunk_end]
# Mu-law quantize
if is_mulaw_quantize(hparams.input_type):
# [0, quantize_channels)
out = P.mulaw_quantize(wav, hparams.quantize_channels)
# Trim silences
start, end = audio.start_and_end_indices(out, hparams.silence_threshold)
wav = wav[start:end]
out = out[start:end]
constant_values = P.mulaw_quantize(0, hparams.quantize_channels)
out_dtype = np.int16
elif is_mulaw(hparams.input_type):
# [-1, 1]
out = P.mulaw(wav, hparams.quantize_channels)
constant_values = P.mulaw(0.0, hparams.quantize_channels)
out_dtype = np.float32
else:
# [-1, 1]
out = wav
constant_values = 0.0
out_dtype = np.float32
# Compute a mel-scale spectrogram from the trimmed wav:
# (N, D)
mel_spectrogram = audio.melspectrogram(wav).astype(np.float32).T
# lws pads zeros internally before performing stft
# this is needed to adjust time resolution between audio and mel-spectrogram
l, r = audio.lws_pad_lr(wav, hparams.fft_size, audio.get_hop_size())
# zero pad for quantized signal
out = np.pad(out, (l, r), mode="constant", constant_values=constant_values)
N = mel_spectrogram.shape[0]
assert len(out) >= N * audio.get_hop_size()
# time resolution adjustment
# ensure length of raw audio is multiple of hop_size so that we can use
# transposed convolution to upsample
out = out[:N * audio.get_hop_size()]
assert len(out) % audio.get_hop_size() == 0
timesteps = len(out)
# Write the spectrograms to disk:
audio_filename = 'librivox-audio-%04d-%05d.npy' % (index, chunk_idx,)
mel_filename = 'librivox-mel-%04d-%05d.npy' % (index, chunk_idx,)
text_idx = '%s - %05d' % (text, chunk_idx,)
np.save(os.path.join(out_dir, audio_filename),
out.astype(out_dtype), allow_pickle=False)
np.save(os.path.join(out_dir, mel_filename),
mel_spectrogram.astype(np.float32), allow_pickle=False)
# Add results tuple describing this training example:
tup_results.append((audio_filename, mel_filename, timesteps, text_idx))
# Return all the audio results tuples (unpack in caller)
return tup_results
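# Chunking arithmetic (illustrative, assuming a hypothetical hparams.sample_rate of 22050):
# n_samples = 8 * 22050 = 176400, so a ~60 s recording yields n_chunks = 7; chunks 0-5 are
# exactly 8 s long and the final chunk (chunk_end = None) absorbs the remaining audio.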
| 38.708333
| 107
| 0.640043
|
5bd5312f5de244f019439c181b150d5a8566bc78
| 104
|
py
|
Python
|
src/__init__.py
|
UrbanCCD-UChicago/sustainableSystems
|
d18b48cb161eb2bad9a4f200a4954444e4ec9ef1
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
UrbanCCD-UChicago/sustainableSystems
|
d18b48cb161eb2bad9a4f200a4954444e4ec9ef1
|
[
"MIT"
] | null | null | null |
src/__init__.py
|
UrbanCCD-UChicago/sustainableSystems
|
d18b48cb161eb2bad9a4f200a4954444e4ec9ef1
|
[
"MIT"
] | null | null | null |
#each sub directory of the package must be declared in this list
__name__ = 'src'
__all__ = ['utils']
| 20.8
| 64
| 0.721154
|
2939bf292702f0c2e295ff7de2581e547391a742
| 419
|
py
|
Python
|
bigpipe_response/javascript_dom_bind/react_dom_bind.py
|
shay-t/bigpipe-response
|
2a68dde1d7cfb3f837e1c108d45df1465607cd25
|
[
"MIT"
] | 13
|
2020-01-23T18:30:37.000Z
|
2020-02-14T19:05:28.000Z
|
bigpipe_response/javascript_dom_bind/react_dom_bind.py
|
shay-t/bigpipe-response
|
2a68dde1d7cfb3f837e1c108d45df1465607cd25
|
[
"MIT"
] | 3
|
2022-02-14T19:39:36.000Z
|
2022-02-27T20:26:05.000Z
|
bigpipe_response/javascript_dom_bind/react_dom_bind.py
|
shay-t/bigpipe-response
|
2a68dde1d7cfb3f837e1c108d45df1465607cd25
|
[
"MIT"
] | 1
|
2021-12-20T14:47:18.000Z
|
2021-12-20T14:47:18.000Z
|
import json
from bigpipe_response.javascript_dom_bind.javascript_dom_bind import JavascriptDOMBind
class ReactDomBind(JavascriptDOMBind):
def generate_bind_command(self, render_source: str, render_context: dict, target_element: str):
return '\nReactDOM.render(React.createElement({},\n {}),\n document.getElementById(\'{}\'));'.format(render_source, json.dumps(render_context), target_element)
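    # Illustration (hypothetical arguments): generate_bind_command('Clock', {'tz': 'UTC'}, 'pane-1')
    # produces, modulo whitespace:
    #     ReactDOM.render(React.createElement(Clock, {"tz": "UTC"}), document.getElementById('pane-1'));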
| 41.9
| 168
| 0.770883
|
1e983ca455f3c5fe0e702f7ca9a4779a09626b40
| 67
|
py
|
Python
|
tests/__init__.py
|
praveenbommalibits/JSONManipulator
|
51c6d9ad916d07d96643599ec05faa1abd6da82b
|
[
"MIT"
] | 2
|
2020-07-21T08:03:41.000Z
|
2020-09-27T18:19:06.000Z
|
tests/__init__.py
|
praveenbommalibits/JSONManipulator
|
51c6d9ad916d07d96643599ec05faa1abd6da82b
|
[
"MIT"
] | 8
|
2020-09-05T11:47:26.000Z
|
2020-10-19T21:25:12.000Z
|
tests/__init__.py
|
praveenbommalibits/JSONManipulator
|
51c6d9ad916d07d96643599ec05faa1abd6da82b
|
[
"MIT"
] | 3
|
2020-07-24T05:24:14.000Z
|
2020-10-31T20:07:58.000Z
|
# -*- coding: utf-8 -*-
"""Pytest package for JSONManipulator."""
| 16.75
| 41
| 0.61194
|
fea2f2b0da51abdec5c76ea821f782d5173a7c4e
| 20,759
|
py
|
Python
|
apriori/apriorieficiente.py
|
cipiasentini/final
|
cecb39c1693839b5ea9f4b8a74df7cde9875d8c8
|
[
"MIT"
] | null | null | null |
apriori/apriorieficiente.py
|
cipiasentini/final
|
cecb39c1693839b5ea9f4b8a74df7cde9875d8c8
|
[
"MIT"
] | 2
|
2021-01-28T19:27:39.000Z
|
2022-03-25T18:18:50.000Z
|
apriori/apriorieficiente.py
|
cipiasentini/final
|
cecb39c1693839b5ea9f4b8a74df7cde9875d8c8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import sys
import os
from itertools import chain, combinations
'''
 ____________________________________________________________________
| NAMING CONVENTIONS                                                  |
|▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔|
| - A doubled letter means uppercase. Example: ff == F.               |
| - An x implies a subscript. Example: ffx == Fx (subscript x).       |
| - Triple-quoted (multiline) comments describe functions of the      |
|   solution, while inline comments (with #) describe the             |
|   implementation.                                                   |
| |
 ▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔▔
- Added soporteMinimoOpcionales
- Added the number of rules at the top, before the resulting rules are presented
- Modified apriori
- Changed the rule format; it is now [[antecedent],[consequent],support,confidence]
- Changed the presentation of the rules: they are sorted by confidence, support and number of elements
- The support used in the presentation is that of the itemset the rule was generated from, e.g.
  if we have [1,2]->[3,4] the support used is that of the itemset [1,2,3,4]
'''
'''STEP 1: GENERATE FREQUENT ITEMSETS THAT SATISFY THE MINIMUM SUPPORT'''
'''Reads the input file and maps it into a list of lists'''
def leerDocumento(archivo):
    transacciones = open(archivo, 'r') # read the file
    allTransac = []
    for linea in transacciones.readlines(): # read the file line by line
        transaccion = list(map(int, linea.split())) # turn each line of the dataset into a list
        transaccion.sort() # sort the list
        allTransac.append(transaccion) # append it to the list of all transactions
    transacciones.close()
    return allTransac
'''Makes all the combinations of k elements over a subset'''
def subsets(arr, k):
return chain(*[combinations(arr, k)])
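# Example (purely illustrative): list(subsets([1, 2, 3], 2)) yields
# [(1, 2), (1, 3), (2, 3)], i.e. every k-combination in lexicographic order.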
'''Generates the candidates of order k from a set of frequent
itemsets of order k-1'''
def candidateGen(setActual, k):
# paso de union
# la carga ya se hizo en orden lexicografico, no hace falta ordenar aca
ccx = [] # lista de candidatos
ffx = []
ffx = setActual # set de items frecuentes
for f1 in ffx:
item1 = f1[0]
for f2 in ffx:
# para que siempre vaya para adelante con la generacion
# ejemplo que no compare item 1 con 2 y en otra iteracion 2 con 1
if (ffx.index(f2) <= ffx.index(f1)):
continue
# comparo elemento a elemento en orden lexicografico por igualdad
item2 = f2[0]
agregar = False
for i in range(0, len(item1)):
if (item1[i] != item2[i]):
if (i == len(item1)-1):
pos = i
agregar = True
else:
break
if agregar:
# al inicio c esta vacio ya que va a iterar varias veces, sino se hace esto se va
# a acoplar sobre la iteracion anterior
c = []
c.extend(item1)
c.append(item2[pos])
c.sort()
if (contieneEnListas(c, ccx) == False):
ccx.append(c)
# paso de poda, propiedad de clausura hacia abajo
# borro los candidatos que no esten en F[k]
for cx in ccx:
# genero los posibles subsets de longitud k-1 de cada candidato
# los genera en orden
subs = subsets(cx, k-1)
subs = list(subs) # convertimos el objeto en lista para iterarlo
#list(subs)
# y verifico que se encuentre en F[k]
for subset in subs:
sub = list(subset)
quitar = True
for itemSet in ffx:
if (sub == itemSet[0]):
quitar = False
break
if quitar:
# en caso de que intente borrar uno que ya se haya borrado antes
try:
ccx.remove(cx)
except ValueError:
pass
return ccx
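# Illustrative example (hypothetical itemsets): given the frequent 2-itemsets
# setActual = [[[1, 2], 5], [[1, 3], 4], [[2, 3], 6]] (each entry is [itemset, count]),
# candidateGen(setActual, 3) joins [1, 2] and [1, 3] into the candidate [1, 2, 3];
# the pruning step keeps it because every 2-subset ([1,2], [1,3], [2,3]) is frequent.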
'''Determines whether a list is contained within a list of lists'''
def contieneEnListas(listaContenida, listaContenedora):
# listacontenedora vacia
contiene = False
for lista in listaContenedora:
# si no esta vacia puede comparar pero inicia en verdadero
# asi si itera todo los elementos de la contenida
# va a devolver true porque contiene.
contiene = True
for contenido in listaContenida:
if contenido not in lista:
# si hay un elto que no contiene pone en falso y
# va al siguiente set de la lista contenedora
contiene = False
break
if contiene: return contiene
return contiene
'''Determines whether a list is contained within another list'''
def contiene(listaContenida, listaContenedora):
for contenido in listaContenida:
if contenido not in listaContenedora:
return False
return True
'''Assumes an itemset can appear at most once in a transaction.
If a candidate satisfies the minimum support it is appended to the list of frequent itemsets'''
def soporteMinimo(transacciones, candidatos, minSup, k):
itemsFrecuentes = []
f = [k, itemsFrecuentes] # es el f de iteracion k
# Calculo del soporte de los candidatos
for itemC in candidatos:
cont = 0
itemFrecuente = []
for t in transacciones:
if contiene(itemC, t): # se verifica si el itemC esta contenido dentro de la lista
cont += 1
# Verificacion de la satisfaccion de soporte minimo
if (float(cont)/len(transacciones) >= minSup): # si el candidato satisface el soporte minimo es un item frecuente
itemFrecuente = [itemC, cont]
itemsFrecuentes.append(itemFrecuente)
return f
'''Considers that an itemset may occur more than once in a transaction.
If a candidate satisfies the minimum support it is appended to the list of frequent itemsets'''
def soporteMinimoOpcionales(transacciones, candidatos, minSup, k):
itemsFrecuentes = []
f = [k, itemsFrecuentes]
# por cada candidato se comprueba si satisfacen el soporte minimo
# en principio se toma candidato a candidato
for itemC in candidatos:
cont = 0
itemFrecuente = []
# se pasa a verificar cuantas veces aparece el candidato en las transacciones
for transaccion in transacciones:
ant = 0
# ant almacena la cantidad de veces que aparece el item anterior,
# si tenemos [[1],[2] almacena las ocurrencias de 1
for item in itemC:
cant = 0
# cuenta la cantidad de ocurrencias de un item
for elemento in transaccion:
if item == elemento:
cant += 1
# contT siempre almacena la cantidad de ocurrencias del item del itemset
# con menor aparicion en una transaccion
if cant != 0:
if ant == 0:
ant = cant
contT = cant
else:
if cant <= ant:
contT = cant
else: contT = ant
else:
# si no se encuentra algun item del itemset actual se pasa a la siguiente transaccion
contT = 0
break
cont += contT
# se cuenta la cantidad de ocurrencias de un itemset en una misma transaccion
# antes de pasar a la siguiente
# Verificacion de la satisfaccion de la condicion de minimo soporte
if (float(cont)/len(transacciones) >= minSup):
itemFrecuente = [itemC, cont]
itemsFrecuentes.append(itemFrecuente)
return f
'''Gets the items present in the dataset'''
def initPass(transacciones):
itemSet = []
for transaccion in transacciones:
for item in transaccion:
if item not in itemSet:
itemSet.append(item)
devolver = []
for i in itemSet:
devolver.append([i])
return devolver
'''Main'''
def apriori (transacciones, minSup, minConf, tope, ms):
c1 = initPass(transacciones) # cc=C candidatos
# se verifica si el campo ms esta activo, si es asi es porque
# el usuario requiere que se consideren todas las ocurrencias de un itemset en una transaccion
# para esto se cambia el calculo del soporte
if ms: f1 = soporteMinimoOpcionales(transacciones, c1, minSup, k=1)
else: f1 = soporteMinimo(transacciones, c1, minSup, k=1)
k = 2
setActual = f1[1]
# en todosF se guardan todos los ff, es decir es F
todosF = dict()
todosF.update({str(f1[0]): f1[1]})
while (len(setActual) != 0):
print '--------NUEVA ITERACION------' + str(k) + '------'
ccx = candidateGen(setActual, k)
if ms: setNuevo = soporteMinimoOpcionales(transacciones, ccx, minSup, k)
else: setNuevo = soporteMinimo(transacciones, ccx, minSup, k)
setActual = setNuevo[1]
todosF.update({str(setNuevo[0]): setNuevo[1]})
# la siguiente seccion se utiliza si el campo tope de la funcion opcional esta activo
# si esta activo solo devuelva hasta la iteracion con los k == tope
# ya que de esta forma se obtiene solo itemsets de hasta una cantidad de elementos
# lo que conlleva a reglas de una determinada cantidad de elementos
if k != 0:
if k == tope:
break
k += 1
return todosF
'''STEP 2: GENERATE THE RULES THAT SATISFY THE MINIMUM CONFIDENCE'''
'''Returns a rule in the format [[antecedent(s)],[consequent(s)]]'''
def generarRegla(f, h):
# se realiza una resta de conjuntos entre el itemset y el consecuente
# para obtener el antecedente
antecedente = set(f).difference(set(h))
# se genera la regla
regla = [list(antecedente), h]
return regla
'''Generates the items that will become the consequents of rules with more
than one consequent. Similar to the previous function, but the pruning step changes'''
def candidateGenReglas(ff, hh):
# paso de union
ccx = [] # lista de candidatos
ffx = []
ffx = hh # set de items frecuentes
for f1 in ffx:
item1 = f1[0]
for f2 in ffx:
# para que siempre vaya para adelante con la generacion
# ejemplo que no compare item 1 con 2 y en otra iteracion 2 con 1
if (ffx.index(f2) <= ffx.index(f1)):
continue
# comparo elemento a elemento en orden lexicografico por igualdad
item2 = f2[0]
agregar = False
for i in range(0, len(item1)):
if (item1[i] != item2[i]):
if (i == len(item1)-1):
pos = i
agregar = True
else:
break
if agregar:
# inicio c vacia ya que va a iterar varias veces y sino se va a
# appendear sobre la iteracion anterior
c = []
c.extend(item1)
c.append(item2[pos])
c.sort()
if (contieneEnListas(c, ccx) == False):
ccx.append(c)
# ccx contiene la lista de candidatos
# Paso de poda, propiedad de clausura hacia abajo
# Recordemos que el conjunto de itemsets F ya cumplio con la propiedad de clausura hacia abajo
# cuando se realizo el primer paso del algoritmo: encontrar los itemsets frecuentes que satisfagan el soporte minimo
resultado = [] # lista de items que cumpliran la propiedad de clausura hacia abajo
if len(ccx) != 0:
m = str(len(ccx[0])) # compruebo la longitud de algun candidato para conocer la longitud de todos
for h in ccx: # por cada candidato verifico si cumple con la propiedad
for f in ff[m]:
if h == f[0]: # compruebo si existe dentro del conjunto de itemsets frecuentes, si hay alguno igual
resultado.append(h) # si existe satisface la propiedad de clausura hacia abajo y se acopla a la lista
break
return resultado
'''Gets the confidence of a rule'''
def confianza(ff, contF, reglaCandidata):
k = str(len(reglaCandidata[0])) # cantidad de elementos del antecedente
antecedentes = set(reglaCandidata[0])
# busco en F[longitud k] un itemset igual al antecedente y ordeno por las dudas
for f in ff[k]:
if (set(f[0]) == antecedentes):
antcant = f[1] # contador de cantidad de ocurrencias antecedente
break
# calculo del consecuente
conf = float(contF)/antcant
return conf
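# Illustrative example (hypothetical counts): for the rule {1,2} -> {3}, if the
# full itemset occurs 30 times (contF = 30) and the antecedent [1, 2] is stored
# in ff['2'] with a count of 40, the returned confidence is 30/40 = 0.75.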
'''Generates rules with more than one consequent'''
def apGenRules(ff, f, key, m, hh, minConf, reglas, n):
# reglas es reglasX en genRules
k = int(key)
if ((k > m+1) and (len(hh) != 0)):
# hhx genera los candidatos que se utilizaran como consecuentes para las reglas
# hh = [[[1]],[[2]],[[3]],[[4]]] es una lista de lista de lista
# la funcion candidateGenReglas que se utiliza aqui solo me devuelve solo una lista de listas
# [[1,2],[1,3],[1,4],[2,3],[2,4],[3,4]]
hhx = candidateGenReglas(ff, hh)
hhy = []
# hhy es una lista que posteriormente servira para transformar hhx en el formato adecuado
# para igresar en candidateGenReglas
# una vez obtenidos los itemsets, almacenados en hhx, que se utilizaran como consecuentes
# se generan las reglas
for h in hhx:
regla = generarRegla(f[0], h)
conf = confianza(ff, f[1], regla) # confianza(ff, f, reglaCandidata): #f[1] veces que se repite si lo divido por n tengo el soporte
if conf >= minConf:
soporte = float(f[1])/n # se calcula el soporte del itemset
regla.append(soporte) # se acopla el soporte del itemset
regla.append(conf) # se acopla la confianza de la regla
reglas.append(regla) # se acopla la regla a la lista de reglas (reglasX en genRules)
hhy.append([h]) # se da el formato [h] al elemento de hhx y se lo acopla a hhy
m += 1
apGenRules(ff, f, key, m, hhy, minConf, reglas, n) # genero recursivamente el resto
# de las reglas, hasta que llegue un punto que sea vacio
return reglas
'''Generates the single-consequent rules and appends them to the rule set'''
def genRules(itemsFrecuentes, minConf, n):
reglas = []
reglasX = [] # lista con reglas
ff = itemsFrecuentes # formato { 'k' : [[itemset], contador] }
for key in ff:
if (int(key) >= 2): # como la llave es un string lo paso a entero sino no podria iterar de esta manera
for f in ff[key]:
hh1 = [] #lista de consecuentes de las reglas con solo un consecuente
# en la iteracion cada item de f[0] sera un consecuente
for consecuente in f[0]:
regla1consec = generarRegla(f[0],[consecuente]) # generamo una regla de un solo consecunte
# se acopla un elemento a la lista de consecuentes hh1,
# este formato es para que se pueda utilizar posteriormente en apGenRules
hh1.append([[consecuente]])
conf = confianza(ff, f[1], regla1consec) # se calcula la confianza de la regla
if (conf >= minConf):
soporte = float(f[1])/n # se calcula el soporte
regla1consec.append(soporte) # se acopla el soporte del itemset
regla1consec.append(conf) # se acopla la confianza de la regla
reglasX.append(regla1consec) # se acopla la reglade un consecuente a la lista de reglas
# Aclaracion importante: reglasX contiene posteriormente todas las reglas
# a apGenRules entonces se le pasa por parametro todo el conjunto de reglas
# en genRules se generan solo las reglas de un consecuente, en apGenRules el resto
# pero reglasX contiene todas las reglas
# en apGenRules se le acoplan las reglas de mas de un consecuente
# (que se generan solo con los itemsets frecuentes y h)
# y en genRules las de un consecuente
reglas = apGenRules(ff, f, key, 1, hh1, minConf, reglasX, n) # genero reglas de mas de un consecuente
# ff es el diccionario con todos los Itemsets Frecuentes: F
# f es un solo Itemset
# key es el K
# 1 es el m en la primera iteracion
# hh1 es el H1 (lista de consecuentes de 1 elemento)
# reglasX tiene todas las reglas generadas no solo de un consecuente
return reglas
'''Filters the rules so that only those containing one of the
elements passed as a parameter are returned'''
def filtroElementos(reglas, elementos):
filtro = []
noExiste = []
# comprobacion de la existencia del elemento ingresado como parametro
# se verifica si se encuentra como antecedente o consecuente de alguna de las reglas generadas
for elemento in elementos:
existe = False
for regla in reglas:
if (elemento in regla[0]) or (elemento in regla[1]):
existe = True
if regla not in filtro:
filtro.append(regla)
if existe == False:
noExiste.append(elemento)
if len(filtro) == 0:
archivo = open('reglas.txt', 'w')
archivo.write('No hay reglas con los elementos:'+ str(elementos)+'.\n')
archivo.close()
return [filtro,noExiste]
'''Generates the file with the results'''
def resultados(reglas, minSup, minConf, nombre, elementos, noExiste):
archivo = open(nombre,'w') # para generar el archivo
archivo.write('------------------------ RESULTADOS ---------------------------\n')
archivo.write('\n--- Minimo Soporte: '+ minSup)
archivo.write(' ---')
archivo.write('--- Minima Confianza: '+ minConf + ' ---\n')
archivo.write('\n')
if elementos != 'nada':
archivo.write('Elementos Buscados: '+str(elementos)+ '\n\n')
if len(noExiste) != 0:
archivo.write('Aviso: no se encontraron reglas con los elementos: '+str(noExiste)+'\n\n')
archivo.write('Se generaron: ' + str(len(reglas)) + ' reglas\n\n')
archivo.write('---------------------------- REGLAS ------------------------------\n\n')
archivo.write('Regla #: antecedente --> consecuente soporte - confianza\n')
archivo.write('\n')
i = 1
for regla in reglas:
pre = regla[0]
post = regla[1]
soporte = regla[2]
conf = regla[3]
# impresion en pantalla
regla = "Regla %s: %s --> %s S(%.3f) - C(%.3f)" % (i, pre, post, soporte, conf)
# agregar al archivo
archivo.write(regla + '\n') #
i += 1
if (i==1):
archivo.write('\n\n\n\n\n No fue posible generar reglas con los parametros especificados ')
archivo.close()
return os.path.abspath('reglas.txt')
'''Entry point of the apriori algorithm; the client calls this function'''
def inicio(ds, sup, conf, longRule, rulesOfElements, repetidos):
if (repetidos == False):
ms = False
else:
ms = True
if (longRule == ""): longRule = 0
if (rulesOfElements == ""):
elementos = 'nada'
else:
elementos = map(int, (rulesOfElements).split())
dset = leerDocumento(ds)
# se generan los itemsets frecuentes y se agregan a un diccionario
ff = apriori(dset, float(sup), float(conf), int(longRule), ms)
n = len(dset)
# se generan una lista que contiene las reglas, donde el formato de cada regla es [[antecedente/s], [consecuente/s], soporte, confianza]
r = genRules(ff, float(conf), n)
# ordeno de mayor a menor por confianza, soporte y tambien de menor a mayor por longitud de regla
r.sort(key=lambda x:(x[2], x[3], -len(x[0]), -len(x[1])), reverse=True)
noExiste = []
if elementos != 'nada':
filtro = filtroElementos(r, elementos)
r = filtro[0]
noExiste = filtro[1]
return resultados(r, str(sup), str(conf), 'reglas.txt', elementos, noExiste)
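# Minimal usage sketch (hypothetical file name and thresholds):
#   inicio('transacciones.txt', 0.2, 0.6, "", "", False)
# reads a whitespace-separated transaction file, mines itemsets with minimum
# support 0.2, generates rules with minimum confidence 0.6 and returns the
# absolute path of the generated 'reglas.txt' report.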
| 46.233853
| 143
| 0.593622
|
46f3555b1b5ebe587a5eb6d109f1db97ab41629b
| 116
|
py
|
Python
|
apps/utils/tests/models/socials/__init__.py
|
jorgesaw/oclock
|
2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5
|
[
"MIT"
] | null | null | null |
apps/utils/tests/models/socials/__init__.py
|
jorgesaw/oclock
|
2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5
|
[
"MIT"
] | null | null | null |
apps/utils/tests/models/socials/__init__.py
|
jorgesaw/oclock
|
2a78bd4d1ab40eaa65ea346cf8c37556fcbbeca5
|
[
"MIT"
] | null | null | null |
from .socials import (
create_link,
create_social,
LINK_USERNAME,
SOCIAL_KEY_NAME,
SOCIAL_NAME
)
| 16.571429
| 22
| 0.689655
|
fb3eb52a532c16ef4c6b112ba1e0757a11a055b2
| 2,270
|
py
|
Python
|
hoomd/hpmc/validation/disk_eos.py
|
PetersResearchGroup/PCND
|
584768cc683a6df0152ead69b567d05b781aab2b
|
[
"BSD-3-Clause"
] | 2
|
2020-03-30T14:38:50.000Z
|
2020-06-02T05:53:41.000Z
|
hoomd/hpmc/validation/disk_eos.py
|
PetersResearchGroup/PCND
|
584768cc683a6df0152ead69b567d05b781aab2b
|
[
"BSD-3-Clause"
] | null | null | null |
hoomd/hpmc/validation/disk_eos.py
|
PetersResearchGroup/PCND
|
584768cc683a6df0152ead69b567d05b781aab2b
|
[
"BSD-3-Clause"
] | 1
|
2020-05-20T07:00:08.000Z
|
2020-05-20T07:00:08.000Z
|
from hoomd import *
from hoomd import hpmc
import math
import numpy as np
import unittest
import BlockAverage
context.initialize()
phi_p_ref = 0.698;
rel_err_cs = 0.0001; # guesstimate
P_ref = 9.1709;
P_ref_err = 0.0002; # actual
n = 256;
N = n**2
a = math.sqrt(math.pi / (4*phi_p_ref));
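# For unit-diameter disks on a square lattice the packing fraction per cell is
# phi = (pi / 4) / a**2, so a = sqrt(pi / (4 * phi_p_ref)) places the disks at
# the reference packing fraction used for this equation-of-state check.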
class diskEOS_test(unittest.TestCase):
def setUp(self):
self.system = init.create_lattice(unitcell=lattice.sq(a=a), n=n);
self.mc = hpmc.integrate.sphere(d = 0.2, seed=1)
self.mc.shape_param.set('A',diameter=1.0)
self.boxmc = hpmc.update.boxmc(self.mc,betaP=P_ref,seed=123)
self.boxmc.volume(delta=0.42,weight=1)
self.log = analyze.log(filename=None, quantities = ['hpmc_overlap_count','volume','phi_p', 'hpmc_d','hpmc_a','time'], overwrite=True, period=100)
self.log.register_callback('phi_p', lambda timestep: len(self.system.particles)/self.system.box.get_volume() * math.pi / 4.0)
# warm up
run(1e3);
def test_measure_phi_p(self):
phi_p_measure = []
def log_callback(timestep):
v = self.log.query('phi_p');
phi_p_measure.append(v)
if comm.get_rank() == 0:
print('phi_p =', v);
run(10e3,callback=log_callback, callback_period=50)
block = BlockAverage.BlockAverage(phi_p_measure)
phi_p_avg = np.mean(np.array(phi_p_measure))
i, phi_p_err = block.get_error_estimate()
if comm.get_rank() == 0:
(n, num, err, err_err) = block.get_hierarchical_errors()
print('Hierarchical error analysis:')
for (i, num_samples, e, ee) in zip(n, num, err, err_err):
print('{0} {1} {2} {3}'.format(i,num_samples,e,ee))
# max error 0.5%
self.assertLessEqual(phi_p_err/phi_p_avg,0.005)
# confidence interval, 0.95 quantile of the normal distribution
ci = 1.96
if comm.get_rank() == 0:
print('avg {:.6f} +- {:.6f}'.format(phi_p_avg, phi_p_err))
# check against reference value within reference error + measurement error
self.assertLessEqual(math.fabs(phi_p_avg-phi_p_ref),ci*(phi_p_ref*rel_err_cs+phi_p_err))
if __name__ == '__main__':
unittest.main(argv = ['test.py', '-v'])
| 32.898551
| 153
| 0.629075
|
808c1e685ce1e0674f1029b9611b1800cfd1d50b
| 410
|
py
|
Python
|
src/url_downloader.py
|
IBMPredictiveAnalytics/Python_URLDownloader
|
f001d09d6fbfd6bfe28ab268edfb80749c25ba0e
|
[
"Apache-2.0"
] | null | null | null |
src/url_downloader.py
|
IBMPredictiveAnalytics/Python_URLDownloader
|
f001d09d6fbfd6bfe28ab268edfb80749c25ba0e
|
[
"Apache-2.0"
] | null | null | null |
src/url_downloader.py
|
IBMPredictiveAnalytics/Python_URLDownloader
|
f001d09d6fbfd6bfe28ab268edfb80749c25ba0e
|
[
"Apache-2.0"
] | null | null | null |
# encoding=utf-8
import sys
import urllib2
if len(sys.argv) > 1 and sys.argv[1] == "-test":
url = sys.argv[2]
filePath = sys.argv[3]
else:
url = '%%url%%'
filePath = '%%path%%'
if not url or not filePath:
raise "must specify values for both URL and file path"
req = urllib2.Request(url)
content = urllib2.urlopen(req).read()
file = open(filePath, "wb")
file.write(content)
file.close()
| 19.52381
| 58
| 0.64878
|
f695848340563e3b2fd84cd3b690191d236fecca
| 12,004
|
py
|
Python
|
classifier/naive_bayes.py
|
RLIProject/tornado
|
bac94cdaeadc4b02e9ef6b29877decb4b14a3d1c
|
[
"MIT"
] | null | null | null |
classifier/naive_bayes.py
|
RLIProject/tornado
|
bac94cdaeadc4b02e9ef6b29877decb4b14a3d1c
|
[
"MIT"
] | null | null | null |
classifier/naive_bayes.py
|
RLIProject/tornado
|
bac94cdaeadc4b02e9ef6b29877decb4b14a3d1c
|
[
"MIT"
] | null | null | null |
"""
The Tornado Framework
By Ali Pesaranghader
University of Ottawa, Ontario, Canada
E-mail: apesaran -at- uottawa -dot- ca / alipsgh -at- gmail -dot- com
---
** The Hoeffding Tree Implementation **
Paper: Domingos, Pedro, and Geoff Hulten. "Mining high-speed data streams."
Published in: Proceedings of the sixth ACM SIGKDD international conference on Knowledge discovery and data mining. ACM, 2000.
URL: https://homes.cs.washington.edu/~pedrod/papers/kdd00.pdf
"""
import gc
import math
import operator
from collections import OrderedDict
from tornado_mod.classifier.classifier import SuperClassifier
from tornado_mod.dictionary.tornado_dictionary import TornadoDic
def calculate_hoeffding_bound(r, delta, n):
epsilon = math.sqrt((math.pow(r, 2) * math.log((1 / delta), math.e)) / (2 * n))
return epsilon
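# Worked example (hypothetical values): for a two-class problem R = log2(2) = 1,
# so with delta = 1e-7 and n = 200 observed examples the bound is
# epsilon = sqrt(1 * ln(1e7) / (2 * 200)) ~= sqrt(16.12 / 400) ~= 0.20.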
def calculate_entropy(x, y):
entropy = (-1) * (x / y) * math.log2(x / y)
return entropy
def calculate_info_gain(node):
"""This function calculate the information gain of attributes given a node."""
# CALCULATING EXPECTED INFORMATION OF WHOLE TRAINING DATA
expected_info_tr = 0
for c, v in node.CLASSES_DISTRIBUTIONS.items():
if v == 0:
continue
expected_info_tr += calculate_entropy(v, node.NUMBER_OF_EXAMPLES_SEEN)
# CALCULATING EXPECTED INFORMATION WITH CONSIDERING EACH ATTRIBUTE
# THEN CALCULATING THEIR GAINS - OR SCORES
for attr, values_and_distributions in node.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS.items():
expected_info_attr = 0
for value, class_distributions in values_and_distributions.items():
sum_classes_dist = sum(class_distributions.values())
expected_info_v = 0
for class_key, class_dist in class_distributions.items():
if class_dist == 0:
continue
expected_info_v += calculate_entropy(class_dist, sum_classes_dist)
expected_info_attr += (sum_classes_dist / node.NUMBER_OF_EXAMPLES_SEEN) * expected_info_v
node.CANDIDATE_ATTRIBUTES_SCORES[attr] = expected_info_tr - expected_info_attr
# HERE WE GO WITH THE "HOEFFDING NODE".
class HoeffdingNode:
def __init__(self, classes, candidate_attributes):
# CREATING ATTRIBUTES
self.__ATTRIBUTE_NAME = None
self.NUMBER_OF_EXAMPLES_SEEN = 0
self.CLASSES_DISTRIBUTIONS = OrderedDict()
self.CLASSES_PROB_DISTRIBUTIONS = OrderedDict()
self.CANDIDATE_ATTRIBUTES = candidate_attributes
self.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS = OrderedDict()
self.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS = OrderedDict()
self.CANDIDATE_ATTRIBUTES_SCORES = OrderedDict()
self.PARENT = None
self.BRANCHES = OrderedDict()
self.__CLASS = None
self.initialize_classes(classes)
self.initialize_attributes(classes)
def initialize_classes(self, classes):
for c in classes:
self.CLASSES_DISTRIBUTIONS[c] = 0
self.CLASSES_PROB_DISTRIBUTIONS[c] = 0.0
def initialize_attributes(self, classes):
for attribute in self.CANDIDATE_ATTRIBUTES:
self.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS[attribute.NAME] = OrderedDict()
self.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS[attribute.NAME] = OrderedDict()
for value in attribute.POSSIBLE_VALUES:
self.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS[attribute.NAME][value] = OrderedDict()
self.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS[attribute.NAME][value] = OrderedDict()
for c in classes:
self.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS[attribute.NAME][value][c] = 0
self.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS[attribute.NAME][value][c] = 0.0
def set_attribute_name(self, name):
"""This function is called when an attribute has been considered as an appropriate choice of splitting!"""
self.__ATTRIBUTE_NAME = name
def get_attribute_name(self):
return self.__ATTRIBUTE_NAME
def set_class(self, c):
"""This function is called when the node is supposed to be labelled with the most frequent class."""
self.__CLASS = c
def get_class(self):
c = self.__CLASS
return c
def get_child_node(self, value):
return self.BRANCHES[value]
class HoeffdingTree(SuperClassifier):
"""This is the implementation of Hoeffding Tree which is also known as Very Fast Decision Tree (VFDT)
in the literature. Hoeffding Tree is an incremental decision tree for particularly learning from data streams."""
LEARNER_NAME = TornadoDic.HOEFFDING_TREE
LEARNER_TYPE = TornadoDic.TRAINABLE
LEARNER_CATEGORY = TornadoDic.NOM_CLASSIFIER
def __init__(self, classes, attributes, delta=0.0000001, tie=0.05, n_min=200, leaf_prediction_mode=TornadoDic.NB,
max_memory_size=33554432, memory_check_step=1000000):
super().__init__(classes, attributes)
self.__ROOT = HoeffdingNode(classes, attributes)
self.ATTRIBUTES_NAMES = []
self.__DELTA = delta
self.__TIE = tie
self.__R = math.log2(len(classes))
self.__N_min = n_min
self.__MAX_MEMORY_SIZE = max_memory_size
self.__MEMORY_CHECK_STEP = memory_check_step
self.__PREDICTION_MODE = leaf_prediction_mode
self.__set_attributes_names()
def __set_attributes_names(self):
for attribute in self.ATTRIBUTES:
self.ATTRIBUTES_NAMES.append(attribute.NAME)
def get_root(self):
return self.__ROOT
def __trace(self, instance):
current_node = self.__ROOT
while len(current_node.BRANCHES) != 0:
index = self.ATTRIBUTES_NAMES.index(current_node.get_attribute_name())
current_node = current_node.get_child_node(instance[index])
return current_node
def train(self, instance):
x, y = instance[:-1], instance[-1]
node = self.__trace(x)
node.NUMBER_OF_EXAMPLES_SEEN += 1
node.CLASSES_DISTRIBUTIONS[y] += 1
if self.__PREDICTION_MODE == TornadoDic.NB:
for c in self.CLASSES:
node.CLASSES_PROB_DISTRIBUTIONS[c] = node.CLASSES_DISTRIBUTIONS[c] / node.NUMBER_OF_EXAMPLES_SEEN
for i in range(0, len(x)):
attribute, value = self.ATTRIBUTES[i], x[i]
if node.CANDIDATE_ATTRIBUTES.__contains__(attribute):
node.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS[attribute.NAME][value][y] += 1
for c, c_prob in node.CLASSES_DISTRIBUTIONS.items():
for attr in node.CANDIDATE_ATTRIBUTES:
attr_name = attr.NAME
k = len(node.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS[attr_name])
for value in attr.POSSIBLE_VALUES:
d = node.CANDIDATE_ATTRIBUTES_VALUES_DISTRIBUTIONS[attr_name][value][c]
node.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS[attr_name][value][c] = (d + 1) / (k + c_prob)
most_populated_class = max(node.CLASSES_DISTRIBUTIONS.items(), key=operator.itemgetter(1))
node.set_class(most_populated_class)
if (node.NUMBER_OF_EXAMPLES_SEEN - 1) != most_populated_class[1]:
calculate_info_gain(node)
if node.NUMBER_OF_EXAMPLES_SEEN >= self.__N_min and len(node.CANDIDATE_ATTRIBUTES_SCORES) != 0:
g1, g2 = self.__get_two_attributes_with_highest_scores(node.CANDIDATE_ATTRIBUTES_SCORES)
epsilon = calculate_hoeffding_bound(self.__R, self.__DELTA, node.NUMBER_OF_EXAMPLES_SEEN)
if g1[1] - g2[1] > epsilon or epsilon < self.__TIE:
node.set_attribute_name(g1[0])
new_attributes_set = self.ATTRIBUTES.copy()
attribute_index = self.ATTRIBUTES_NAMES.index(g1[0])
del new_attributes_set[attribute_index]
for value in self.ATTRIBUTES[attribute_index].POSSIBLE_VALUES:
leaf = HoeffdingNode(self.CLASSES, new_attributes_set)
node.BRANCHES[value] = leaf
leaf.PARENT = node
def print_tree(self, node, c=""):
c += "\t"
print(c + node.get_attribute_name() + " " + str(node.CLASSES_DISTRIBUTIONS))
for branch, child in node.BRANCHES.items():
print(c + ">" + branch + "<")
if child.get_attribute_name() is not None:
self.print_tree(child, c)
else:
print(c + str(child.PARENT.get_class()))
def test(self, instance):
if self._IS_READY:
x = instance[0:len(instance) - 1]
y = instance[len(instance) - 1]
node = self.__trace(x)
if node.get_class() is None:
node = node.PARENT
if self.__PREDICTION_MODE == TornadoDic.MC:
prediction = node.get_class()[0]
else:
predictions = OrderedDict()
for c in node.CLASSES_DISTRIBUTIONS.keys():
pr = node.CLASSES_PROB_DISTRIBUTIONS[c]
for attr_index in range(0, len(x)):
if node.CANDIDATE_ATTRIBUTES.__contains__(self.ATTRIBUTES[attr_index]):
attr = self.ATTRIBUTES[attr_index]
value = x[attr_index]
pr *= node.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS[attr.NAME][value][c]
predictions[c] = pr
prediction = max(predictions.items(), key=operator.itemgetter(1))[0]
self.update_confusion_matrix(y, prediction)
return prediction
else:
print("Please train a Hoeffding Tree classifier first.")
exit()
def get_prediction_prob_list(self, X):
node = self.__trace(X)
X = X[0:len(X) - 1]
if node.get_class() is None:
node = node.PARENT
prob = []
if self.__PREDICTION_MODE == TornadoDic.MC:
for c in self.CLASSES:
prob.append(node.CLASSES_PROB_DISTRIBUTIONS[c])
else:
for c in self.CLASSES:
pr = node.CLASSES_PROB_DISTRIBUTIONS[c]
for attr_index in range(0, len(X)):
if node.CANDIDATE_ATTRIBUTES.__contains__(self.ATTRIBUTES[attr_index]):
attr = self.ATTRIBUTES[attr_index]
value = X[attr_index]
pr *= node.CANDIDATE_ATTRIBUTES_VALUES_PROB_DISTRIBUTIONS[attr.NAME][value][c]
prob.append(pr)
prob_sum = sum(prob)
if prob_sum != 0.0:
prob = [x / prob_sum for x in prob]
else:
prob = [0.0 for x in prob]
return prob
def get_prediction_prob(self, X):
prob = self.get_prediction_prob_list(X)
pred_prob = {}
for i, c in enumerate(self.CLASSES):
pred_prob[c] = prob[i]
return pred_prob
@staticmethod
def __get_two_attributes_with_highest_scores(attributes_scores):
sorted_attributes_scores = sorted(attributes_scores.items(), key=operator.itemgetter(1), reverse=True)
g1 = sorted_attributes_scores[0]
if len(sorted_attributes_scores) >= 2:
g2 = sorted_attributes_scores[1]
else:
g2 = (0, 0)
return g1, g2
def reset(self):
super()._reset_stats()
del self.__ROOT
gc.collect()
self.__ROOT = HoeffdingNode(self.CLASSES, self.ATTRIBUTES)
| 39.880399
| 126
| 0.626208
|
96a9c423173277e94f2f7be42972113c3c211a10
| 8,515
|
py
|
Python
|
kedro_kubeflow/cli.py
|
michalbrys/kedro-kubeflow
|
be5f0b1f35462baf0d3f9041bc87f2a3cc6a7c9a
|
[
"Apache-2.0"
] | null | null | null |
kedro_kubeflow/cli.py
|
michalbrys/kedro-kubeflow
|
be5f0b1f35462baf0d3f9041bc87f2a3cc6a7c9a
|
[
"Apache-2.0"
] | null | null | null |
kedro_kubeflow/cli.py
|
michalbrys/kedro-kubeflow
|
be5f0b1f35462baf0d3f9041bc87f2a3cc6a7c9a
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import webbrowser
from pathlib import Path
import click
from .auth import AuthHandler
from .config import PluginConfig
from .context_helper import ContextHelper
LOG = logging.getLogger(__name__)
def format_params(params: list):
return dict((p[: p.find(":")], p[p.find(":") + 1 :]) for p in params)
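# Illustrative example (hypothetical parameter names):
#   format_params(["learning_rate:0.01", "epochs:10"])
# returns {"learning_rate": "0.01", "epochs": "10"}; each entry is split on its
# first ":" into a key/value pair.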
@click.group("Kubeflow")
def commands():
"""Kedro plugin adding support for Kubeflow Pipelines"""
pass
@commands.group(
name="kubeflow", context_settings=dict(help_option_names=["-h", "--help"])
)
@click.option(
"-e",
"--env",
"env",
type=str,
default=lambda: os.environ.get("KEDRO_ENV", "local"),
help="Environment to use.",
)
@click.pass_obj
@click.pass_context
def kubeflow_group(ctx, metadata, env):
"""Interact with Kubeflow Pipelines"""
ctx.ensure_object(dict)
ctx.obj["context_helper"] = ContextHelper.init(
metadata,
env,
)
@kubeflow_group.command()
@click.pass_context
def list_pipelines(ctx):
"""List deployed pipeline definitions"""
context_helper = ctx.obj["context_helper"]
click.echo(context_helper.kfp_client.list_pipelines())
@kubeflow_group.command()
@click.option(
"-i",
"--image",
type=str,
help="Docker image to use for pipeline execution.",
)
@click.option(
"-p",
"--pipeline",
"pipeline",
type=str,
help="Name of pipeline to run",
default="__default__",
)
@click.option(
"-en",
"--experiment-namespace",
"experiment_namespace",
type=str,
default=None,
help="Namespace where pipeline experiment run should be deployed to. Not needed "
"if provided experiment name already exists.",
)
@click.option(
"--param",
"params",
type=str,
multiple=True,
help="Parameters override in form of `key=value`",
)
@click.pass_context
def run_once(
ctx, image: str, pipeline: str, experiment_namespace: str, params: list
):
"""Deploy pipeline as a single run within given experiment.
Config can be specified in kubeflow.yml as well."""
context_helper = ctx.obj["context_helper"]
config = context_helper.config.run_config
context_helper.kfp_client.run_once(
pipeline=pipeline,
image=image if image else config.image,
experiment_name=config.experiment_name,
experiment_namespace=experiment_namespace,
run_name=config.run_name,
wait=config.wait_for_completion,
image_pull_policy=config.image_pull_policy,
parameters=format_params(params),
)
@kubeflow_group.command()
@click.pass_context
def ui(ctx) -> None:
"""Open Kubeflow Pipelines UI in new browser tab"""
host = ctx.obj["context_helper"].config.host
webbrowser.open_new_tab(host)
@kubeflow_group.command()
@click.option(
"-i",
"--image",
type=str,
help="Docker image to use for pipeline execution.",
)
@click.option(
"-p",
"--pipeline",
"pipeline",
type=str,
help="Name of pipeline to run",
default="__default__",
)
@click.option(
"-o",
"--output",
type=str,
default="pipeline.yml",
help="Pipeline YAML definition file.",
)
@click.pass_context
def compile(ctx, image, pipeline, output) -> None:
"""Translates Kedro pipeline into YAML file with Kubeflow Pipeline definition"""
context_helper = ctx.obj["context_helper"]
config = context_helper.config.run_config
context_helper.kfp_client.compile(
pipeline=pipeline,
image_pull_policy=config.image_pull_policy,
image=image if image else config.image,
output=output,
)
@kubeflow_group.command()
@click.option(
"-i",
"--image",
type=str,
help="Docker image to use for pipeline execution.",
)
@click.option(
"-p",
"--pipeline",
"pipeline",
type=str,
help="Name of pipeline to upload",
default="__default__",
)
@click.pass_context
def upload_pipeline(ctx, image, pipeline) -> None:
"""Uploads pipeline to Kubeflow server"""
context_helper = ctx.obj["context_helper"]
config = context_helper.config.run_config
context_helper.kfp_client.upload(
pipeline_name=pipeline,
image=image if image else config.image,
image_pull_policy=config.image_pull_policy,
)
@kubeflow_group.command()
@click.option(
"-p",
"--pipeline",
"pipeline",
type=str,
help="Name of pipeline to run",
default="__default__",
)
@click.option(
"-c",
"--cron-expression",
type=str,
help="Cron expression for recurring run",
required=True,
)
@click.option(
"-x",
"--experiment-name",
"experiment_name",
type=str,
help="Name of experiment associated with this run.",
)
@click.option(
"-en",
"--experiment-namespace",
"experiment_namespace",
type=str,
default=None,
help="Namespace where pipeline experiment run should be deployed to. Not needed "
"if provided experiment name already exists.",
)
@click.option(
"--param",
"params",
type=str,
multiple=True,
help="Parameters override in form of `key=value`",
)
@click.pass_context
def schedule(
ctx,
pipeline: str,
experiment_namespace: str,
experiment_name: str,
cron_expression: str,
params: list,
):
"""Schedules recurring execution of latest version of the pipeline"""
context_helper = ctx.obj["context_helper"]
config = context_helper.config.run_config
experiment = experiment_name if experiment_name else config.experiment_name
context_helper.kfp_client.schedule(
pipeline,
experiment,
experiment_namespace,
cron_expression,
run_name=config.scheduled_run_name,
parameters=format_params(params),
)
@kubeflow_group.command()
@click.argument("kfp_url", type=str)
@click.option("--with-github-actions", is_flag=True, default=False)
@click.pass_context
def init(ctx, kfp_url: str, with_github_actions: bool):
"""Initializes configuration for the plugin"""
context_helper = ctx.obj["context_helper"]
project_name = context_helper.context.project_path.name
if with_github_actions:
image = f"gcr.io/${{google_project_id}}/{project_name}:${{commit_id}}"
run_name = f"{project_name}:${{commit_id}}"
else:
image = project_name
run_name = project_name
sample_config = PluginConfig.sample_config(
url=kfp_url, image=image, project=project_name, run_name=run_name
)
config_path = Path.cwd().joinpath("conf/base/kubeflow.yaml")
with open(config_path, "w") as f:
f.write(sample_config)
click.echo(f"Configuration generated in {config_path}")
if with_github_actions:
PluginConfig.initialize_github_actions(
project_name,
where=Path.cwd(),
templates_dir=Path(__file__).parent / "templates",
)
@kubeflow_group.command(hidden=True)
@click.argument("kubeflow_run_id", type=str)
@click.option(
"--output",
type=str,
default="/tmp/mlflow_run_id",
)
@click.pass_context
def mlflow_start(ctx, kubeflow_run_id: str, output: str):
import mlflow
from kedro_mlflow.framework.context import get_mlflow_config
token = AuthHandler().obtain_id_token()
if token:
os.environ["MLFLOW_TRACKING_TOKEN"] = token
LOG.info("Configuring MLFLOW_TRACKING_TOKEN")
try:
kedro_context = ctx.obj["context_helper"].context
mlflow_conf = get_mlflow_config(kedro_context)
mlflow_conf.setup(kedro_context)
except AttributeError:
kedro_session = ctx.obj["context_helper"].session
with kedro_session:
mlflow_conf = get_mlflow_config(kedro_session)
mlflow_conf.setup()
run = mlflow.start_run(
experiment_id=mlflow_conf.experiment.experiment_id, nested=False
)
mlflow.set_tag("kubeflow_run_id", kubeflow_run_id)
with open(output, "w") as f:
f.write(run.info.run_id)
click.echo(f"Started run: {run.info.run_id}")
@kubeflow_group.command(hidden=True)
@click.argument("pvc_name", type=str)
def delete_pipeline_volume(pvc_name: str):
import kubernetes.client
import kubernetes.config
kubernetes.config.load_incluster_config()
current_namespace = open(
"/var/run/secrets/kubernetes.io/serviceaccount/namespace"
).read()
kubernetes.client.CoreV1Api().delete_namespaced_persistent_volume_claim(
pvc_name,
current_namespace,
)
click.echo(f"Volume removed: {pvc_name}")
| 26.119632
| 85
| 0.677393
|
6ca3e15e30f2b5c08cf6960512548aceba059723
| 9,555
|
py
|
Python
|
masakari/context.py
|
iorchard/masakari
|
4f1705d9940652719d156407b743866ce78e9a53
|
[
"Apache-2.0"
] | null | null | null |
masakari/context.py
|
iorchard/masakari
|
4f1705d9940652719d156407b743866ce78e9a53
|
[
"Apache-2.0"
] | null | null | null |
masakari/context.py
|
iorchard/masakari
|
4f1705d9940652719d156407b743866ce78e9a53
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
RequestContext: context for requests that persist through all of masakari.
"""
import copy
from keystoneauth1.access import service_catalog as ksa_service_catalog
from keystoneauth1 import plugin
from oslo_context import context
from oslo_db.sqlalchemy import enginefacade
from oslo_log import log as logging
from oslo_utils import timeutils
from masakari import exception
from masakari.i18n import _
from masakari import policy
from masakari import utils
LOG = logging.getLogger(__name__)
class _ContextAuthPlugin(plugin.BaseAuthPlugin):
"""A keystoneauth auth plugin that uses the values from the Context.
Ideally we would use the plugin provided by auth_token middleware however
this plugin isn't serialized yet so we construct one from the serialized
auth data.
"""
def __init__(self, auth_token, sc):
super(_ContextAuthPlugin, self).__init__()
self.auth_token = auth_token
self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)
def get_token(self, *args, **kwargs):
return self.auth_token
def get_endpoint(self, session, service_type=None, interface=None,
region_name=None, service_name=None, **kwargs):
return self.service_catalog.url_for(service_type=service_type,
service_name=service_name,
interface=interface,
region_name=region_name)
@enginefacade.transaction_context_provider
class RequestContext(context.RequestContext):
"""Security context and request information.
Represents the user taking a given action within the system.
"""
def __init__(self, user_id=None, project_id=None,
is_admin=None, read_deleted="no",
roles=None, remote_address=None, timestamp=None,
request_id=None, auth_token=None, overwrite=True,
user_name=None, project_name=None, service_catalog=None,
user_auth_plugin=None, **kwargs):
""":param read_deleted: 'no' indicates deleted records are hidden,
'yes' indicates deleted records are visible,
'only' indicates that *only* deleted records are visible.
:param overwrite: Set to False to ensure that the greenthread local
copy of the index is not overwritten.
:param user_auth_plugin: The auth plugin for the current request's
authentication data.
:param kwargs: Extra arguments that might be present, but we ignore
because they possibly came in from older rpc messages.
"""
user = kwargs.pop('user', None)
tenant = kwargs.pop('tenant', None)
super(RequestContext, self).__init__(
auth_token=auth_token,
user=user_id or user,
tenant=project_id or tenant,
domain=kwargs.pop('domain', None),
user_domain=kwargs.pop('user_domain', None),
project_domain=kwargs.pop('project_domain', None),
is_admin=is_admin,
read_only=kwargs.pop('read_only', False),
show_deleted=kwargs.pop('show_deleted', False),
request_id=request_id,
resource_uuid=kwargs.pop('resource_uuid', None),
overwrite=overwrite,
roles=roles,
is_admin_project=kwargs.pop('is_admin_project', True),
global_request_id=kwargs.pop('global_request_id', None))
# oslo_context's RequestContext.to_dict() generates this field, we can
# safely ignore this as we don't use it.
kwargs.pop('user_identity', None)
if kwargs:
LOG.debug('Arguments dropped when creating context: %s',
str(kwargs))
# FIXME: user_id and project_id duplicate information that is
# already present in the oslo_context's RequestContext. We need to
# get rid of them.
self.user_id = user_id
self.project_id = project_id
self.read_deleted = read_deleted
self.remote_address = remote_address
if not timestamp:
timestamp = timeutils.utcnow()
if isinstance(timestamp, str):
timestamp = timeutils.parse_strtime(timestamp)
self.timestamp = timestamp
if service_catalog:
# Only include required parts of service_catalog
self.service_catalog = [
s for s in service_catalog if s.get('type') in (
'compute', 'identity')]
else:
# if list is empty or none
self.service_catalog = []
self.user_name = user_name
self.project_name = project_name
self.is_admin = is_admin
self.user_auth_plugin = user_auth_plugin
if self.is_admin is None:
self.is_admin = policy.check_is_admin(self)
def get_auth_plugin(self):
if self.user_auth_plugin:
return self.user_auth_plugin
else:
return _ContextAuthPlugin(self.auth_token, self.service_catalog)
def _get_read_deleted(self):
return self._read_deleted
def _set_read_deleted(self, read_deleted):
if read_deleted not in ('no', 'yes', 'only'):
raise ValueError(_("read_deleted can only be one of 'no', "
"'yes' or 'only', not %r") % read_deleted)
self._read_deleted = read_deleted
def _del_read_deleted(self):
del self._read_deleted
read_deleted = property(_get_read_deleted, _set_read_deleted,
_del_read_deleted)
def to_dict(self):
values = super(RequestContext, self).to_dict()
# FIXME: defensive hasattr() checks need to be
# removed once we figure out why we are seeing stack
# traces
values.update({
'user_id': getattr(self, 'user_id', None),
'project_id': getattr(self, 'project_id', None),
'is_admin': getattr(self, 'is_admin', None),
'read_deleted': getattr(self, 'read_deleted', 'no'),
'remote_address': getattr(self, 'remote_address', None),
'timestamp': utils.strtime(self.timestamp) if hasattr(
self, 'timestamp') else None,
'request_id': getattr(self, 'request_id', None),
'user_name': getattr(self, 'user_name', None),
'service_catalog': getattr(self, 'service_catalog', None),
'project_name': getattr(self, 'project_name', None)
})
return values
@classmethod
def from_dict(cls, values):
return cls(**values)
def elevated(self, read_deleted=None):
"""Return a version of this context with admin flag set."""
context = copy.copy(self)
# context.roles must be deepcopied to leave original roles
# without changes
context.roles = copy.deepcopy(self.roles)
context.is_admin = True
if 'admin' not in context.roles:
context.roles.append('admin')
if read_deleted is not None:
context.read_deleted = read_deleted
return context
def can(self, action, target=None, fatal=True):
"""Verifies that the given action is valid on the target in this context.
:param action: string representing the action to be checked.
:param target: dictionary representing the object of the action
for object creation this should be a dictionary representing the
location of the object e.g. ``{'project_id': context.project_id}``.
If None, then this default target will be considered:
{'project_id': self.project_id, 'user_id': self.user_id}
:param fatal: if False, will return False when an exception.Forbidden
occurs.
:raises masakari.exception.Forbidden: if verification fails and fatal
is True.
:return: returns a non-False value (not necessarily "True") if
authorized and False if not authorized and fatal is False.
"""
if target is None:
target = {'project_id': self.project_id,
'user_id': self.user_id}
try:
return policy.authorize(self, action, target)
except exception.Forbidden:
if fatal:
raise
return False
def to_policy_values(self):
policy = super(RequestContext, self).to_policy_values()
policy['is_admin'] = self.is_admin
return policy
def __str__(self):
return "<Context %s>" % self.to_dict()
def get_admin_context(read_deleted="no"):
return RequestContext(user_id=None,
project_id=None,
is_admin=True,
read_deleted=read_deleted,
overwrite=False)
| 38.528226
| 81
| 0.630246
|
a4961d759548c1103b085f3b1334474386e86f43
| 356
|
py
|
Python
|
module_use.py
|
vigneshbharathi68/Chadura-Assessment
|
db3789d945506316c420497fe13a3068ff61d974
|
[
"OML",
"Linux-OpenIB"
] | 1
|
2020-08-25T01:57:17.000Z
|
2020-08-25T01:57:17.000Z
|
module_use.py
|
vigneshbharathi68/Chadura-Assessment
|
db3789d945506316c420497fe13a3068ff61d974
|
[
"OML",
"Linux-OpenIB"
] | 1
|
2020-08-29T18:37:08.000Z
|
2020-08-29T18:37:08.000Z
|
module_use.py
|
vigneshbharathi68/Chadura-Assessment
|
db3789d945506316c420497fe13a3068ff61d974
|
[
"OML",
"Linux-OpenIB"
] | null | null | null |
import Calc
import Dict
import Greet
#Import the module Calc where I definied funtions, and accessed here
a = 1
b = 2
c = Calc.add(a, b)
print(c)
#Import the Module named Dict, and access the person1 Dictionary
d = Dict.person1["country"]
print(d)
#import greeting function and call the greeting function
Greet.greeting("Vignesh")
| 17.8
| 69
| 0.710674
|
bf592bf264ed6268df86f7ad0123bd6f12dd98b5
| 12,712
|
py
|
Python
|
server/src/utils.py
|
mikepfrank/COSMICi
|
e91ec9ed30ecff7e80501b3743766c31e1a99285
|
[
"MIT"
] | null | null | null |
server/src/utils.py
|
mikepfrank/COSMICi
|
e91ec9ed30ecff7e80501b3743766c31e1a99285
|
[
"MIT"
] | null | null | null |
server/src/utils.py
|
mikepfrank/COSMICi
|
e91ec9ed30ecff7e80501b3743766c31e1a99285
|
[
"MIT"
] | null | null | null |
#|*****************************************************************************
#| TOP OF FILE
#|*****************************************************************************
#|
#| FILE NAME: utils.py [python module source code]
#|
#| Miscellaneous utilities. These include the
#| following network-related utility functions:
#|
#| get_hostname() - Get first (pre-".") component of current
#| machine's hostname.
#|
#| get_my_ip() - Get the IP address (in standard format) of
#| the current machine.
#|
#| as well as the following class/object related functions/classes:
#|
#| bind() - Bind a class method to a specific instance.
#|
#| become() - Reassign the given object to a new class.
#|
#| MutableClass - Base class for objects that can change class.
#|
#| and the string function:
#|
#| unsplit() - Concatenate a list of strings with a given
#| delimiter in between.
#|
#| and another useful class:
#|
#| WatchBox - A watchable storage location.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
# Imports from standard python modules.
import threading # RLock
from socket import gethostname, gethostbyname
# these are used in get_hostname(), get_my_ip()
import flag # Flag
# Public names we export from this module to other modules that do
# "from utils import *"
__all__ = ['get_hostname', 'get_my_ip', # Networking functions.
'bind', 'become', 'MutableClass', # Class manipulation.
'unsplit' ] # String manipulation.
#|=====================================================================
#|
#| get_hostname() [module public function]
#|
#| Get the name of the host (computer) this server is
#| running on - the part before the first dot (if any), only.
#|
#| So far, it has been tested under Windows Vista as well as
#| Mac OS X (Darwin).
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_hostname():
full_hostname = gethostname()
first_part = full_hostname.partition('.')[0]
return first_part
#|======================================================================
#|
#| get_my_ip() [module public function]
#|
#| Gets the IP address of the default interface of the
#| host (computer) this server application is running on.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def get_my_ip():
full_hostname = gethostname()
my_ip = gethostbyname(full_hostname)
return my_ip
#|======================================================================
#|
#| bind() [public function]
#|
#| Given an object instance and a class method (or any
#| function), binds the instance as the first argument
#| of the method (the one whose formal name is usually
#| something like "self", "this", or "inst"), and returns
#| the newly-created lambda, which can then serve as an
#| instance method. For an example of usage, see the
#| HireCurThread() function in worklist.py.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def bind(inst:object, classMethod):
return lambda *args, **kwargs: classMethod(inst, *args, **kwargs)
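# Illustrative usage sketch (hypothetical class):
#   class Greeter:
#       def hello(this, name): return "hi " + name
#   g = Greeter()
#   hello_g = bind(g, Greeter.hello)
#   hello_g("world")   # behaves like g.hello("world")
# because the instance is pre-bound as the method's first argument.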
#|======================================================================
#|
#| become() [universal object method]
#|
#| This new custom special method can be installed as a
#| method that applies to an object of any user-defined
#| or otherwise-mutable class. It changes the object's
#| class to the new class given. The new class
#| <newClass> must supply a special method
#|          ._convertFrom(oldClass) which mutates instances of
#|          the origin class <oldClass> into instances of the new
#|          class <newClass> by applying any necessary additional
#|          initialization.  Any extra arguments are passed thru
#|          to the ._convertFrom() method.
#|
#| Example usage:
#|
#| class A:
#| def __init__(this):
#| this.become = bind(this, become)
#|
#| class B:
#|              def _convertFrom(this, oldClass:type): pass
#|
#| obj = A()
#| obj.become(B)
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
def become(obj:object, newClass:type, *args, **kwargs):
oldClass = obj.__class__
#print("About to switch object %s from class %s to class %s...\n" % (str(obj), str(oldClass), str(newClass)))
obj.__class__ = newClass
if hasattr(obj, '_convertFrom'):
# print("About to _convertFrom %s to %s...\n" % (str(oldClass), str(newClass)))
obj._convertFrom(oldClass, *args, **kwargs)
else:
print("WARNING: utils.become(): Destination class has no .__convertFrom() method.")
# if '__convertFrom' in newClass.__dict__:
# newClass.__convertFrom(obj, oldClass, *args, **kwargs)
#|=======================================================================
#|
#| MutableClass [module public class]
#|
#| An instance of class MutableClass can be
#| changed to be a direct instance of any
#| other subclass of class MutableClass using
#| the .become() method.
#|
#| Subclasses should override the _convertFrom()
#| method as necessary to meet their specific
#| conversion needs.
#|
#| Example usage:
#|
#| class A(MutableClass): pass
#| class B(MutableClass): pass
#|
#| obj = A() # Create new object of class A.
#| obj.become(B) # Object is now in class B.
#| obj.become(A) # Object is now in class A again.
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
class MutableClass:
def become(this, newClass:type, *args, **kwargs):
become(this, newClass, *args, **kwargs) # call function
# def __init__(this):
# this.become = bind(this, become)
# Convert this object from its previous class <oldClass>
# to the new class. This placeholder method should be
# overridden by subclasses of MutableClass as necessary.
def _convertFrom(this, oldClass:type): pass
# Un-split a list of strings using a given delimiter character.
# The list must be non-empty.
def unsplit(strs, delim:str):
result = strs[0]
for s in strs[1:]:
result = "%s%s%s" % (result, delim, s) # There must be an easier way to concatenate strings!
return result
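# Note: for a non-empty list this is equivalent to delim.join(strs), e.g.
# unsplit(["a", "b", "c"], ",") == "a,b,c".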
#|======================================================================
#|
#| utils.WatchBox [module public class]
#|
#| An object of class WatchBox is simply a place to
#| store things, with an associated flag that is raised
#| whenever the stored thing is replaced. Threads can
#| then wait for the flag to be touched to be informed
#| when the stored thing has been updated. All public
#| properties and methods are thread-safe.
#|
#| Notes:
#|
#| Should this go into a module by itself?
#|
#| Private instance data members (user code should not access
#| these directly):
#|
#| ._lock:threading.RLock -
#|
#| Reentrant mutex lock guarding access to
#| the WatchBox structure for consistency.
#|
#| ._contents:object -
#|
#| The object contained in the box. A
#| WatchBox can only contain one object
#| at a time.
#|
#| ._updated:flag.Flag -
#|
#| A flag that is raised (if not already
#| raised) or waved (if already raised)
#| whenever the box's contents are updated.
#|
#|
#| Special methods:
#|
#| .__init__(initialContents, lock)
#|
#| New instance initializer. The box's initial
#| contents and the RLock to use may be optionally
#| specified. If not specified, the initial contents
#| are None and a new RLock is created.
#|
#| .__call__()
#|
#| Call method. When the box is called, it simply
#| returns its current contents.
#|
#| Public properties:
#|
#| .contents -
#|
#| The present contents of the WatchBox. That is,
#| if box is a WatchBox, then box.contents evaluates
#| to the box's present contents, which you can
#| modify using "box.contents = <newValue>".
#| Waiters are alerted on modification.
#|
#| Public methods:
#|
#| .hold(newContents) -
#|
#| Modifies the box's contents, causing it to hold
#| the given <newContents>. Returns the previous
#| contents.
#|
#| .wait(timeout) -
#|
#| Waits for an optional timeout (which defaults to
#| None, meaning wait indefinitely) for the box's
#| contents to be updated. When updated, returns
#| the new contents.
#|
#| Example usage:
#|
#| In thread 1:
#|
#| box = WatchBox(1) # create box w. initial contents
#| print(box()) # prints 1
#| box.contents = 2 # use property setter
#|
#| In thread 2:
#|
#| newVal = box.wait() # wait for contents to be updated
#| print(newVal) # prints 2
#|
#|vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
class WatchBox:
"""A watchable storage location. To check the state of
a WatchBox <box>, just call it, i.e., "box()".
box.contents is a property that may be used to
retrieve or modify the contents of the box. You
can wait for the box's contents to be updated using
the box.wait() method."""
def __init__(this, initialContents:object=None, lock=None):
"""Instance initializer for objects of class WatchBox."""
if lock == None: lock=threading.RLock()
this._lock = lock
this._contents = initialContents
this._updated = flag.Flag(lock=this._lock, initiallyUp=False)
def __call__(this):
"""Calling a box just returns its current contents."""
return this._contents
def hold(this, newContents):
"""Causes the box to hold new contents. Alerts any waiters.
Returns the previous contents of the box."""
with this._lock:
oldContents = this._contents
this._contents = newContents
this._updated.rise() # Raise or wave the 'updated' flag.
return oldContents
def wait(this, timeout=None):
"""Waits for the contents of the box to be updated.
Takes an optional timeout argument. Returns the
new contents of the box."""
with this._lock:
this._updated.waitTouch(timeout) # Wait for 'updated' flag to be touched.
return this()
@property
def contents(this):
"""This is the box.contents property getter."""
return this()
@contents.setter
def contents(this, newContents):
"""This is the setter method for the box.contents property."""
this.hold(newContents)
#|^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#| END FILE utils.py.
#|=============================================================================
| 38.638298
| 113
| 0.52006
|
125602e2106c630ceae7420313338b531750a62c
| 193
|
py
|
Python
|
profilelambda/__init__.py
|
hvnsweeting/profilelambda
|
ee34a75eb5e7fe4c38269317939901c87f9afcfb
|
[
"MIT"
] | null | null | null |
profilelambda/__init__.py
|
hvnsweeting/profilelambda
|
ee34a75eb5e7fe4c38269317939901c87f9afcfb
|
[
"MIT"
] | null | null | null |
profilelambda/__init__.py
|
hvnsweeting/profilelambda
|
ee34a75eb5e7fe4c38269317939901c87f9afcfb
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""Top-level package for profilelambda."""
__author__ = """Viet Hung Nguyen"""
__email__ = 'hvn@familug.org'
__version__ = '0.4.0'
from .profilelambda import profile
| 19.3
| 42
| 0.678756
|
409e3eb1f448ee73bc9f39be26b36aa23a7d8a39
| 7,842
|
py
|
Python
|
tests/unit/cache/origin/test_fastly.py
|
ewdurbin/warehouse
|
6a23d2732c3a6a948cb10281afb42650af0b2394
|
[
"Apache-2.0"
] | 4
|
2018-03-29T10:42:56.000Z
|
2021-11-17T10:21:43.000Z
|
tests/unit/cache/origin/test_fastly.py
|
ewdurbin/warehouse
|
6a23d2732c3a6a948cb10281afb42650af0b2394
|
[
"Apache-2.0"
] | 258
|
2021-11-29T18:29:38.000Z
|
2022-03-31T18:34:18.000Z
|
tests/unit/cache/origin/test_fastly.py
|
alanbato/warehouse
|
4774aee21f41fe1ffe0e2bf22440666494acbcad
|
[
"Apache-2.0"
] | 1
|
2020-12-01T21:12:24.000Z
|
2020-12-01T21:12:24.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import celery.exceptions
import pretend
import pytest
import requests
from zope.interface.verify import verifyClass
from warehouse.cache.origin import fastly
from warehouse.cache.origin.interfaces import IOriginCache
class TestPurgeKey:
def test_purges_successfully(self, monkeypatch):
task = pretend.stub()
cacher = pretend.stub(purge_key=pretend.call_recorder(lambda k: None))
request = pretend.stub(
find_service=pretend.call_recorder(lambda iface: cacher),
log=pretend.stub(info=pretend.call_recorder(lambda *args, **kwargs: None)),
)
fastly.purge_key(task, request, "foo")
assert request.find_service.calls == [pretend.call(IOriginCache)]
assert cacher.purge_key.calls == [pretend.call("foo")]
assert request.log.info.calls == [pretend.call("Purging %s", "foo")]
@pytest.mark.parametrize(
"exception_type",
[
requests.ConnectionError,
requests.HTTPError,
requests.Timeout,
fastly.UnsuccessfulPurge,
],
)
def test_purges_fails(self, monkeypatch, exception_type):
exc = exception_type()
class Cacher:
@staticmethod
@pretend.call_recorder
def purge_key(key):
raise exc
class Task:
@staticmethod
@pretend.call_recorder
def retry(exc):
raise celery.exceptions.Retry
task = Task()
cacher = Cacher()
request = pretend.stub(
find_service=pretend.call_recorder(lambda iface: cacher),
log=pretend.stub(
info=pretend.call_recorder(lambda *args, **kwargs: None),
error=pretend.call_recorder(lambda *args, **kwargs: None),
),
)
with pytest.raises(celery.exceptions.Retry):
fastly.purge_key(task, request, "foo")
assert request.find_service.calls == [pretend.call(IOriginCache)]
assert cacher.purge_key.calls == [pretend.call("foo")]
assert task.retry.calls == [pretend.call(exc=exc)]
assert request.log.info.calls == [pretend.call("Purging %s", "foo")]
assert request.log.error.calls == [
pretend.call("Error purging %s: %s", "foo", str(exception_type()))
]
class TestFastlyCache:
def test_verify_service(self):
assert verifyClass(IOriginCache, fastly.FastlyCache)
def test_create_service(self):
purge_key = pretend.stub(delay=pretend.stub())
request = pretend.stub(
registry=pretend.stub(
settings={
"origin_cache.api_key": "the api key",
"origin_cache.service_id": "the service id",
}
),
task=lambda f: purge_key,
)
cacher = fastly.FastlyCache.create_service(None, request)
assert isinstance(cacher, fastly.FastlyCache)
assert cacher.api_key == "the api key"
assert cacher.service_id == "the service id"
assert cacher._purger is purge_key.delay
def test_adds_surrogate_key(self):
request = pretend.stub()
response = pretend.stub(headers={})
cacher = fastly.FastlyCache(api_key=None, service_id=None, purger=None)
cacher.cache(["abc", "defg"], request, response)
assert response.headers == {"Surrogate-Key": "abc defg"}
def test_adds_surrogate_control(self):
request = pretend.stub()
response = pretend.stub(headers={})
cacher = fastly.FastlyCache(api_key=None, service_id=None, purger=None)
cacher.cache(
["abc", "defg"],
request,
response,
seconds=9123,
stale_while_revalidate=4567,
stale_if_error=2276,
)
assert response.headers == {
"Surrogate-Key": "abc defg",
"Surrogate-Control": (
"max-age=9123, stale-while-revalidate=4567, stale-if-error=2276"
),
}
def test_multiple_calls_to_cache_dont_overwrite_surrogate_keys(self):
request = pretend.stub()
response = pretend.stub(headers={})
cacher = fastly.FastlyCache(api_key=None, service_id=None, purger=None)
cacher.cache(["abc"], request, response)
cacher.cache(["defg"], request, response)
assert response.headers == {"Surrogate-Key": "abc defg"}
def test_multiple_calls_with_different_requests(self):
request_a = pretend.stub()
request_b = pretend.stub()
response_a = pretend.stub(headers={})
response_b = pretend.stub(headers={})
cacher = fastly.FastlyCache(api_key=None, service_id=None, purger=None)
cacher.cache(["abc"], request_a, response_a)
cacher.cache(["defg"], request_b, response_b)
assert response_a.headers == {"Surrogate-Key": "abc"}
assert response_b.headers == {"Surrogate-Key": "defg"}
def test_purge(self, monkeypatch):
purge_delay = pretend.call_recorder(lambda *a, **kw: None)
cacher = fastly.FastlyCache(
api_key="an api key", service_id="the-service-id", purger=purge_delay
)
cacher.purge(["one", "two"])
assert purge_delay.calls == [pretend.call("one"), pretend.call("two")]
def test_purge_key_ok(self, monkeypatch):
cacher = fastly.FastlyCache(
api_key="an api key", service_id="the-service-id", purger=None
)
response = pretend.stub(
raise_for_status=pretend.call_recorder(lambda: None),
json=lambda: {"status": "ok"},
)
requests_post = pretend.call_recorder(lambda *a, **kw: response)
monkeypatch.setattr(requests, "post", requests_post)
cacher.purge_key("one")
assert requests_post.calls == [
pretend.call(
"https://api.fastly.com/service/the-service-id/purge/one",
headers={
"Accept": "application/json",
"Fastly-Key": "an api key",
"Fastly-Soft-Purge": "1",
},
)
]
assert response.raise_for_status.calls == [pretend.call()]
@pytest.mark.parametrize("result", [{"status": "fail"}, {}])
def test_purge_key_unsuccessful(self, monkeypatch, result):
cacher = fastly.FastlyCache(
api_key="an api key", service_id="the-service-id", purger=None
)
response = pretend.stub(
raise_for_status=pretend.call_recorder(lambda: None), json=lambda: result
)
requests_post = pretend.call_recorder(lambda *a, **kw: response)
monkeypatch.setattr(requests, "post", requests_post)
with pytest.raises(fastly.UnsuccessfulPurge):
cacher.purge_key("one")
assert requests_post.calls == [
pretend.call(
"https://api.fastly.com/service/the-service-id/purge/one",
headers={
"Accept": "application/json",
"Fastly-Key": "an api key",
"Fastly-Soft-Purge": "1",
},
)
]
assert response.raise_for_status.calls == [pretend.call()]
| 35.484163
| 87
| 0.60533
|
2ae3eb05103bddceaf967150efec5bcb54ffa38a
| 1,530
|
py
|
Python
|
ansiblelater/rules/CheckNativeYaml.py
|
robertdebock/ansible-later
|
427356cf617f097fce66dd32b2a227a6d6bbf59c
|
[
"MIT"
] | null | null | null |
ansiblelater/rules/CheckNativeYaml.py
|
robertdebock/ansible-later
|
427356cf617f097fce66dd32b2a227a6d6bbf59c
|
[
"MIT"
] | null | null | null |
ansiblelater/rules/CheckNativeYaml.py
|
robertdebock/ansible-later
|
427356cf617f097fce66dd32b2a227a6d6bbf59c
|
[
"MIT"
] | null | null | null |
from ansiblelater.standard import StandardBase
class CheckNativeYaml(StandardBase):
sid = "LINT0008"
description = "Use YAML format for tasks and handlers rather than key=value"
helptext = "task arguments appear to be in key value rather than YAML format"
version = "0.1"
types = ["playbook", "task", "handler"]
def check(self, candidate, settings):
tasks, errors = self.get_action_tasks(candidate, settings)
if not errors:
for task in tasks:
normal_form, error = self.get_normalized_task(task, candidate, settings)
if error:
errors.extend(error)
break
action = normal_form["action"]["__ansible_module__"]
arguments = [
bytes(x, "utf-8").decode("utf8", "ignore")
for x in normal_form["action"]["__ansible_arguments__"]
]
# Cope with `set_fact` where task["set_fact"] is None
if not task.get(action):
continue
if isinstance(task[action], dict):
continue
# strip additional newlines off task[action]
task_action = bytes(task[action].strip(), "utf-8").decode("utf8", "ignore")
if list(filter(lambda a: a != "\\", task_action.split())) != arguments:
errors.append(self.Error(task["__line__"], self.helptext))
return self.Result(candidate.path, errors)
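# Illustrative task snippets for this rule (hypothetical playbook content,
# shown only to clarify what LINT0008 flags):
#
#   # flagged: arguments in key=value form
#   - name: install htop
#     apt: name=htop state=present
#
#   # preferred: arguments in native YAML form
#   - name: install htop
#     apt:
#       name: htop
#       state: present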
| 41.351351
| 91
| 0.563399
|
09a195c343e9805bed9660f6cc8ae53a2e9308f8
| 3,833
|
py
|
Python
|
tests/test_importers.py
|
nopria/cadquery
|
f38f9cb2523dd8a10adf04c767570298134440eb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_importers.py
|
nopria/cadquery
|
f38f9cb2523dd8a10adf04c767570298134440eb
|
[
"Apache-2.0"
] | null | null | null |
tests/test_importers.py
|
nopria/cadquery
|
f38f9cb2523dd8a10adf04c767570298134440eb
|
[
"Apache-2.0"
] | null | null | null |
"""
Tests file importers such as STEP
"""
# core modules
import tempfile
import os
from cadquery import importers, Workplane
from tests import BaseTest
# where unit test output will be saved
OUTDIR = tempfile.gettempdir()
# test data directory
testdataDir = os.path.join(os.path.dirname(__file__), "testdata")
class TestImporters(BaseTest):
def importBox(self, importType, fileName):
"""
Exports a simple box to a STEP file and then imports it again
:param importType: The type of file we're importing (STEP, STL, etc)
:param fileName: The path and name of the file to write to
"""
# We're importing a STEP file
if importType == importers.ImportTypes.STEP:
# We first need to build a simple shape to export
shape = Workplane("XY").box(1, 2, 3).val()
# Export the shape to a temporary file
shape.exportStep(fileName)
# Reimport the shape from the new STEP file
importedShape = importers.importShape(importType, fileName)
# Check to make sure we got a solid back
self.assertTrue(importedShape.val().ShapeType() == "Solid")
# Check the number of faces and vertices per face to make sure we have a box shape
self.assertTrue(
importedShape.faces("+X").size() == 1
and importedShape.faces("+X").vertices().size() == 4
)
self.assertTrue(
importedShape.faces("+Y").size() == 1
and importedShape.faces("+Y").vertices().size() == 4
)
self.assertTrue(
importedShape.faces("+Z").size() == 1
and importedShape.faces("+Z").vertices().size() == 4
)
def testSTEP(self):
"""
Tests STEP file import
"""
self.importBox(importers.ImportTypes.STEP, OUTDIR + "/tempSTEP.step")
def testInvalidSTEP(self):
"""
Attempting to load an invalid STEP file should throw an exception, but
not segfault.
"""
tmpfile = OUTDIR + "/badSTEP.step"
with open(tmpfile, "w") as f:
f.write("invalid STEP file")
with self.assertRaises(ValueError):
importers.importShape(importers.ImportTypes.STEP, tmpfile)
def testImportMultipartSTEP(self):
"""
Import a STEP file that contains two objects and ensure that both are
loaded.
"""
filename = os.path.join(testdataDir, "red_cube_blue_cylinder.step")
objs = importers.importShape(importers.ImportTypes.STEP, filename)
self.assertEqual(2, len(objs.all()))
def testImportDXF(self):
"""
Test DXF import with various tolerances.
"""
filename = os.path.join(testdataDir, "gear.dxf")
obj = importers.importDXF(filename)
self.assertFalse(obj.val().isValid())
obj = importers.importDXF(filename, tol=1e-3)
self.assertTrue(obj.val().isValid())
self.assertEqual(obj.faces().size(), 1)
self.assertEqual(obj.wires().size(), 2)
obj = obj.wires().toPending().extrude(1)
self.assertTrue(obj.val().isValid())
self.assertEqual(obj.solids().size(), 1)
obj = importers.importShape(importers.ImportTypes.DXF, filename, tol=1e-3)
self.assertTrue(obj.val().isValid())
# additional files to test more DXF entities
filename = os.path.join(testdataDir, "MC 12x31.dxf")
obj = importers.importDXF(filename)
self.assertTrue(obj.val().isValid())
filename = os.path.join(testdataDir, "1001.dxf")
obj = importers.importDXF(filename)
self.assertTrue(obj.val().isValid())
if __name__ == "__main__":
import unittest
unittest.main()
| 32.760684
| 94
| 0.60527
|
20b9933fa4e26fdfb43cc4a7c921a30e70b9b553
| 4,836
|
py
|
Python
|
linear_regression_on_Boston_housing_dataset.py
|
installbtien/linear_regression_on_Boston_housing_dataset
|
f20edbaa4bdaccb5023d83803712d844f45baaf5
|
[
"MIT"
] | null | null | null |
linear_regression_on_Boston_housing_dataset.py
|
installbtien/linear_regression_on_Boston_housing_dataset
|
f20edbaa4bdaccb5023d83803712d844f45baaf5
|
[
"MIT"
] | null | null | null |
linear_regression_on_Boston_housing_dataset.py
|
installbtien/linear_regression_on_Boston_housing_dataset
|
f20edbaa4bdaccb5023d83803712d844f45baaf5
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.datasets import load_boston
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
boston_dataset = load_boston()
#load the Boston data from sklearn.datasets and assign it to boston_dataset (a dict-like Bunch)
#this dictionary includes ['data', 'target', 'feature_names', 'DESCR', 'filename']
#data: contains the information for the various houses
#target: prices of the houses
#feature_names: names of the features
#DESCR: describes the dataset
#boston_dataset.DESCR describes the variables in boston_dataset.data, but MEDV is not among them,
# so we need to take MEDV from boston_dataset.target and add it to the boston DataFrame
boston=pd.DataFrame(boston_dataset.data,columns=boston_dataset.feature_names)
#add a column named MEDV to boston to store the values from boston_dataset.target
boston['MEDV']=boston_dataset.target
#data visualize
sns.set(rc={'figure.figsize':(11.7,8.27)})
sns.distplot(boston['MEDV'], bins=30)
#hist and kde
plt.show()
#build a correlation matrix to measure the linear relationships between the variables in the data
correlation_matrix = boston.corr().round(2)
#compute pairwise correlations between the columns of boston and round to two decimal places
sns.heatmap(data=correlation_matrix, annot=True)
#output by the heatmap
plt.show()
#correlation matrix:
# CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT MEDV
#CRIM 1.00 -0.20 0.41 -0.06 0.42 -0.22 0.35 -0.38 0.63 0.58 0.29 -0.39 0.46 -0.39
#ZN -0.20 1.00 -0.53 -0.04 -0.52 0.31 -0.57 0.66 -0.31 -0.31 -0.39 0.18 -0.41 0.36
#INDUS 0.41 -0.53 1.00 0.06 0.76 -0.39 0.64 -0.71 0.60 0.72 0.38 -0.36 0.60 -0.48
#CHAS -0.06 -0.04 0.06 1.00 0.09 0.09 0.09 -0.10 -0.01 -0.04 -0.12 0.05 -0.05 0.18
#NOX 0.42 -0.52 0.76 0.09 1.00 -0.30 0.73 -0.77 0.61 0.67 0.19 -0.38 0.59 -0.43
#RM -0.22 0.31 -0.39 0.09 -0.30 1.00 -0.24 0.21 -0.21 -0.29 -0.36 0.13 -0.61 0.70
#AGE 0.35 -0.57 0.64 0.09 0.73 -0.24 1.00 -0.75 0.46 0.51 0.26 -0.27 0.60 -0.38
#DIS -0.38 0.66 -0.71 -0.10 -0.77 0.21 -0.75 1.00 -0.49 -0.53 -0.23 0.29 -0.50 0.25
#RAD 0.63 -0.31 0.60 -0.01 0.61 -0.21 0.46 -0.49 1.00 0.91 0.46 -0.44 0.49 -0.38
#TAX 0.58 -0.31 0.72 -0.04 0.67 -0.29 0.51 -0.53 0.91 1.00 0.46 -0.44 0.54 -0.47
#PTRATIO 0.29 -0.39 0.38 -0.12 0.19 -0.36 0.26 -0.23 0.46 0.46 1.00 -0.18 0.37 -0.51
#B -0.39 0.18 -0.36 0.05 -0.38 0.13 -0.27 0.29 -0.44 -0.44 -0.18 1.00 -0.37 0.33
#LSTAT 0.46 -0.41 0.60 -0.05 0.59 -0.61 0.60 -0.50 0.49 0.54 0.37 -0.37 1.00 -0.74
#MEDV -0.39 0.36 -0.48 0.18 -0.43 0.70 -0.38 0.25 -0.38 -0.47 -0.51 0.33 -0.74 1.00
#by inspecting the correlations with MEDV, we find that RM and LSTAT have the strongest positive and negative correlations
#using scatter plot to visualize the correlation between MEDV and (RM or LSTAT)
plt.figure(figsize=(20, 5))
features = ['LSTAT', 'RM']
target = boston['MEDV']
for i, col in enumerate(features):
plt.subplot(1,2,i+1)
x = boston[col]
y = target
plt.scatter(x, y, marker='o')
plt.title(col)
plt.xlabel(col)
plt.ylabel('MEDV')
plt.show()
#prepare the data for training the model
X = pd.DataFrame(np.c_[boston['LSTAT'], boston['RM']], columns = ['LSTAT','RM'])
Y = boston['MEDV']
# Splitting the data into training and testing sets
#80% for training set and 20% for testing set
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size = 0.2, random_state=5)
#X_train:[404 rows x 2 columns]
#X_test:[102 rows x 2 columns]
#Y_train:Name: MEDV, Length: 404, dtype: float64
#Y_test:Name: MEDV, Length: 102, dtype: float64
#Training and testing the model
lin_model = LinearRegression()
lin_model.fit(X_train, Y_train)
# model evaluation for training set
y_train_predict = lin_model.predict(X_train)
rmse = (np.sqrt(mean_squared_error(Y_train, y_train_predict)))
r2 = r2_score(Y_train, y_train_predict)
print("The model performance for training set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
print("\n")
# model evaluation for testing set
y_test_predict = lin_model.predict(X_test)
rmse = (np.sqrt(mean_squared_error(Y_test, y_test_predict)))
r2 = r2_score(Y_test, y_test_predict)
print("The model performance for testing set")
print("--------------------------------------")
print('RMSE is {}'.format(rmse))
print('R2 score is {}'.format(r2))
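#use the fitted model for a new, hypothetical observation (illustrative only;
#LSTAT=5.0 and RM=6.5 are made-up inputs, not values taken from the dataset)
sample = pd.DataFrame({'LSTAT': [5.0], 'RM': [6.5]})
print('Predicted MEDV for LSTAT=5.0, RM=6.5:', lin_model.predict(sample)[0])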
| 45.622642
| 127
| 0.644127
|
35a84aaadb7267ca4cab3d15dd7b7881c246e487
| 3,305
|
py
|
Python
|
nbs/ReactiveX/rxtrio/trioscheduler.py
|
pramasoul/aix
|
98333b875f6c6cda6dee86e6eab02c5ddc622543
|
[
"MIT"
] | null | null | null |
nbs/ReactiveX/rxtrio/trioscheduler.py
|
pramasoul/aix
|
98333b875f6c6cda6dee86e6eab02c5ddc622543
|
[
"MIT"
] | 1
|
2021-11-29T03:44:00.000Z
|
2021-12-19T05:34:04.000Z
|
nbs/ReactiveX/rxtrio/trioscheduler.py
|
pramasoul/aix
|
98333b875f6c6cda6dee86e6eab02c5ddc622543
|
[
"MIT"
] | null | null | null |
### WIP ###
from typing import Any, Optional
import trio  # imported at module level so the nested scheduling helpers below can reference it
from rx.core import typing
from rx.disposable import CompositeDisposable, Disposable, SingleAssignmentDisposable
#from ..periodicscheduler import PeriodicScheduler
from rx.scheduler.periodicscheduler import PeriodicScheduler
class TrioScheduler(PeriodicScheduler):
"""A scheduler that schedules work via the Trio main event loop.
"""
def __init__(self) -> None:
"""Create a new TrioScheduler.
"""
import trio
super().__init__()
def schedule(self,
action: typing.ScheduledAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules an action to be executed.
Args:
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
return self.schedule_relative(0.0, action, state)
def schedule_relative(self,
duetime: typing.RelativeTime,
action: typing.ScheduledAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules an action to be executed after duetime.
Args:
duetime: Relative time after which to execute the action.
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
sad = SingleAssignmentDisposable()
def invoke_action() -> None:
sad.disposable = self.invoke_action(action, state=state)
msecs = max(0, int(self.to_seconds(duetime) * 1000.0))
#timer = self._root.after(msecs, invoke_action)
### Hacking ***
print("yo")
async def run_after(msecs, invoke_action):
await trio.sleep(msecs/1000)
invoke_action()
async def get_it_going():
async with trio.open_nursery() as nursery:
                nursery.start_soon(run_after, msecs, invoke_action)
# This assumes we are in Jupyter running under `%autoawait trio`
#await get_it_going()
def dispose() -> None:
#self._root.after_cancel(timer)
pass
return CompositeDisposable(sad, Disposable(dispose))
def schedule_absolute(self,
duetime: typing.AbsoluteTime,
action: typing.ScheduledAction,
state: Optional[typing.TState] = None
) -> typing.Disposable:
"""Schedules an action to be executed at duetime.
Args:
duetime: Absolute time at which to execute the action.
action: Action to be executed.
state: [Optional] state to be given to the action function.
Returns:
The disposable object used to cancel the scheduled action
(best effort).
"""
duetime = self.to_datetime(duetime)
return self.schedule_relative(duetime - self.now, action, state=state)
| 31.47619
| 85
| 0.587897
|
07df900a1d29c2580b912555cfefe680a8341509
| 814
|
py
|
Python
|
my_classes/.history/ModulesPackages_PackageNamespaces/example3b/importer_20210726184631.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/ModulesPackages_PackageNamespaces/example3b/importer_20210726184631.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
my_classes/.history/ModulesPackages_PackageNamespaces/example3b/importer_20210726184631.py
|
minefarmer/deep-Dive-1
|
b0675b853180c5b5781888266ea63a3793b8d855
|
[
"Unlicense"
] | null | null | null |
import os.path
import types
import sys
def import_(module_name, module_file, module_path):
if module_name in sys.modules:
return sys.modules[module_name]
module_rel_file_path = os.path.join(module_path, module_file)
module_abs_file_path = os.path.abspath(module_rel_file_path)
# read source code from file
with open(module_rel_file_path, 'r') as code_file:
source_code = code_file.read()
# create a module object
mod = types.ModuleType(module_name)
mod.__file__ = module_abs_file_path
# set a ref in sys.modules
sys.modules[module_name] = mod
    # compile source_code
    code = compile(source_code, filename=module_abs_file_path, mode='exec')
    # execute compiled source code
    exec(code, mod.__dict__)
return sys.modules[module_name]
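# Example usage (hypothetical module name and path, shown for illustration):
#   mod = import_('module1', 'module1.py', './example3b')
#   print(mod.__file__)   # absolute path of the loaded source file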
| 24.666667
| 77
| 0.72113
|
3ca765fb4ada973c543f6347cdc4a0fbf4478a9f
| 3,991
|
py
|
Python
|
Lib/test/test_importlib/import_/test_caching.py
|
oleksandr-pavlyk/cpython
|
eb002dbe0da9622245a355db5f0cd5aa2fc70b40
|
[
"0BSD"
] | 5
|
2021-12-03T23:11:53.000Z
|
2022-01-08T21:02:50.000Z
|
Lib/test/test_importlib/import_/test_caching.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 8
|
2022-01-07T11:31:11.000Z
|
2022-03-04T00:07:16.000Z
|
Lib/test/test_importlib/import_/test_caching.py
|
dalakatt/cpython
|
2f49b97cc5426087b46515254b9a97a22ee8c807
|
[
"0BSD"
] | 3
|
2017-10-18T09:35:14.000Z
|
2018-09-09T16:40:13.000Z
|
"""Test that sys.modules is used properly by import."""
from test.test_importlib import util
import sys
from types import MethodType
import unittest
import warnings
class UseCache:
"""When it comes to sys.modules, import prefers it over anything else.
Once a name has been resolved, sys.modules is checked to see if it contains
the module desired. If so, then it is returned [use cache]. If it is not
found, then the proper steps are taken to perform the import, but
sys.modules is still used to return the imported module (e.g., not what a
loader returns) [from cache on return]. This also applies to imports of
things contained within a package and thus get assigned as an attribute
[from cache to attribute] or pulled in thanks to a fromlist import
[from cache for fromlist]. But if sys.modules contains None then
ImportError is raised [None in cache].
"""
def test_using_cache(self):
# [use cache]
module_to_use = "some module found!"
with util.uncache('some_module'):
sys.modules['some_module'] = module_to_use
module = self.__import__('some_module')
self.assertEqual(id(module_to_use), id(module))
def test_None_in_cache(self):
#[None in cache]
name = 'using_None'
with util.uncache(name):
sys.modules[name] = None
with self.assertRaises(ImportError) as cm:
self.__import__(name)
self.assertEqual(cm.exception.name, name)
(Frozen_UseCache,
Source_UseCache
) = util.test_both(UseCache, __import__=util.__import__)
class ImportlibUseCache(UseCache, unittest.TestCase):
# Pertinent only to PEP 302; exec_module() doesn't return a module.
__import__ = util.__import__['Source']
def create_mock(self, *names, return_=None):
mock = util.mock_modules(*names)
original_load = mock.load_module
def load_module(self, fullname):
original_load(fullname)
return return_
mock.load_module = MethodType(load_module, mock)
return mock
# __import__ inconsistent between loaders and built-in import when it comes
# to when to use the module in sys.modules and when not to.
def test_using_cache_after_loader(self):
# [from cache on return]
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
with self.create_mock('module') as mock:
with util.import_state(meta_path=[mock]):
module = self.__import__('module')
self.assertEqual(id(module), id(sys.modules['module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_assigning_to_attribute(self):
# [from cache to attribute]
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg.module')
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
# See test_using_cache_after_loader() for reasoning.
def test_using_cache_for_fromlist(self):
# [from cache for fromlist]
with warnings.catch_warnings():
warnings.simplefilter("ignore", ImportWarning)
with self.create_mock('pkg.__init__', 'pkg.module') as importer:
with util.import_state(meta_path=[importer]):
module = self.__import__('pkg', fromlist=['module'])
self.assertTrue(hasattr(module, 'module'))
self.assertEqual(id(module.module),
id(sys.modules['pkg.module']))
if __name__ == '__main__':
unittest.main()
| 39.514851
| 79
| 0.643197
|
ecf77043729dac9519b2414027a5573d9226a958
| 24,582
|
py
|
Python
|
python/sandbox/explainer.py
|
geometer/sandbox
|
373ec96e69df76744a19b51f7caa865cbc6b58cd
|
[
"Apache-2.0"
] | 6
|
2020-04-19T11:26:18.000Z
|
2021-06-21T18:42:51.000Z
|
python/sandbox/explainer.py
|
geometer/sandbox
|
373ec96e69df76744a19b51f7caa865cbc6b58cd
|
[
"Apache-2.0"
] | 31
|
2020-04-21T17:24:39.000Z
|
2020-08-27T15:59:12.000Z
|
python/sandbox/explainer.py
|
geometer/sandbox
|
373ec96e69df76744a19b51f7caa865cbc6b58cd
|
[
"Apache-2.0"
] | null | null | null |
import itertools
import time
import sympy as sp
from .core import Constraint
from .predefined import enumerate_predefined_properties
from .property import *
from .propertyset import PropertySet
from .reason import Reason
from .rules.abstract import PredefinedPropertyRule
from .rules.advanced import *
from .rules.basic import *
from .rules.circle import *
from .rules.cycle import *
from .rules.line import *
from .rules.linear import *
from .rules.quadrilateral import *
from .rules.triangle_elements import *
from .rules.triangles import *
from .rules.trigonometric import *
from .scene import Scene
from .stats import Stats
from .util import LazyComment, Comment
class Explainer:
def __init__(self, scene, options={}):
self.scene = scene
self.__options = options
self.context = PropertySet(self.scene.points(max_layer=self.__max_layer))
self.__explanation_time = None
self.__iteration_step_count = -1
self.__rules = [
SegmentWithEndpointsOnAngleSidesRule(self.context),
CollinearityToSameLineRule(self.context),
NonCollinearityToDifferentLinesRule(self.context),
NonCollinearityToDifferentLinesRule2(self.context),
CollinearityToPointOnLineRule(self.context),
NonCollinearityToPointNotOnLineRule(self.context),
MissingLineKeysRule(self.context),
LengthRatioTransitivityRule(self.context),
ProportionalLengthsToLengthsRatioRule(self.context),
LengthRatiosWithCommonDenominatorRule(self.context),
SumOfThreeAnglesOnLineRule(self.context),
SumOfThreeAnglesOnLineRule2(self.context),
AngleInTriangleWithTwoKnownAnglesRule(self.context),
SumOfTwoAnglesInTriangleRule(self.context),
SumOfThreeAnglesInTriangleRule(self.context),
AngleBySumOfThreeRule(self.context),
SumOfTwoAnglesByThreeRule(self.context),
SumAndRatioOfTwoAnglesRule(self.context),
EqualSumsOfAnglesRule(self.context),
AngleFromSumOfTwoAnglesRule(self.context),
#SumOfAngles180DegreeRule(self.context),
AngleTypeAndPerpendicularRule(self.context),
CoincidenceTransitivityRule(self.context),
TwoPointsBelongsToTwoLinesRule(self.context),
TwoPointsBelongsToTwoPerpendicularsRule(self.context),
LengthRatioRule(self.context),
ParallelVectorsRule(self.context),
PerpendicularSegmentsRule(self.context),
Degree90ToPerpendicularSegmentsRule(self.context),
Degree90ToPerpendicularSegmentsRule2(self.context),
PerpendicularTransitivityRule(self.context),
PerpendicularToEquidistantRule(self.context),
EquidistantToPerpendicularRule(self.context),
PointsSeparatedByLineAreNotCoincidentRule(self.context),
PointInsideSegmentRelativeToLineRule(self.context),
SameSidePointInsideSegmentRule(self.context),
TwoPerpendicularsRule(self.context),
TwoPerpendicularsRule2(self.context),
ParallelSameSideRule(self.context),
CommonPerpendicularRule(self.context),
SideProductsInSimilarTrianglesRule(self.context),
CorrespondingAnglesInCongruentTrianglesRule(self.context),
CorrespondingAnglesInSimilarTrianglesRule(self.context),
CorrespondingSidesInCongruentTrianglesRule(self.context),
CorrespondingSidesInSimilarTrianglesRule(self.context),
LengthProductEqualityToRatioRule(self.context),
MiddleOfSegmentRule(self.context),
ConvexQuadrilateralRule(self.context),
SquareRule(self.context),
SquareDegeneracyRule(self.context),
NondegenerateSquareRule(self.context),
EquilateralTriangleRule(self.context),
CentreOfEquilateralTriangleRule(self.context),
BaseAnglesOfIsoscelesWithKnownApexAngleRule(self.context),
BaseAnglesOfIsoscelesRule(self.context),
LegsOfIsoscelesRule(self.context),
RotatedAngleRule(self.context),
AngleTypeByDegreeRule(self.context),
PointsCollinearityByAngleDegreeRule(self.context),
EqualAnglesToCollinearityRule(self.context),
AngleInsideBiggerOneRule(self.context),
RightAngleDegreeRule(self.context),
AngleTypesInObtuseangledTriangleRule(self.context),
PointInsidePartOfAngleRule(self.context),
PartOfAcuteAngleIsAcuteRule(self.context),
TwoPointsInsideSegmentRule(self.context),
TwoPointsOnRayRule(self.context),
SameAngleRule(self.context),
SameAngleRule2(self.context),
SameAngleDegreeRule(self.context),
TransversalRule(self.context),
SupplementaryAnglesRule(self.context),
VerticalAnglesRule(self.context),
ReversedVerticalAnglesRule(self.context),
CorrespondingAndAlternateAnglesRule(self.context),
CyclicOrderRule(self.context),
PlanePositionsToLinePositionsRule(self.context),
CeviansIntersectionRule(self.context),
SameSideToInsideAngleRule(self.context),
TwoAnglesWithCommonSideRule(self.context),
TwoAnglesWithCommonSideDegreeRule(self.context),
TwoPointsRelativeToLineTransitivityRule(self.context),
TwoPointsRelativeToLineTransitivityRule2(self.context),
CongruentAnglesDegeneracyRule(self.context),
PointAndAngleRule(self.context),
PointInsideAngleConfigurationRule(self.context),
PointInsideAngleAndPointOnSideRule(self.context),
PerpendicularToSideOfObtuseAngledRule(self.context),
PointOnSegmentWithEndpointsOnSidesOfAngleRule(self.context),
EquilateralTriangleByThreeSidesRule(self.context),
EquilateralTriangleByConrguentLegsAndAngleRule(self.context),
IsoscelesTriangleByConrguentLegsRule(self.context),
IsoscelesTriangleByConrguentBaseAnglesRule(self.context),
CongruentTrianglesByAngleAndTwoSidesRule(self.context),
CongruentTrianglesByThreeSidesRule(self.context),
SimilarTrianglesByTwoAnglesRule(self.context),
SimilarTrianglesByAngleAndTwoSidesRule(self.context),
SimilarTrianglesByThreeSidesRule(self.context),
SimilarTrianglesWithCongruentSideRule(self.context),
]
if options.get('circles'):
self.__rules += [
#ThreeNonCoincidentPointsOnACicrleAreNonCollinearRule(self.context),
CyclicQuadrilateralRule(self.context),
#CyclicQuadrilateralRule2(self.context),
#PointsOnCircleRule(self.context),
#ConcyclicToSameCircleRule(self.context),
#InscribedAnglesWithCommonCircularArcRule(self.context),
#PointsOnChordRule(self.context),
#TwoChordsIntersectionRule(self.context),
#ThreeCollinearPointsOnCircleRule(self.context),
]
if options.get('advanced'):
self.__rules += [
RightAngledTriangleMedianRule(self.context),
Triangle30_60_90SidesRule(self.context),
Triangle30_30_120SidesRule(self.context),
Triangle36_36_108SidesRule(self.context),
Triangle72_72_36SidesRule(self.context),
]
if options.get('trigonometric'):
self.__rules += [
LawOfSinesRule(self.context),
]
@property
def __max_layer(self):
return self.__options.get('max_layer', 'user')
def __reason(self, prop, comment, premises=None):
reason = Reason(self.__iteration_step_count, comment, premises)
def insert(pro):
for pre in pro.reason.premises:
if self.context.index_of(pre) is None:
insert(pre)
self.context.add(pro)
existing = self.context[prop]
if existing is None:
prop.reason = reason
prop.reason.obsolete = False
insert(prop)
elif reason.cost < existing.reason.cost:
#### +++ HACK +++
# TODO: move this hack outside of explainer
if isinstance(prop, AngleRatioProperty) and prop.same:
existing.same = True
#### --- HACK ---
reason.obsolete = existing.reason.obsolete
existing.reason = reason
if hasattr(prop, 'rule'):
existing.rule = prop.rule
elif hasattr(existing, 'rule'):
delattr(existing, 'rule')
#TODO: if the rule reference changed from 'synthetic',
# add the property to a transitivity set
if self.context.index_of(existing) is None:
insert(existing)
def explain(self):
start = time.time()
frozen = self.scene.is_frozen
if not frozen:
self.scene.freeze()
self.__explain_all()
if not frozen:
self.scene.unfreeze()
self.__explanation_time = time.time() - start
def __explain_all(self):
def iteration():
for rule in self.__rules:
for prop, comment, premises in rule.generate():
prop.rule = rule
yield (prop, comment, premises)
angle_values = [prop for prop in self.context.angle_value_properties() \
if prop.angle.vertex is not None]
for av in [av for av in angle_values if av.degree == 0]:
av_is_too_old = av.reason.obsolete
vertex = av.angle.vertex
pt0 = av.angle.vectors[0].end
pt1 = av.angle.vectors[1].end
for vec in av.angle.vectors:
for pt2 in self.context.not_collinear_points(vec.as_segment):
nc = self.context.collinearity_property(pt2, *vec.points)
if av_is_too_old and nc.reason.obsolete:
continue
segment = vertex.segment(pt2)
yield (
SameOrOppositeSideProperty(segment, pt0, pt1, True),
LazyComment('%s, %s', av, nc), #TODO: better comment
[av, nc]
)
for aa in [p for p in self.context.list(AngleKindProperty) if p.kind == AngleKindProperty.Kind.acute]:
base = aa.angle
if base.vertex is None:
continue
for vec0, vec1 in [base.vectors, reversed(base.vectors)]:
for pt in self.context.collinear_points(vec0.as_segment):
col = self.context.collinearity_property(pt, *vec0.points)
reasons_are_too_old = aa.reason.obsolete and col.reason.obsolete
for angle in [pt.angle(vec1.end, p) for p in vec0.points]:
ka = self.context.angle_value_property(angle)
if ka is None or reasons_are_too_old and ka.reason.obsolete:
continue
if ka.degree >= 90:
comment = Comment(
'$%{point:pt0}$, $%{point:pt1}$, $%{point:pt2}$ are collinear, $%{angle:base}$ is acute, and $%{anglemeasure:angle} = %{degree:degree}$',
{
'pt0': pt,
'pt1': vec0.points[0],
'pt2': vec0.points[1],
'base': base,
'angle': angle,
'degree': ka.degree
}
)
zero = base.vertex.angle(vec0.end, pt)
yield (AngleValueProperty(zero, 0), comment, [col, aa, ka])
break
for aa in self.context.angle_value_properties_for_degree(90):
base = aa.angle
if base.vertex is None:
continue
for vec0, vec1 in [base.vectors, reversed(base.vectors)]:
for perp in self.context.list(PerpendicularSegmentsProperty, [vec0.as_segment]):
other = perp.segments[0] if vec0.as_segment == perp.segments[1] else perp.segments[1]
if vec1.end not in other.points:
continue
foot = next(pt for pt in other.points if pt != vec1.end)
if foot in vec0.points:
continue
col = self.context.collinearity_property(foot, *vec0.points)
if col is None or not col.collinear:
continue
if aa.reason.obsolete and perp.reason.obsolete and col.reason.obsolete:
continue
yield (
PointsCoincidenceProperty(base.vertex, foot, True),
Comment(
'$%{point:foot}$ is the foot of the perpendicular from $%{point:pt}$ to $%{line:line}$, and $%{angle:angle}$ is right',
{'foot': foot, 'pt': vec1.end, 'line': vec0, 'angle': base}
),
[perp, col, aa]
)
# for oa in [p for p in self.context.list(AngleKindProperty) if p.kind == AngleKindProperty.Kind.obtuse]:
# base = oa.angle
# if base.vertex is None:
# continue
# for vec0, vec1 in [base.vectors, reversed(base.vectors)]:
# for pt in self.context.collinear_points(vec0.as_segment):
# col = self.context.collinearity_property(pt, *vec0.points)
# reasons_are_too_old = oa.reason.obsolete and col.reason.obsolete
# for angle in [pt.angle(vec1.end, p) for p in vec0.points]:
# ka = self.context.angle_value_property(angle)
# if ka is None or reasons_are_too_old and ka.reason.obsolete:
# continue
# if ka.degree <= 90:
# comment = LazyComment(
# '%s, %s, %s are collinear, %s is obtuse, and %s = %s',
# pt, *vec0.points, base, angle, ka.degree_str
# )
# zero = base.vertex.angle(vec0.end, pt)
# yield (AngleValueProperty(zero, 180), comment, [col, oa, ka])
# break
for ka in self.context.nondegenerate_angle_value_properties():
base = ka.angle
if ka.degree == 180 or base.vertex is None:
continue
ka_is_too_old = ka.reason.obsolete
for vec0, vec1 in [base.vectors, reversed(base.vectors)]:
for pt in self.context.collinear_points(vec0.as_segment):
col = self.context.collinearity_property(pt, *vec0.points)
reasons_are_too_old = ka_is_too_old and col.reason.obsolete
for angle in [pt.angle(vec1.end, p) for p in vec0.points]:
ka2 = self.context.angle_value_property(angle)
if ka2 is None or reasons_are_too_old and ka2.reason.obsolete:
continue
if ka2.degree > ka.degree:
comment = Comment(
'$%{point:pt0}$, $%{point:pt1}$, $%{point:pt2}$ are collinear and $%{anglemeasure:angle0}$ > $%{anglemeasure:angle1}$',
{'pt0': pt, 'pt1': vec0.points[0], 'pt2': vec0.points[1], 'angle0': angle, 'angle1': base}
)
zero = base.vertex.angle(vec0.end, pt)
yield (AngleValueProperty(zero, 0), comment, [col, ka2, ka])
break
for aa0, aa1 in itertools.combinations([a for a in self.context.list(AngleKindProperty) if a.angle.vertex and a.kind == AngleKindProperty.Kind.acute], 2):
vertex = aa0.angle.vertex
if vertex != aa1.angle.vertex:
continue
vectors0 = aa0.angle.vectors
vectors1 = aa1.angle.vectors
common = next((v for v in vectors0 if v in vectors1), None)
if common is None:
continue
other0 = next(v for v in vectors0 if v != common)
other1 = next(v for v in vectors1 if v != common)
col = self.context.collinearity_property(*other0.points, other1.end)
if col is None or not col.collinear or aa0.reason.obsolete and aa1.reason.obsolete and col.reason.obsolete:
continue
yield (
AngleValueProperty(other0.angle(other1), 0),
Comment(
'both $%{angle:angle0}$ and $%{angle:angle1}$ are acute',
{'angle0': aa0.angle, 'angle1': aa1.angle}
),
[aa0, aa1, col]
)
# for zero in [p for p in self.context.list(AngleValueProperty) if p.angle.vertex is None and p.degree == 0]:
# zero_is_too_old = zero.reason.obsolete
# ang = zero.angle
#
# for vec0, vec1 in [ang.vectors, reversed(ang.vectors)]:
# for i, j in [(0, 1), (1, 0)]:
# ncl = self.context.collinearity(*vec0.points, vec1.points[i])
# if ncl is None or ncl.collinear:
# continue
# ne = self.context.not_equal_property(*vec1.points)
# if ne is None:
# continue
# if zero_is_too_old and ncl.reason.obsolete and ne.reason.obsolete:
# continue
# yield (
# PointsCollinearityProperty(*vec0.points, vec1.points[j], False),
# 'Transitivity',
# [ncl, zero, ne]
# )
# yield (
# PointsCollinearityProperty(*vec1.points, vec0.points[i], False),
# 'Transitivity',
# [ncl, zero, ne]
# )
# yield (
# PointsCollinearityProperty(*vec1.points, vec0.points[j], False),
# 'Transitivity',
# [ncl, zero, ne]
# )
for zero in [p for p in self.context.list(AngleValueProperty) if p.angle.vertex is None and p.degree == 0]:
ang = zero.angle
ncl = self.context.collinearity_property(*ang.vectors[0].points, ang.vectors[1].points[0])
if ncl is None or ncl.collinear:
continue
ne = self.context.not_equal_property(*ang.vectors[1].points)
if ne is None:
continue
if zero.reason.obsolete and ncl.reason.obsolete and ne.reason.obsolete:
continue
comment = Comment(
'$%{vector:vec0} \\uparrow\\!\\!\\!\\uparrow %{vector:vec1}$',
{'vec0': ang.vectors[0], 'vec1': ang.vectors[1]}
)
premises = [zero, ncl, ne]
yield (
SameOrOppositeSideProperty(ang.vectors[0].as_segment, *ang.vectors[1].points, True),
comment, premises
)
yield (
SameOrOppositeSideProperty(ang.vectors[1].as_segment, *ang.vectors[0].points, True),
comment, premises
)
yield (
SameOrOppositeSideProperty(
ang.vectors[0].start.segment(ang.vectors[1].end),
ang.vectors[0].end, ang.vectors[1].start, False
),
comment, premises
)
yield (
SameOrOppositeSideProperty(
ang.vectors[1].start.segment(ang.vectors[0].end),
ang.vectors[1].end, ang.vectors[0].start, False
),
comment, premises
)
for prop, comment in enumerate_predefined_properties(self.scene, max_layer=self.__max_layer):
prop.rule = PredefinedPropertyRule.instance()
self.__reason(prop, comment, [])
self.__iteration_step_count = 0
while itertools.count():
explained_size = len(self.context)
for prop, comment, premises in iteration():
self.__reason(prop, comment, premises)
for prop in self.context.all:
prop.reason.obsolete = prop.reason.generation < self.__iteration_step_count - 1
self.__iteration_step_count += 1
if len(self.context) == explained_size:
break
def dump(self, properties_to_explain=[]):
def to_string(reason):
if reason.premises:
return '%s (%s)' % (
reason.comment,
', '.join(['*%s' % self.context.index_of(prop) for prop in reason.premises])
)
else:
return reason.comment
if len(self.context) > 0:
print('Explained:')
explained = self.context.all
explained.sort(key=lambda p: self.context.index_of(p))
for prop in explained:
print('\t%2d (%d): %s [%s]' % (self.context.index_of(prop), prop.reason.generation, prop, to_string(prop.reason)))
if properties_to_explain:
unexplained = [prop for prop in properties_to_explain if prop not in self.context]
if len(unexplained) > 0:
print('\nNot explained:')
for prop in unexplained:
print('\t%s' % prop)
def stats(self, properties_to_explain=[]):
def type_presentation(kind):
return kind.__doc__.strip() if kind.__doc__ else kind.__name__
unexplained = [prop for prop in properties_to_explain if prop not in self.context]
unexplained_by_kind = {}
for prop in unexplained:
kind = type(prop)
unexplained_by_kind[kind] = unexplained_by_kind.get(kind, 0) + 1
unexplained_by_kind = [(type_presentation(k), v) for k, v in unexplained_by_kind.items()]
unexplained_by_kind.sort(key=lambda pair: -pair[1])
return Stats([
('Explained properties', len(self.context)),
self.context.stats(),
('Explained property keys', self.context.keys_num()),
('Unexplained properties', len(unexplained)),
Stats(unexplained_by_kind),
('Iterations', self.__iteration_step_count),
('Explanation time', '%.3f sec' % self.__explanation_time),
], 'Explainer stats')
def explained(self, obj):
if isinstance(obj, Property):
return obj in self.context
if isinstance(obj, Scene.Angle):
rsn = self.context.angle_value_property(obj)
return rsn.degree if rsn else None
raise Exception('Explanation not supported for objects of type %s' % type(obj).__name__)
def explanation(self, obj):
if isinstance(obj, Property):
return self.context[obj]
if isinstance(obj, Scene.Angle):
return self.context.angle_value_property(obj)
return None
| 49.761134
| 173
| 0.551176
|
de349e2e0e45a054e97ed49798844286fda2f70b
| 3,225
|
py
|
Python
|
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01_preview/models/token_py3.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01_preview/models/token_py3.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
sdk/containerregistry/azure-mgmt-containerregistry/azure/mgmt/containerregistry/v2019_05_01_preview/models/token_py3.py
|
pjquirk/azure-sdk-for-python
|
cbf02ec4f177b96eae1dbbba87c34c2c93880150
|
[
"MIT"
] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .proxy_resource_py3 import ProxyResource
class Token(ProxyResource):
"""An object that represents a token for a container registry.
Variables are only populated by the server, and will be ignored when
sending a request.
:ivar id: The resource ID.
:vartype id: str
:ivar name: The name of the resource.
:vartype name: str
:ivar type: The type of the resource.
:vartype type: str
:ivar creation_date: The creation date of scope map.
:vartype creation_date: datetime
:ivar provisioning_state: Provisioning state of the resource. Possible
values include: 'Creating', 'Updating', 'Deleting', 'Succeeded', 'Failed',
'Canceled'
:vartype provisioning_state: str or
~azure.mgmt.containerregistry.v2019_05_01_preview.models.ProvisioningState
:param scope_map_id: The resource ID of the scope map to which the token
will be associated with.
:type scope_map_id: str
:param object_id: The user/group/application object ID for which the token
has to be created.
:type object_id: str
:param credentials: The credentials that can be used for authenticating
the token.
:type credentials:
~azure.mgmt.containerregistry.v2019_05_01_preview.models.TokenCredentialsProperties
:param status: The status of the token example enabled or disabled.
Possible values include: 'enabled', 'disabled'
:type status: str or
~azure.mgmt.containerregistry.v2019_05_01_preview.models.Status
"""
_validation = {
'id': {'readonly': True},
'name': {'readonly': True},
'type': {'readonly': True},
'creation_date': {'readonly': True},
'provisioning_state': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'type': {'key': 'type', 'type': 'str'},
'creation_date': {'key': 'properties.creationDate', 'type': 'iso-8601'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'scope_map_id': {'key': 'properties.scopeMapId', 'type': 'str'},
'object_id': {'key': 'properties.objectId', 'type': 'str'},
'credentials': {'key': 'properties.credentials', 'type': 'TokenCredentialsProperties'},
'status': {'key': 'properties.status', 'type': 'str'},
}
def __init__(self, *, scope_map_id: str=None, object_id: str=None, credentials=None, status=None, **kwargs) -> None:
super(Token, self).__init__(**kwargs)
self.creation_date = None
self.provisioning_state = None
self.scope_map_id = scope_map_id
self.object_id = object_id
self.credentials = credentials
self.status = status
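# Example construction (illustrative; the scope map ID below is a placeholder,
# not a real Azure resource ID):
#   token = Token(
#       scope_map_id='/subscriptions/<sub>/resourceGroups/<rg>/providers/'
#                    'Microsoft.ContainerRegistry/registries/<reg>/scopeMaps/<map>',
#       status='enabled',
#   )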
| 41.346154
| 120
| 0.635969
|
ab8a997943eb95aaab636ad73f1db579d5a228fc
| 138,073
|
py
|
Python
|
joby_m_anthony_iii/numerical_methods.py
|
jmanthony3/joby_m_anthony_iii
|
87ec9cbb29040ddff40541c7d86f58221751dcfe
|
[
"MIT"
] | null | null | null |
joby_m_anthony_iii/numerical_methods.py
|
jmanthony3/joby_m_anthony_iii
|
87ec9cbb29040ddff40541c7d86f58221751dcfe
|
[
"MIT"
] | null | null | null |
joby_m_anthony_iii/numerical_methods.py
|
jmanthony3/joby_m_anthony_iii
|
87ec9cbb29040ddff40541c7d86f58221751dcfe
|
[
"MIT"
] | null | null | null |
#################################
## Preamble
# import necessary modules/tools
import math
import numba as nb
import numpy as np
import os
import pandas as pd
import scipy as sc
import sympy as sp
import sys
from types import FunctionType
# # # # # # # # #
#################################
## Universal Variables/Methods/Classes
# common functions
def diagonality(matrix):
"""Determines if matrix is strictly, diagonally dominant.
Parameters
----------
matrix : array
Input matrix to be tested.
Returns
-------
is_strict_diagonal_matrix : boolean
        Truth value indicating whether the matrix is strictly diagonally dominant.
Raises
------
IndexError
Matrix of interest must be square.
Warnings
--------
    Prints to console whether the matrix is strictly diagonally dominant; if it is not, warns that the solution of 'Ax = b' may be inaccurate.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
i, diags, long = 0, np.zeros_like(A), np.zeros_like(A)
while i < len(A):
j = 0
while j < len(A):
aij = A[i][j]
if i == j: long[i][j] = aij
else: diags[i][j] = aij
j += 1
i += 1
if np.sum(long) >= np.sum(diags):
print(f"Information: Matrix, {matrix_name} is strictly, diagonally dominant.")
is_strict_diagonal_matrix = True
else:
is_strict_diagonal_matrix = False
print(f"Warning! Matrix, {matrix_name} is not strictly, diagonally dominant. Solution may be inaccurate.")
return is_strict_diagonal_matrix
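# Worked example for diagonality(): for A = [[4, 1], [1, 5]] the diagonal
# entries sum to 9 and the off-diagonal entries sum to 2, so the function
# reports strict diagonal dominance. Note that, as written, it compares
# whole-matrix sums rather than checking each row individually.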
def eigen_values(matrix):
"""Directly finds eigenvalues of matrix by its determinant. Not recommended for large, sparse matrices.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
lambdas : array
Eigenvector containing roots.
Raises
------
IndexError
Matrix of interest must be square.
"""
# See Also
# --------
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
sym_r = sp.Symbol("r")
i, identityA = 0, np.zeros_like(A)
while i < len(A):
j = 0
while j < len(A[0]):
if i == j: identityA[i][j] = 1
j += 1
i += 1
lambda_identity = identityA*sym_r
determinant = sp.det(sp.Matrix(A - lambda_identity))
roots = sp.solve(determinant)
lambdas = []
for r in roots:
r = complex(r)
if np.imag(r) == 0: r = np.real(r)
lambdas.append(r)
return lambdas
# preceded by eigen_values
def spectral_radius(matrix):
"""Finds the spectral radius of matrix.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
rho : float
Spectral radius.
Raises
------
IndexError
Matrix of interest must be square.
See Also
--------
eigen_values() : Function to find eigenvector of A.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
rho = np.max(np.abs(eigen_values(A)))
return rho
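# Worked example (hand-checkable): for A = [[2, 0], [0, -3]], eigen_values(A)
# yields [-3, 2] (order may vary) and spectral_radius(A) returns 3.0,
# i.e. the largest eigenvalue magnitude.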
# preceded by spectral_radius
class norms:
def __init__(self, x, x0=()):
"""
Parameters
----------
x : array
Newly approximated array.
x0 : array, optional
Previously approximated array.
Yields
------
self.vec_name : string
Connote symbol name as 'x'.
self.x : array
Newly approximated array.
self.old_vec_name : string
Connote symbol name as 'x0'.
self.x0 : array
Previously approximated array.
Raises
------
IndexError
If the input vectors are not the same length.
"""
self.vec_name, self.x = "x", np.array(x)
self.old_vec_name, self.x0 = "x0", np.array(x0)
if not(self.x0.shape[0] == 0 or len(x) == len(x0)):
raise IndexError(f"ERROR! {self.vec_name}, and {self.old_vec_name} must be the same size!")
# @nb.jit(nopython=True)
def l_infinity(self):
"""Maximum difference between absolute sum of i'th rows.
Returns
-------
norm : float
Scalar value.
Yields
------
self.norm : float
Scalar value.
Raises
------
IndexError
If the input vectors are not the same length.
Notes
-----
Best thought as "actual" distance between vectors.
Also calculates infinity norm of matrix(ces).
Examples
--------
[x0] = (1, 1, 1)^(t)
[x] = (1.2001, 0.99991, 0.92538)^(t)
||x0 - x|| = max{|1 - 1.2001|, |1 - 0.99991|, |1 - 0.92538|}
||x0 - x|| = 0.2001
"""
vec_name, x = self.vec_name, self.x
old_vec_name, x0 = self.old_vec_name, self.x0
# initialize loop
norm_i = np.zeros_like(x)
if x0.shape[0] == 0:
if np.sum(x.shape) == x.shape[0]:
for i in range(x.shape[0]):
# evaluate and store norm, ||.||
norm_i[i] = abs(x[i])
elif np.sum(x.shape) > x.shape[0]:
norm_ij = np.zeros_like(x)
for i in range(x.shape[0]):
for j in range(x.shape[1]):
# evaluate and store norm, ||.||
norm_ij[i][j] = abs(x[i][j])
norm_i[i] = np.sum(norm_ij[i][:])
elif len(x) == len(x0):
if np.sum(x0.shape) == x0.shape[0]:
for i in range(x0.shape[0]):
norm_i[i] = abs(x[i] - x0[i])
elif np.sum(x0.shape) > x0.shape[0]:
if np.sum(x.shape) == np.sum(x0.shape):
for i in range(x0.shape[0]):
for j in range(x0.shape[1]):
norm_ij = np.zeros_like(x)
# evaluate and store norm, ||.||
norm_ij[i][j] = abs(x[i][j] - x0[i][j])
norm_i[i] = np.sum(norm_ij[i][:])
elif np.sum(x.shape) == np.sum(x0.shape):
for i in range(x0.shape[0]):
# evaluate and store norm, ||.||
norm_i[i] = abs(x[i] - x0[i])
else:
raise IndexError(f"ERROR! {vec_name}, and {old_vec_name} must be the same size!")
# if no errors, then evaluate norm
self.norm = np.amax(norm_i)
# return the l_infinity norm
return self.norm
# @nb.jit(nopython=True)
def l_two(self):
"""Square root of sum of differences squared along i'th row.
Returns
-------
norm : float
Scalar value.
Yields
------
self.norm : float
Scalar value.
Raises
------
IndexError
If the input vectors are not the same length.
See Also
--------
spectral_radius() : Function to find the spectral radius of vector.
Examples
--------
[x0] = (1, 1, 1)^(t)
[x] = (1.2001, 0.99991, 0.92538)^(t)
||x0 - x|| = sqrt[ (1 - 1.2001)^2 \
+ (1 - 0.99991)^2 + (1 - 0.92538)^2 ]
||x0 - x|| = 0.21356
"""
vec_name, x = self.vec_name, self.x
old_vec_name, x0 = self.old_vec_name, self.x0
if x0.shape[0] == 0:
# initialize loop
norm_i = np.zeros_like(x)
if np.sum(x.shape) == x.shape[0]:
for i in range(len(x)):
# evaluate and store norm, ||.||
norm_i[i] += x[i]**2
norm = math.sqrt(np.sum(norm_i))
elif np.sum(x.shape) > x.shape[0]:
x0 = np.reshape(x, (x.shape[0], x.shape[1]))
xt = np.reshape(x, (x.shape[1], x.shape[0]))
norm = math.sqrt(spectral_radius(x0*xt))
elif len(x) == len(x0):
if np.sum(x0.shape) > x0.shape[0]:
x0 = np.reshape(x0, (x0.shape[0], x0.shape[1]))
xt = np.reshape(x, (x0.shape[1], x0.shape[0]))
else:
x0 = np.reshape(x0, (len(x0), 1))
xt = np.reshape(x, (1, len(x0)))
# xt = np.reshape(x, (1, x.shape[0]))
norm = math.sqrt(spectral_radius(x0*xt))
else:
raise IndexError(f"ERROR! {vec_name}, and {old_vec_name} must be the same size!")
self.norm = norm
return norm
# preceded by norms.()l_infinity() and norms().l_two()
def condition_number(matrix, norm_type="l_two"):
"""Find the condition number of a given matrix and norm type.
Parameters
----------
matrix : array
Input matrix for analysis.
norm_type : string, optional
Selects norm comparison which is 'l_two' by default.
Returns
-------
k : float
Condition number of matrix, A.
Warnings
--------
Will output evaluation of condition number and show in console.
See Also
--------
norms().l_two() : Method that yields the l_2 norm.
norms().l_infinity() : Method that yields the l_infinity norm.
"""
matrix_name, A = "A", np.array(matrix)
i, A_inv = 0, np.zeros_like(A)
while i < len(A):
j = 0
while j < len(A):
aij = A[i][j]
if aij != 0: A_inv[i][j] = 1/aij
j += 1
i += 1
if norm_type == "l_infinity":
norm, abnorm = norms(A).l_infinity(), norms(A_inv).l_infinity()
elif norm_type == "l_two":
norm, abnorm = norms(A).l_two(), norms(A_inv).l_two()
k = norm*abnorm
print(f"Information: Condition Number K({matrix_name}) = {k}")
return k
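# Sanity-check example: for the identity matrix (e.g. np.eye(2)), the
# element-wise reciprocal computed above equals the matrix itself, both norms
# are 1, and K(A) = 1. Note that this reciprocal is a true inverse only for
# diagonal matrices.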
def make_array(domain, function, variable=sp.Symbol("x")):
"""Maps domain to range.
Parameters
----------
domain : array
        Collection of input data.
function : expression
Function that maps the domain to range.
variable : string, optional
Sympy symbol or string representation of variable to respect in function.
Returns
-------
g : tuple
Mapped range from function.
Warnings
--------
Prints to console the input expression, and that the expression was in fact used.
"""
if isinstance(function, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
i, X, g = 0, np.array(domain), np.zeros_like(domain)
while i < len(X):
j = 0
if np.sum(X.shape) > np.sum(X.shape[0]):
while j < len(X[0]):
g[i][j] = (function(X[i][j]))
j += 1
else: g[i] = function(X[i])
i += 1
return np.array(g)
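# Example: make_array([0, 1, 2, 3], lambda x: x**2) maps the integer domain
# through x**2 and returns array([0, 1, 4, 9]) (after printing the sympified
# expression it used).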
def symmetry(matrix):
"""Determines boolean truth value whether given matrix is symmetric.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
is_symmetric : bool
True if symmetric, else False.
Raises
------
IndexError
Matrix of interest must be square.
Warnings
--------
Console print that A is either symmetric or asymmetric.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
i, At, is_symmetric = 0, np.transpose(A), False
for ai in A:
j = 0
for aj in ai:
if aj == At[i][j]: is_symmetric = True
else:
is_symmetric = False
print(f"Warning! Matrix, {matrix_name} is not symmetric.")
return is_symmetric
j += 1
i += 1
if is_symmetric: print(f"Information: Matrix, {matrix_name} is symmetric.")
return is_symmetric
def tridiagonality(matrix):
"""Determine boolean truth value whether given matrix is tridiagonal.
Parameters
----------
matrix : array
Matrix of interest.
Returns
-------
is_tridiagonal : bool
True if tridiagonal, else False.
Raises
------
IndexError
Matrix of interest must be square.
Warnings
--------
Prints to console that matrix is either tridiagonal or not.
"""
matrix_name, A = "A", np.array(matrix)
if not(np.sum(np.shape(A)) - np.shape(A)[0] == np.shape(A)[0]):
raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
diagonals = np.diagflat(np.diag(A))
above = np.diagflat(np.diag(A, k=1), k=1)
below = np.diagflat(np.diag(A, k=-1), k=-1)
non_A = A - (diagonals + above + below)
if np.sum(non_A) != 0:
is_tridiagonal = False
print(f"Warning! Matrix, {matrix_name} is not tridiagonal.")
else:
is_tridiagonal = True
print(f"Information: Matrix, {matrix_name} is tridiagonal.")
return is_tridiagonal
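# A minimal usage sketch for tridiagonality() (illustrative example matrix): only the
# main diagonal and the two diagonals adjacent to it may carry non-zero entries.
# >>> tridiagonality([[4., 1., 0.], [1., 4., 1.], [0., 1., 4.]])
# Information: Matrix, A is tridiagonal.
# True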
# # # # # # # # #
#################################
## Specific Functions
# --------------------
# eigenvalue solvers
class DirectSolver:
def __init__(self, A, power, max_iter=100):
"""
Parameters
----------
A : tuple
Characteristic matrix.
power : int
Signed power to which function error must be within.
max_iter : int, optional
Maximum iterations for which function may loop.
Yields
------
self.A : tuple
Either input functions or matrix of characteristic values.
self.tol : float
Specified tolerance to which method terminates.
self.max_iter : int
Maximum iterations allowed for method.
self.is_diagonal : bool
Truth value of whether matrix is diagonal.
self.eigenvalues : tuple
Eigenvalues of characteristic matrix, A.
self.spectral_radius : float
Spectral radius of characteristic matrix, A.
self.condition_number : float
Condition number of characteristic matrix, A.
Raises
------
IndexError
Matrix of interest must be square.
ValueError
If iterations constraint is not an integer.
Warnings
--------
Not recommended to use eigen_values() to find the eigenvalues of characteristic matrix, A; if quick calculations are desired, do not use eigen_values() when matrix, A is a large, sparse matrix.
See Also
--------
eigen_values() : Function to find eigenvalues of A.
spectral_radius() : Function that finds the spectral radius of characteristic matrix, A.
Notes
-----
Specified tolerance evaluated by `10**power`.
`norm_type` may be either `'l_infinity'` or `'l_two'` but is 'l_infinity' by default.
If `self.is_diagonal` is True, then matrix is diagonal. Else, not diagonal.
"""
matrix_name, A = "A", np.array(A)
if np.sum(A.shape[0]) != np.sum(A.shape[1]): raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
if max_iter <= 0 or not isinstance(max_iter, (int, float)): raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {max_iter} was given and not understood.")
self.A = A
self.tol = float(10**power)
self.max_iter = int(max_iter)
self.is_diagonal = diagonality(A)
self.is_tridiagonal = tridiagonality(A)
# self.eigen_values = eigen_values(A)
# self.spectral_radius = spectral_radius(A)
# self.condition_number = condition_number(A, norm_type)
def power_method(self, x):
"""Approximate the dominant eigenvalue and associated eigenvector of matrix, A given some non-zero vector, x.
Parameters
----------
x : array
Numpy array.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.iterations : tuple
Collection of iterations through method.
self.mu : tuple
Collection of approximately largest eigenvalue.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
mu = [norms(x).l_infinity()]
x = x/mu[-1]
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
y = np.matmul(A, x)
for yi in y:
if np.abs(yi) == norms(y).l_infinity():
yp = float(yi)
mu.append(yp)
eigenvectors.append(y/yp)
errors.append(norms(x, eigenvectors[-1]).l_infinity())
x = eigenvectors[-1]
k += 1
self.iterations = np.array(range(k))
self.mu = np.array(mu)
self.lambdas = np.array(eigenvectors)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Mu": self.mu, "Lambdas": self.lambdas, "Errors": self.errors})
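# A minimal usage sketch of DirectSolver().power_method() (illustrative values only;
# not part of the original class). The dominant eigenvalue of the example matrix is 3,
# which the "Mu" column should approach as the iterations proceed.
# >>> A = [[2., 1.], [1., 2.]]
# >>> solver = DirectSolver(A, power=-6, max_iter=100)
# >>> df = solver.power_method(x=[1., 0.])
# >>> df["Mu"].values[-1]   # approximately 3.0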
def inverse_power_method(self, x, q):
"""Approximate eigenvalue closest to target, q and associated eigenvector of matrix, A given some non-zero vector, x.
Parameters
----------
x : array
Numpy array.
q : float
Target to which the closest eigenvalue of matrix will be found.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.iterations : tuple
Collection of iterations through method.
self.mu : tuple
Collection of approximately largest eigenvalue.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
self.q = float(q)
A = np.linalg.inv(A-q*np.identity(len(A)))
mu = [1/norms(x).l_infinity() + q]
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
y = np.matmul(A, x)
for yi in y:
if np.abs(yi) == norms(y).l_infinity():
yp = float(yi)
mu.append(1/yp + q)
eigenvectors.append(y/yp)
errors.append(norms(x, x0=eigenvectors[-1]).l_infinity())
x = eigenvectors[-1]
k += 1
self.iterations = np.array(range(k))
self.mu = np.array(mu)
self.lambdas = np.array(eigenvectors)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Mu": self.mu, "Lambdas": self.lambdas, "Errors": self.errors})
def qr_algorithm(self):
"""Approximate dominant eigenvalue and associated eigenvector of matrix, A.
Source: https://www.youtube.com/watch?v=FAnNBw7d0vg
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
"""
A, tol, N = self.A, self.tol, self.max_iter
k, eigenvectors, errors = 1, [np.diag(A)], [1]
while errors[-1] > tol and k <= N:
Q = np.zeros_like(A, dtype=float)
R = np.zeros_like(A, dtype=float)
QI = []
for j in range(len(A[0])):
ai = np.array(np.zeros(len(A)))
for i in range(len(A)):
ai[i] = A[i][j]
ai_perp = 0
for i in range(j):
R[i][j] = np.dot(ai, QI[i])
ai_perp += R[i][j]*QI[i]
ai -= ai_perp
R[j][j] = np.sqrt(np.sum(ai**2))
qi = ai/R[j][j]
QI.append(qi)
i = 0
for q in qi:
Q[i][j] = q
i += 1
A = np.matmul(R, Q)
eigenvectors.append(np.diag(A))
err = np.average([norms(np.diag(A, k=-1)).l_infinity(), norms(np.diag(A, k=1)).l_infinity()])
errors.append(err)
k += 1
self.iterations = np.array(range(k))
self.lambdas = np.array(eigenvectors)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Lambdas": self.lambdas, "Errors": self.errors})
def steepest_descent(self, x, b):
"""Approximate solution vector, x given matrix, A initial guess vector, x, and vector, b.
Parameters
----------
x : array
Numpy array.
b : array
Input numpy array.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.b : tuple
Input numpy array.
self.iterations : tuple
Collection of iterations through method.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
IndexError
If b is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
vec_name, b = "b", np.array(b)
if np.sum(b.shape) - np.sum(b.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.b = np.reshape(b,(len(b),1))
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
r = b - np.matmul(A, x)
alpha = float(np.matmul(r.T, r)[0]/np.matmul(np.matmul(r.T, A), r)[0])
x1 = x + alpha*r
eigenvectors.append(x1)
errors.append(norms(x1, x).l_infinity())
x = x1
k += 1
self.iterations = np.array(range(k))
self.lambdas = np.array(eigenvectors)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Lambdas": self.lambdas, "Errors": self.errors})
def conjugate_gradient(self, x, b, C=None):
"""Approximate solution vector given matrix, A, initial guess vector, x, and vector, b.
Parameters
----------
x : array
Numpy array.
b : vector
Input numpy array.
C : None or matrix, optional
Preconditioning matrix.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.x : tuple
Initial guess at eigenvector.
self.b : tuple
Input numpy array.
self.iterations : tuple
Collection of iterations through method.
self.lambdas : tuple
Collection of approximate eigenvectors.
self.errors : tuple
Collection of yielded norms.
Raises
------
IndexError
If x is neither n x 1 nor 1 x n array.
IndexError
If b is neither n x 1 nor 1 x n array.
"""
A, tol, N = self.A, self.tol, self.max_iter
vec_name, x = "x", np.array(x)
if np.sum(x.shape) - np.sum(x.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.x = np.reshape(x,(len(x),1))
vec_name, b = "b", np.array(b)
if np.sum(b.shape) - np.sum(b.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
self.b = np.reshape(b,(len(b),1))
self.C = C
r0 = b - np.matmul(A, x)
if type(C) == type(None):
do_precondition = True
v0 = r0
else:
do_precondition = False
Minv = np.linalg.inv(C*np.transpose(C))
v0 = np.matmul(Minv, r0)
k, eigenvectors, errors = 1, [x], [1]
while errors[-1] > tol and k <= N:
if do_precondition:
alpha = float(np.matmul(r0.T, r0)[0]/np.matmul(np.matmul(v0.T, A)[0], v0)[0])
else:
alpha = float(np.matmul(np.matmul(r0.T, Minv), r0)[0]/np.matmul(np.matmul(v0.T, A), v0)[0])
x1 = x + alpha*v0
eigenvectors.append(x1)
errors.append(norms(x1, x).l_infinity())
r1 = r0 - alpha*np.matmul(A, v0)
if do_precondition:
s1 = float(np.matmul(r1.T, r1)[0]/np.matmul(r0.T, r0)[0])
else: s1 = float(np.matmul(np.matmul(r1.T, Minv)[0], r1)[0]/np.matmul(np.matmul(r0.T, Minv)[0], r0)[0])
x, r0 = x1, r1
if do_precondition: v0 = r1 + s1*v0
else: v0 = np.matmul(Minv, r1) + s1*v0
k += 1
self.iterations = np.array(range(k))
self.eigenvectors = np.array(eigenvectors)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Lambdas": self.eigenvectors, "Errors": self.errors})
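# A minimal usage sketch of DirectSolver().conjugate_gradient() on a small symmetric,
# positive-definite system (illustrative values only; column vectors are assumed, as
# the indexing above expects). With A = [[4, 1], [1, 3]] and b = [1, 2], the exact
# solution is x = [1/11, 7/11].
# >>> A = [[4., 1.], [1., 3.]]
# >>> solver = DirectSolver(A, power=-6)
# >>> df = solver.conjugate_gradient(x=[[0.], [0.]], b=[[1.], [2.]])
# >>> df["Lambdas"].values[-1]   # approximately [0.0909, 0.6364]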
# --------------------
# --------------------
# iterative techniques
class SingleVariableIteration:
def __init__(self, function, a, b, power=-6, variable=sp.Symbol("x"), iter_guess=True, k=0):
"""
Parameters
----------
function : expression
Input function.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
power : float, optional
Signed, specified power of tolerance until satisfying method.
variable : symbol, optional
Respected variable in derivative. Assumed to be 'x' if not stated.
iter_guess : bool or integer, optional
Boolean value of `True` by default. If integer, iterate for that integer.
k : float, optional
Absolute maximum slope of function.
Yields
------
self.function : expression
Input function.
self.a : float
Left-hand bound of interval.
self.b : float
Right-hand bound of interval.
self.tol : float
Tolerance to satisfy method.
self.variable : symbol, optional
Respected variable in derivative. Assumed to be `'x'` if not stated.
self.iter_guess : bool or integer, optional
Boolean value of `True` by default. If integer, iterate for that integer.
self.k : float, optional
Absolute maximum slope of function. Assumed 0 if not defined.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Notes
-----
self.tol evaluated by: `10**power`.
"""
if isinstance(function, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
# elif isinstance(f, (sp.Expr)):
# f = sp.lambdify(variable, f)
# self.function = f
# print("sympy expression converted to lambda function.")
elif isinstance(function, (str)):
g = lambda x: eval(function)
function = sp.lambdify(variable, g(variable))
print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
self.function, self.variable = function, variable
self.a, self.b, self.tol = float(a), float(b), float(10**power)
self.iter_guess, self.k = iter_guess, k
def find_k(self):
"""Find greatest integer for maximum iterations for tolerance.
Returns
-------
k : float
Maximum possible slope of input function.
Yields
------
self.k : float
Maximum possible slope of input function.
Warnings
--------
Prints to console the input expression, and that the expression was in fact used.
"""
a, b, variable = self.a, self.b, self.variable
sym_function = sp.N(sp.sympify(self.function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
k = self.k
# determine form of derivative
df = sp.lambdify(variable, sp.diff(sym_function))
for alpha in np.linspace(a, b, 1000):
df_alpha = abs(df(alpha))
if df_alpha > k: k = df_alpha
self.k = k
return k
def max_iterations(self, method, p0=0):
"""Find greatest integer for maximum iterations for tolerance.
Parameters
----------
method : string
Selection of iterative method for iterations are needed.
p0 : float, optional
Initial guess for function solution.
Returns
-------
max_iter : integer
Maximum number of iterations required for specified tolerance.
Yields
------
self.max_iter : integer
Maximum number of iterations required for specified tolerance.
Raises
------
ValueError
Prescribed method is not an available option.
Warnings
--------
Informs user the maximum number of iterations for method.
Notes
-----
Will round away from zero to higher integers.
Examples
--------
If `method == 'bisection'` & a=1, b=2, and tol=-3, then:
`max_iter` >= -log(`tol`/(`b` - `a`))/log(2)
`max_iter` >= -log(10**(-3)/(2 - 1))/log(2)
`max_iter` >= 9.96
`max_iter` = 10
Else, if a=1, b=2, tol=-3, p0=1.5, and k=0.9, then:
`max_iter` >= log(`tol`/max(`p0` - `a`, `b` - `p0`))/log(`k`)
`max_iter` >= log(10**(-3)/max(1.5 - 1, 2 - 1.5))/log(0.9)
`max_iter` >= log(10**(-3)/0.5)/log(0.9)
`max_iter` >= 58.98
`max_iter` = 59
"""
a, b, tol, k = self.a, self.b, self.tol, self.k
p0 = float(p0)
if method == "bisection":
max_iter = math.ceil(-math.log(tol/(b - a))/math.log(2))
elif method in ("fixed_point", "newton_raphson", "secant_method", "false_position"):
max_iter = math.ceil(math.log(tol/max(p0 - a, b - p0))/math.log(k))
else: raise ValueError(f"ERROR! I am sorry. The desired method must be: 'bisection', 'fixed_point', 'newton_raphson', 'secant_method', or 'false_position'.")
self.max_iter = max_iter
print(f"Information: With the inputs, I will terminate the technique after so many iterations, N = {max_iter}")
return max_iter
# next 5 functions preceded by find_k & max_iterations
def bisection(self):
"""Given f(x) in [a, b] find x within tolerance. Is a root-finding method: f(x) = 0.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Relying on the Intermediate Value Theorem, this is a bracketed, root-finding method. Generates a sequence {p_n}^{inf}_{n=1} to approximate a zero of f(x), p and converges by O(1 / (2**N)).
Examples
--------
If x**3 + 4*x**2 = 10,
=> f(x) = x**3 + 4*x**2 - 10 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess = self.iter_guess
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# check if f(a) and f(b) are opposite signs
if f(a)*f(b) < 0:
if iter_guess == True:
# if left unassigned, guess
N = self.max_iterations("bisection")
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
x = (b - a)/2
p = a + x # new value, p
approximations.append(p)
if f(a)*f(p) > 0: a = p # adjust next bounds
else: b = p
errors.append(abs(x)) # error of new value, p
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = np.array(range(k+1))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if f(a) is not opposite f(b)
else: raise ValueError(f"ERROR! Interval bounds, [a, b] = [{a}, {b}] must yield opposite signs in function, {sym_function}.")
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
def false_position(self, p0, p1):
"""Given f(x) and initial guesses, p0 and p1 in [a, b] find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
First initial guess.
p1 : float
Second initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
Theorem:
1) Existence of a fixed-point:
If g in C[a,b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
2) Uniqueness of a fixed point:
If g'(x) exists on [a, b] and a positive constant, k < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, p in [a, b].
Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
Examples
--------
If g(x) = x**2 - 2
Then p = g(p) = p**2 - 2
=> p**2 - p - 2 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0, p1 = float(p0), float(p1)
self.p0, self.p1 = p0, p1
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# check if f(a) and f(b) are opposite signs
if f(p0)*f(p1) < 0:
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("false_position", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("false_position", p0=p0)
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
q0, q1 = f(p0), f(p1)
p = p1 - q1*(p1 - p0)/(q1 - q0) # new value, p
approximations.append(p)
errors.append(abs(p - p0)) # error of new value, p
if f(p)*q1 < 0: p0 = p1 # adjust next bounds
p1 = p
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = np.array(range(k+1))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if f(a) is not opposite f(b)
else: raise ValueError(f"ERROR! Interval bounds, [a, b] = [{a}, {b}] must yield opposite signs in function, {sym_function}.")
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
def fixed_point(self, p0):
"""Given f(x) and initial guess, p0 in [a, b] find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
Initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
Theorem:
1) Existence of a fixed-point:
If g in C[a, b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
2) Uniqueness of a fixed point:
If g'(x) exists on [a, b] and a positive constant, k < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, `p` in [a, b].
Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
Examples
--------
If g(x) = x**2 - 2
Then p = g(p) = p**2 - 2
=> p**2 - p - 2 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0 = float(p0)
self.p0 = p0
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("fixed_point", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("fixed_point", p0=p0)
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
p = f(p0) # new value, p
approximations.append(p)
errors.append(abs((p - p0)/p0)) # error of new value, p
p0 = p # set future previous value
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = np.array(range(k+1))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
def newton_raphson(self, p0):
"""Given f(x) and initial guess, p0 in [a, b], find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
Initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
f'(x) != 0.
Not root-bracketed.
Initial guess must be close to real solution; else, will converge to different root or oscillate (if symmetric).
Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
Technique based on first Taylor polynomial expansion of f about p0 and evaluated at x = p. |p - p0| is assumed small; therefore, 2nd order Taylor term, the error, is small.
Newton-Raphson has quickest convergence rate.
This method can be viewed as fixed-point iteration.
Theorem:
1) Existence of a fixed-point:
If g in C[a, b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
2) Uniqueness of a fixed point:
If g'(x) exists on [a, b] and a positive constant, `k` < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, `p` in [a, b].
Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
Examples
--------
If g(x) = x**2 - 2
Then p = g(p) = p**2 - 2
=> p**2 - p - 2 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0 = float(p0)
self.p0 = p0
# calculate if expression
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# determine form of derivative
df = sp.lambdify(self.variable, sp.diff(sym_function))
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("newton_raphson", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("newton_raphson", p0=p0)
elif isinstance(iter_guess, int):
# if defined as integer, use
N = iter_guess
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
fp0 = f(p0)
dfp0 = df(p0)
p = p0 - (fp0/dfp0) # new value, p
approximations.append(p)
errors.append(abs(p - p0)) # error of new value, p
p0 = p # set future previous value
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = np.array(range(k+1))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
def secant_method(self, p0, p1):
"""Given f(x) and initial guesses, p0 and p1 in [a, b], find x within tolerance.
Root-finding problem: f(x) = 0.
!!! Use lowest k !!!
Parameters
----------
p0 : float
First initial guess.
p1 : float
Second initial guess.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of evaluated points, p.
self.errors : tuple
Collection of propagated error through method.
Raises
------
ValueError
If input for desired iterations was assigned not an integer.
ValueError
If initial guesses did not evaluate to have opposite signs.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Warnings
--------
Print to console if solution was found, or state that solution did not converge with given guess or prescribed tolerance.
Notes
-----
Not root-bracketed.
Bypasses need to calculate derivative (as in Newton-Raphson).
Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
Theorem:
1) Existence of a fixed-point:
If g in C[a, b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
2) Uniqueness of a fixed point:
If g'(x) exists on [a, b] and a positive constant, `k` < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, `p` in [a, b].
Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
Examples
--------
If g(x) = x**2 - 2
Then p = g(p) = p**2 - 2
=> p**2 - p - 2 = 0
"""
f, a, b, tol = self.function, self.a, self.b, self.tol
iter_guess, k = self.iter_guess, self.k
p0, p1 = float(p0), float(p1)
self.p0, self.p1 = p0, p1
# calculate if expression
if isinstance(f, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(self.variable)))
f = sp.lambdify(self.variable, sym_function)
# check if f(a) and f(b) are opposite signs
if f(p0)*f(p1) < 0:
if iter_guess == True and k == 0:
# if left unassigned, guess
N = self.max_iterations("secant_method", p0=p0)
elif iter_guess == True and k != 0:
# if left unassigned, guess
N = self.max_iterations("secant_method", p0=p0)
elif isinstance(iter_guess, (int, float)):
# if defined as integer, use
N = int(iter_guess)
# else, break for bad assignment
else: raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {iter_guess} was given and not understood.")
# initialize
k, approximations, errors = 0, [f(a)], [1]
# exit by whichever condition is TRUE first
while errors[-1] >= tol and k <= N:
q0, q1 = f(p0), f(p1)
# new value, p
p = p1 - q1*(p1 - p0)/(q1 - q0)
approximations.append(p)
errors.append(abs(p - p0)) # error of new value
p0, p1 = p1, p # set future previous values
k += 1 # iterate to k + 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
self.iterations = np.array(range(k+1))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Errors": self.errors})
# abort if f(a) is not opposite f(b)
else: raise ValueError(f"ERROR! Interval bounds, [a, b] = [{a}, {b}] must yield opposite signs in function, {sym_function}.")
# abort if not expression
else: raise TypeError("ERROR! The input function must be an expression.")
class MultiVariableIteration:
def __init__(self, A, x0, b, power=-6, max_iter=100, norm_type="l_infinity"):
"""
Parameters
----------
A : tuple
Either input functions or matrix of characteristic values.
x0 : tuple
Either collection of symbols or initial guesses for system of equations.
b : tuple
Input vector.
power : float, optional
Signed, specified power of tolerance until satisfying method.
max_iter : integer, optional
Number of iterations.
norm_type : string, optional
String representation of desired norm function. `'l_infinity'` by default.
Yields
------
self.A : tuple
Either input functions or matrix of characteristic values.
self.x0 : tuple
Either collection of symbols or initial guesses for system of equations.
self.b : tuple
Input vector.
self.tol : float
Specified tolerance to which method terminates.
self.max_iter : int
Maximum iterations allowed for method.
self.norm_type : string
String representation of desired norm function.
self.is_diagonal : bool
Truth value of whether matrix is diagonal.
self.is_symmetric : bool
Truth value of whether matrix is symmetric.
self.is_tridiagonal : bool
Truth value of whether matrix is tridiagonal.
self.eigen_values : tuple
Eigenvalues of characteristic matrix, A.
self.spectral_radius : float
Spectral radius of characteristic matrix, A.
self.condition_number : float
Condition number of characteristic matrix, A.
Raises
------
IndexError
Matrix of interest must be square.
IndexError
If x0 is neither n x 1 nor 1 x n array.
IndexError
If b is neither n x 1 nor 1 x n array.
ValueError
If iterations constraint is not an integer.
ValueError
If desired norm method was neither `'l_infinity'` nor `'l_two'`.
Warnings
--------
Not recommended to use eigen_values() to find eigenvalues of characteristic matrix, A; therefore, if desiring quick calculations, do not use if matrix, A is a large, sparse matrix.
See Also
--------
eigen_values() : Function to find eigenvalues of matrix, A.
spectral_radius() : Function to find the spectral radius of characteristic matrix, A.
Notes
-----
Specified tolerance evaluated by: `10**power`.
norm_type may be either `'l_infinity'` or `'l_two'`. Is 'l_infinity' by default.
If `self.is_diagonal` is True, then matrix is diagonal. Else, not diagonal.
"""
matrix_name, vec_name, sys_name = "A", "x0", "b"
A, x0, b = np.array(A), np.array(x0), np.array(b)
if np.sum(A.shape[0]) != np.sum(A.shape[1]): raise IndexError(f"ERROR! Matrix, {matrix_name} must be square!")
if np.sum(x0.shape) - np.sum(x0.shape[0]) > 1: raise IndexError(f"Systems vector, {vec_name} must be n x 1 or 1 x n array!")
if np.sum(b.shape) - np.sum(b.shape[0])> 1: raise IndexError(f"Systems vector, {sys_name} must be n x 1 or 1 x n array!")
if max_iter <= 0 or not isinstance(max_iter, (int, float)): raise ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {max_iter} was given and not understood.")
if norm_type != "l_infinity" and norm_type != "l_two": raise ValueError("ERROR! Desired norm type was not understood. Please choose 'l_infinity' or 'l_two'.")
n = len(x0)
self.A = A
self.x0 = np.reshape(x0,(n,1))
self.b = np.reshape(b,(n,1))
self.tol = float(10**power)
self.max_iter = int(max_iter)
self.norm_type = norm_type
self.is_diagonal = diagonality(A)
self.is_symmetric = symmetry(A)
self.is_tridiagonal = tridiagonality(A)
# self.eigen_values = eigen_values(A)
# self.spectral_radius = spectral_radius(A)
# self.condition_number = condition_number(A, norm_type)
def __find_xk(self, x):
return np.matmul(self.T, x) + self.c
def find_omega(self, omega=0):
"""Given the characteristic matrix and solution vector, determine if prescribed omega is the optimum choice.
Parameters
----------
omega : float, optional
Relaxation parameter.
Returns
-------
omega : float
If found, is the optimum choice of omega.
Yields
------
self.user_omega : float
Supplied/default omega.
self.is_tridiagonal : bool
Truth value of whether matrix, A is tridiagonal.
self.best_omega : float
If found, is the optimum choice of omega.
Warnings
--------
If 0 < omega < 2, then method will converge regardless of choice for x0. Will inform user that matrix, A is not tridiagonal, but will proceed with calculation all the same. If matrix, A is poorly defined and not found to be positive definite, then user is informed but calculation proceeds. If an optimal omega cannot be found, then `self.best_omega` assigned from supplied/default omega.
See Also
--------
tridiagonality() : Determines if matrix, A is tridiagonal or not.
spectral_radius() : Uses the spectral radius of Gauss-Seidel's T-matrix to calculate omega.
Notes
-----
Unless specified, omega will be 0 and chosen, if possible.
"""
matrix_name = "A"
A, x0, omega = np.array(self.A), np.array(self.x0), float(omega)
self.user_omega = omega
xn = sp.Matrix(np.reshape(np.zeros_like(x0), (len(x0), 1)))
xt = sp.Matrix(np.reshape(np.zeros_like(x0), (1, len(x0))))
i = 0
for x in np.array(x0): xn[i], xt[i] = x, x; i += 1
y = xt*sp.Matrix(A)*xn
if y[0] > 0: state = True
else: state = False
if self.is_symmetric and state: theorem_6_22 = True
else: theorem_6_22 = False
i, theorem_6_25 = 1, True
while i <= len(A) and theorem_6_25 == True:
Ai = sp.Matrix(A[:i,:i])
if sp.det(Ai) > 0: theorem_6_25 = True
else : theorem_6_25 = False
i += 1
if theorem_6_22 or theorem_6_25:
if 0 < omega and omega < 2: print("According to Ostrowski-Reich's Theorem, the successive relaxation technique will converge.")
if self.is_tridiagonal:
D = np.diagflat(np.diag(A))
L = np.diagflat(np.diag(A, k=-1), k=-1)
U = np.diagflat(np.diag(A, k=1), k=1)
DL = D - L
i, DL_inv = 0, np.zeros_like(DL)
while i < len(DL_inv):
j = 0
while j < len(DL_inv[0]):
dl = DL[i][j]
if dl != 0: DL_inv[i][j] = 1/(dl)
j += 1
i += 1
Tg = DL_inv*U
omega = 2 / (1 + math.sqrt(1 - spectral_radius(Tg)))
print(f"I believe {omega} would be the best choice.")
else:
print(f"Warning! Matrix, {matrix_name} is not tridiagonal.")
print(f"Assigning supplied omega, {omega} as `self.best_omega`.")
else:
print(f"Warning! Matrix, {matrix_name} is not positive definite.")
print(f"Assigning supplied omega, {omega} as `self.best_omega`.")
self.best_omega = omega
return omega
def gauss_seidel(self):
"""Given A*x = b, use `self.norm_type` to find x via the Gauss-Seidel Method.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
-------
self.iterations : tuple
Running collection of iterations through method.
self.approximations : tuple
Finally evaluated solution.
self.errors : tuple
Aggregate of yielded norms.
Warnings
--------
Prints to console whether or not a solution was found within the specified tolerance with the supplied, initial guess.
See Also
--------
norms.l_infinity() : Will find the l_infinity norm between x0 and xi.
norms.l_two() : Will find the l_2 norm between x0 and xi.
Notes
-----
gauss_seidel():
[x]_(k) = ( (D - L)^(-1) * U ) * [x]_(k - 1) + ( (D - L)^(-1) )*[b]
"""
A, x0, b, tol, N = self.A, self.x0, self.b, self.tol, self.max_iter
norm_type, norm = self.norm_type, tol*10
# A = np.zeros((N, N))
# np.fill_diagonal(A, ai)
# A = A + np.diagflat(bi, 1)
# A = A + np.diagflat(ci, -1)
# x0 = np.zeros(N)
# b = np.array(di)
# A1, A2 = np.zeros((n, n)), np.zeros((n, n))
# np.fill_diagonal(A1, np.diagonal(A))
# A1 = A1 - np.tril(A, k=-1)
# i = 0
# while i < n:
# j = 0
# while j <= i:
# a1ij = A1[i][j]
# if a1ij != 0:
# A2[i][j] = 1/a1ij
# j += 1
# i += 1
# self.T = np.matmul(A2, np.triu(A, k=1))
# self.c = np.matmul(A2, b)
k, n, approximations, errors = 1, len(x0), [x0], [norm]
while errors[-1] > tol and k <= N:
i, xi = 0, np.zeros_like(x0)
while i < n:
j, y1, y2 = 0, 0., 0.
while j <= i-1:
y1 += A[i][j]*xi[j]
j += 1
j = i + 1
while j < n:
y2 += A[i][j]*x0[j]
j += 1
xi[i] = (-y1 - y2 + b[i])/A[i][i]
i += 1
# xi = self.__find_xk(x0)
if norm_type == "l_infinity":
norm = norms(xi, x0).l_infinity()
elif norm_type == "l_two":
norm = norms(xi, x0).l_two()
approximations.append(xi)
errors.append(norm)
x0 = xi
k += 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
# m, n = len(approximations[0]), len(approximations)
# j, x = 0, np.zeros((m,n))
# while j < n:
# i = 0
# while i < m:
# x[i][j] = float(approximations[j][i])
# i += 1
# j += 1
self.iterations = np.array(range(k))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Error": self.errors})
def jacobi(self):
"""Given A*x = b, use `self.norm_type` to find x via the Jacobi Method.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
-------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of approximated, iterative solutions.
self.errors : tuple
Collection of yielded norms.
Warnings
--------
Prints to console whether or not a solution was found within the specified tolerance with the supplied, initial guess.
See Also
--------
norms.l_infinity() : Will find the l_infinity norm between x0 and xi.
norms.l_two() : Will find the l_2 norm between x0 and xi.
Notes
-----
jacobi():
[x]_(k) = ( D^(-1)*(L + U) ) * [x]_(k - 1) + ( D^(-1) ) * [b]
"""
A, x0, b, tol, N = self.A, self.x0, self.b, self.tol, self.max_iter
norm_type, norm = self.norm_type, tol*10
k, n, approximations, errors = 1, len(x0), [x0], [norm]
while errors[-1] > tol and k <= N:
i, xi = 0, np.zeros_like(x0)
while i < n:
j, y = 0, 0.
while j < n:
if j != i:
y += A[i][j]*x0[j]
j += 1
xi[i] = (-y + b[i])/A[i][i]
i += 1
if norm_type == "l_infinity":
norm = norms(xi, x0).l_infinity()
elif norm_type == "l_two":
norm = norms(xi, x0).l_two()
approximations.append(xi)
errors.append(norm)
x0 = xi
k += 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
# m, n = len(approximations[0]), len(approximations)
# X_matrix, j = np.zeros((m,n)), 0
# while j < n:
# i = 0
# while i < m:
# X_matrix[i][j] = float(approximations[j][i])
# i += 1
# j += 1
self.iterations = np.array(range(k))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Error": self.errors})
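# A minimal usage sketch of MultiVariableIteration().jacobi() on a strictly diagonally
# dominant system (illustrative values only; gauss_seidel() is called the same way).
# The exact solution of this system is approximately [0.179, 0.286, 0.679].
# >>> A = [[4., 1., 0.], [1., 4., 1.], [0., 1., 4.]]
# >>> obj = MultiVariableIteration(A, x0=[0., 0., 0.], b=[1., 2., 3.], power=-6)
# >>> df = obj.jacobi()
# Congratulations! Solution found!
# >>> df["Approximations"].values[-1]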
# def newton_raphson(self, functions, symbols, x0, powers, max_iter=100, norm_type=None):
# """Given an array of functions, symbols, and initial guesses, employ the Newton-Raphson Method to find solution within tolerance.
# Root-finding problem: f(x) = 0.
# !!! Use lowest k !!!
# Parameters
# ----------
# functions
# symbols
# x0
# powers
# max_iter
# nomr_type
# p0 : float
# Initial guess.
# k : float, optional
# Absolute maximum slope of function.
# Yields
# -------
# self.iterations : tuple
# Collection of iterations through method.
# self.approximations : tuple
# Collection of approximated, iterative solutions.
# self.errors : tuple
# Collection of yielded norms.
# Raises
# ------
# __bad_iter : string
# If input for desired iterations was assigned not an integer.
# __must_be_expression : string
# If input `f` was of array, list, tuple, etcetera...
# Warns
# -----
# __solution_found : string
# Inform user that solution was indeed found.
# __solution_not_found : string
# If initial guess or tolerance were badly defined.
# Notes
# -----
# f'(x) != 0.
# Not root-bracketed.
# Initial guess must be close to real solution; else, will converge to different root or oscillate (if symmetric).
# Check that |g'(x)| <= (leading coefficient of g'(x)) for all x in [a, b].
# Technique based on first Taylor polynomial expansion of `f` about `p0` and evaluated at x = p. |p - p0| is assumed small; therefore, 2nd order Taylor term, the error, is small.
# Newton-Raphson has quickest convergence rate.
# This method can be viewed as fixed-point iteration.
# Theorem:
# 1) Existence of a fixed-point:
# If g in C[a, b] and g(x) in C[a, b] for all x in [a, b], then function, g has a fixed point in [a, b].
# 2) Uniqueness of a fixed point:
# If g'(x) exists on [a, b] and a positive constant, `k` < 1 exist with {|g'(x)| <= k | x in (a, b)}, then there is exactly one fixed-point, `p` in [a, b].
# Converges by O(linear) if g'(p) != 0, and O(quadratic) if g'(p) = 0 and g''(p) < M, where M = g''(xi) that is the error function.
# Examples
# --------
# If g(x) = x**2 - 2
# Then p = g(p) = p**2 - 2
# => p**2 - p - 2 = 0
# """
# def jacobian(g, sym_x, x):
# n = len(x)
# jacMatrix = np.zeros((n, n))
# for i in range(0, n):
# for j in range(0, n):
# J_ij = sp.diff(g[i](*sym_x), sym_x[j])
# temp = sp.lambdify(sym_x, J_ij)(*x)
# if isinstance(temp, type(np.array([1]))): temp = temp[0]
# jacMatrix[i][j] = temp
# return
# norm_type = self.norm_type
# functions, x0, b, norm = self.A, self.x0, self.b, self.tol*10
# xi = np.zeros_like(x0)
# X0, error = [], []
# k, n = 0, len(x0)
# for symbol in symbols:
# if isinstance(symbol, (str, type(sp.Symbol("x")))): continue
# else: raise TypeError(f"All elements of `symbols` must be of type string or symbol: {symbol} was neither.")
# if max_iter <= 0 or not isinstance(max_iter, (int, float)): ValueError(f"ERROR! Maximum iterations, N must be an integer greater than zero. {max_iter} was given and not understood.")
# if norm_type == None:
# tol = []
# for p in powers: tol.append(10**p)
# else: tol = 10**powers
# functions, x0 = np.reshape(functions, (1, n))[0], np.reshape(x0, (n, 1))
# X0.append(x0)
# error.append(tol)
# for k in range(1, max_iter):
# J = jacobian(functions, symbols, x0)
# xk, g = np.zeros_like(x0), np.zeros_like(x0)
# for i in range(0, n):
# g[i] = sp.lambdify(symbols, functions[i](*symbols))(*x0)
# y0 = np.linalg.solve(J, -g)
# xk = x0 + y0
# if norm_type == "l_two":
# boolean = []
# for i in range(0, n-1):
# if abs(xk[i] - x0[i])[0] <= tol[i]: boolean.append(1)
# else: boolean.append(0)
# x0 = xk
# if sum(boolean) < n: continue
# else: break
# elif norm_type == "l_infinity":
# norm = norms.l_infinity(xk, x0)
# error.append(norm)
# X0.append(xk)
# tol_exit = 0
# for tl in tol:
# if norm <= tl: tol_exit += 0
# else: tol_exit += 1
# if tol_exit == 0:
# self.iterations = np.array(range(k))
# self.approximations = np.array(X0)
# self.errors = np.array(error)
# return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Error": self.errors})
# else: x0 = xk
# else: raise ValueError("ERROR! Desired norm type was not understood. Please choose 'l_infinity' or 'l_two'.")
# return x0
def successive_relaxation(self, omega=None):
"""Given A*x = b, use `self.norm_type` to find vector, x via the Successive Relaxtion Method. Is Successive Over-Relaxation if omega > 1, Successive Under-Relaxation if omega < 1, and is Gauss-Seidel if omega = 1.
Parameters
----------
omega : None or float, optional
Relaxation parameter.
Returns
-------
pandas.DataFrame : dataframe
Summarized dataframe from iterations.
Yields
-------
self.iterations : tuple
Collection of iterations through method.
self.approximations : tuple
Collection of approximated, iterative solutions.
self.errors : tuple
Collection of yielded norms.
Warnings
--------
Prints to console optimal choice of omega, regardless of assignment, and whether or not a solution was found within the specified tolerance with the supplied, initial guess.
See Also
--------
norms.l_infinity() : Will find the l_infinity norm between x0 and xi.
norms.l_two() : Will find the l_2 norm between x0 and xi.
find_omega() : Will analyze system of equation to find an optimal omega, if possible, and inform user.
gauss_seidel() : Technique is Gauss-Seidel's modified by omega.
Notes
-----
gauss_seidel():
[x]_(k) = ( (D - L)^(-1) * U ) * [x]_(k - 1) + ( (D - L)^(-1) )*[b]
successive_relaxation():
[x]_(k) = ( (D - wL)^(-1) * ((1 - w)*D + w*U) ) * [x]_(k - 1) + w*( (D - w*L)^(-1) )*[b]
omega will be analyzed independent of assigned value which will be used if not specified in assignment.
"""
if omega == None:
try: w = self.user_omega
except AttributeError:
try: w = self.best_omega
except AttributeError:
# w = super().find_omega(A, x0)
w = self.find_omega()
print(f"Warning! Omega was not given; therefore, I attempted to choose one, {w}.")
else: print(f"Warning! Using `self.best_omega` = {w}.")
else: print(f"Warning! Using `self.user_omega` = {w}.")
if w <= 0: raise ValueError("Either a positive omega was not given, or I could not choose one.")
elif omega != None and isinstance(omega, (int, float)):
# omega = find_omega(A, x0, w)
w = self.find_omega(omega=omega)
print(f"Warning! omega = {omega} given. Which is not optimum: {w}")
w = omega
else: raise ValueError(f"ERROR! Either a positive omega was not given, or I could not choose one.")
A, x0, b, tol, N = self.A, self.x0, self.b, self.tol, self.max_iter
norm_type, norm = self.norm_type, tol*10
k, n, approximations, errors = 0, len(x0), [x0], [norm]
while norm > tol and k <= N:
i, xi = 0, np.zeros_like(x0)
# xgs = super().gauss_seidel(x0)
xgs = self.gauss_seidel()["Approximations"].values[-1]
while i < n:
xi[i] = (1 - w)*x0[i] + w*xgs[i]
i += 1
if norm_type == "l_infinity":
norm = norms(xi, x0).l_infinity()
elif norm_type == "l_two":
norm = norms(xi, x0).l_two()
approximations.append(xi)
errors.append(norm)
x0 = xi
k += 1
if k <= N: print("Congratulations! Solution found!")
else: print("Warning! Solution could not be found with initial guess or tolerance.")
# m, n = len(approximations[0]), len(approximations)
# X_matrix, j = np.zeros((m,n)), 0
# while j < n:
# i = 0
# while i < m:
# X_matrix[i][j] = float(approximations[j][i])
# i += 1
# j += 1
self.iterations = np.array(range(k+1))
self.approximations = np.array(approximations)
self.errors = np.array(errors)
return pd.DataFrame(data={"Iterations": self.iterations, "Approximations": self.approximations, "Error": self.errors})
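# A minimal usage sketch of MultiVariableIteration().successive_relaxation()
# (illustrative; reuses the system from the jacobi() sketch above). An explicit
# relaxation parameter is supplied; find_omega() still reports whether a better choice
# exists for a tridiagonal, positive-definite matrix.
# >>> A = [[4., 1., 0.], [1., 4., 1.], [0., 1., 4.]]
# >>> obj = MultiVariableIteration(A, x0=[0., 0., 0.], b=[1., 2., 3.], power=-6)
# >>> df = obj.successive_relaxation(omega=1.05)
# >>> df["Approximations"].values[-1]   # converges toward the solution of A*x = b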
# --------------------
# --------------------
# interpolations
class cubic_spline:
def __init__(self, domain, function):
self.domain, self.function = domain, function
def clamped(self, variable=sp.Symbol("x"), fp=0):
"""Given a domain and range, construct a spline polynomial within interval by some condition.
Parameters
----------
X : array
Input domain.
f : array or expression
Desired/Found range of interest.
x : symbol
Respected variable in derivative of equation. Assumed to be `'x'` if not stated.
fp : array or expression
Derivative at each point in `f`.
Returns
-------
Y : array
Finally evaluated solutions.
splines_j : list
Aggregate of splines on each interval.
spline : string
Totally constructed spline polynomial.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_f : string
If `f` is not an expression or function and is not an n x 1 or 1 x n array.
bad_data : string
If {`X`} and {`f`} are of unequal length.
bad_fp : string
If `fp` is not an expression or function and is not an n x 1 or 1 x n array.
missing_fp : string
Output message that derivative data or expression is missing.
See Also
--------
make_array() : Translates input expression to array from given `X`.
endpoint() : Relies on another technique to find derivatives at endpoints if not explicitly provided by data, `fp` nor an expression.
midpoint() : Finds the derivatives at points within the bounds of the endpoints.
diagonality() : Determines whether input matrix is strictly, diagonally dominant.
Notes
-----
`fp` will be calculated if not specified.
Method uses many, low-ordered polynomials to fit larger data sets. This minimizes computational load, which conversely greatly increases for larger data sets that yield high-ordered polynomials.
General form:
Sj(x) = aj + bj(x - xj) + cj(x - xj)^2 + dj(x - xj)^3
Clamped splines fit the constructed polynomial to the given data and its derivatives at either endpoint.
If selected `condition` is `'natural'`, then `fp = 0`, because derivative is assumed to be straight line outside of data set.
Definitions of cubic spline conditions:
a) S(x) is a cubic polynomial, Sj(x) on sub-interval [x_(j), x_(j + 1)] for each j = 0, 1, ..., n - 1;
b) Sj(x_(j)) = f(x_(j)) and Sj(x_(j + 1)) = f(x_(j + 1)) for each j = 0, 1, ..., n - 1;
c) S_(j + 1)(x_(j + 1)) = Sj(x_(j + 1)) for each j = 0, 1, ..., n - 2;
d) S_(j + 1)'(x_(j + 1)) = Sj'(x_(j + 1)) for each j = 0, 1, ..., n - 2;
e) One of the following conditions is satisfied:
1) S''(x0) = S''(xn) = 0 -> `'natural'`
2) S'(x0) = f'(x0) and S'(xn) = f'(xn) -> `'clamped'`
"""
def algorithm(g, gp):
Y, YP = np.array(g), np.array(gp)
# STEP 1: build list, h_i
i, H = 0, np.zeros(n)
while i < n:
H[i] = X[i+1] - X[i]
i += 1
# STEP 2: define alpha list endpoints
A, AP, ALPHA = Y, YP, np.zeros(m)
ALPHA[0] = 3*(A[1] - A[0])/H[0] - 3*AP[0]
ALPHA[n] = 3*AP[n] - 3*(A[n] - A[n-1])/H[n-1]
# STEP 3: build list, alpha_i
i = 1
while i <= n-1:
ALPHA[i] = 3/H[i]*(A[i+1] - A[i]) - 3/H[i-1]*(A[i] - A[i-1])
i += 1
# Algorithm 6.7 to solve tridiagonal
# STEP 4: define l, mu, and z first points
L, MU, Z, C = np.zeros(m), np.zeros(m), np.zeros(m), np.zeros(m)
L[0], MU[0] = 2*H[0], 0.5
Z[0] = ALPHA[0]/L[0]
# STEP 5: build lists l, mu, and z
i = 1
while i <= n-1:
L[i] = 2*(X[i+1] - X[i-1]) - H[i-1]*MU[i-1]
MU[i] = H[i]/L[i]
Z[i] = (ALPHA[i] - H[i-1]*Z[i-1])/L[i]
i += 1
# STEP 6: define l, z, and c endpoints
L[n] = H[n-1]*(2 - MU[n-1])
Z[n] = (ALPHA[n] - H[n-1]*Z[n-1])/L[n]
C[n] = Z[n]
# STEP 7: build lists c, b, and d
i, j, B, D = 1, 0, np.zeros(n), np.zeros(n)
while i <= n:
j = n-i
C[j] = Z[j] - MU[j]*C[j+1]
B[j] = (A[j+1] - A[j])/H[j] - H[j]*(C[j+1] + 2*C[j])/3
D[j] = (C[j+1] - C[j])/(3*H[j])
i += 1
return Y, A, B, C, D
sym_X, sym_function, sym_fp = "self.X", "self.f", "fp"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_f = "Input range, " + sym_function + " was neither function nor expression and not an n x 1 or 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_function + " must be of equal length."
bad_fp = "Derivative range was neither function nor expression and not an n x 1 or 1 x n array."
bad_fp_data = "Arrays " + sym_X + ", " + sym_function + ", and " + sym_fp + " must be of equal length."
missing_fp = "Missing derivative data or expression."
f, X = self.function, np.array(self.domain)
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if not isinstance(f, (FunctionType, sp.Expr)):
if np.sum(f.shape) > np.sum(f.shape[0]): raise ValueError("ERROR! " + bad_f)
elif len(X) != len(f): raise ValueError(bad_data)
else: g = f
elif isinstance(f, (FunctionType, sp.Expr)):
g = make_array(X, f)
if isinstance(fp, (FunctionType, sp.Expr)) or np.sum(np.array(fp).shape) != 0:
if not isinstance(fp, (FunctionType, sp.Expr)):
if np.sum(fp.shape) > np.sum(fp.shape[0]): raise ValueError("ERROR! " + bad_fp)
elif len(X) != len(fp): raise ValueError("ERROR! " + bad_fp_data)
else: gp = fp
elif isinstance(fp, (FunctionType, sp.Expr)): gp = make_array(X, fp)
elif fp == 0:
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(variable)))
f = sp.lambdify(variable, sym_function)
fp = sp.diff(sym_function)
gp = make_array(X, fp)
elif not isinstance(f,(FunctionType, sp.Expr)):
gp = []
if 2 < len(X) <= 5:
gp.append(endpoint(X, f, X[1]-X[0], "three", "left"))
i, n = 1, len(f) - 1
while i < n:
gp.append(midpoint(X, f, X[i]-X[i-1], "three", i))
i += 1
gp.append(endpoint(X, f, X[-2]-X[-1], "three", "right"))
elif len(X) > 5:
gp.append(endpoint(X, f, X[1]-X[0], "five", "left"))
i, n = 1, len(X) - 1
while i < n:
gp.append(midpoint(X, f, X[i]-X[i-1], "five", i))
i += 1
gp.append(endpoint(X, f, X[-2]-X[-1], "five", "right"))
else: raise ValueError("ERROR! " + missing_fp)
m = len(X)
n = m - 1
Y, A, B, C, D = algorithm(g, gp)
j, splines_j = 0, []
while j <= n-1:
xj, aj, bj, cj, dj = X[j], A[j], B[j], C[j], D[j]
sj = aj + bj*(variable - xj) + cj*(variable - xj)**2 + dj*(variable - xj)**3
splines_j.append(sj)
j += 1
spline = sp.simplify(sum(splines_j))
return Y, splines_j, spline
def natural(self, variable=sp.Symbol("x")):
"""Given a domain and range, construct a spline polynomial within interval by some condition.
Parameters
----------
X : array
Input domain.
f : array or expression
Desired/Found range of interest.
Returns
-------
Y : array
Finally evaluated solutions.
splines_j : list
Aggregate of splines on each interval.
spline : string
Totally constructed spline polynomial.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_f : string
If `f` is not an expression or function and is not an n x 1 or 1 x n array.
bad_data : string
If {`X`} and {`f`} are of unequal length.
See Also
--------
make_array() : Translates input expression to array from given `X`.
diagonality() : Determines whether input matrix is strictly, diagonally dominant.
Notes
-----
Method uses many, low-ordered polynomials to fit larger data sets. This minimizes computational load, which conversely greatly increases for larger data sets that yield high-ordered polynomials.
General form:
Sj(x) = aj + bj(x - xj) + cj(x - xj)^2 + dj(x - xj)^3
Clamped splines fit the constructed polynomial to the given data and its derivatives at either endpoint.
If selected `condition` is `'natural'`, then `fp = 0`, because derivative is assumed to be straight line outside of data set.
Definitions of cubic spline conditions:
a) S(x) is a cubic polynomial, Sj(x) on sub-interval [x_(j), x_(j + 1)] for each j = 0, 1, ..., n - 1;
b) Sj(x_(j)) = f(x_(j)) and Sj(x_(j + 1)) = f(x_(j + 1)) for each j = 0, 1, ..., n - 1;
c) S_(j + 1)(x_(j + 1)) = Sj(x_(j + 1)) for each j = 0, 1, ..., n - 2;
d) S_(j + 1)'(x_(j + 1)) = Sj'(x_(j + 1)) for each j = 0, 1, ..., n - 2;
e) One of the following conditions is satisfied:
1) S''(x0) = S''(xn) = 0 -> `'natural'`
2) S'(x0) = f'(x0) and S'(xn) = f'(xn) -> `'clamped'`
"""
def algorithm(g):
Y = g
# STEP 1: build list, h_i
H, i = np.zeros(n), 0
while i < n:
H[i] = X[i+1] - X[i]
i += 1
# STEP 2: build list, alpha_i
A, ALPHA = Y, np.zeros(m)
i = 1
while i <= n-1:
ALPHA[i] = 3/H[i]*(A[i+1] - A[i]) - 3/H[i-1]*(A[i] - A[i-1])
i += 1
# Algorithm 6.7 to solve tridiagonal
# STEP 3: define l, mu, and z first points
L, MU, Z, C = np.zeros(m), np.zeros(m), np.zeros(m), np.zeros(m)
L[0], MU[0], Z[0] = 1, 0, 0
# STEP 4: build lists l, mu, and z
i = 1
while i <= n-1:
L[i] = 2*(X[i+1] - X[i-1]) - H[i-1]*MU[i-1]
MU[i] = H[i]/L[i]
Z[i] = (ALPHA[i] - H[i-1]*Z[i-1])/L[i]
i += 1
# STEP 5: define l, z, and c endpoints
L[n], Z[n], C[n] = 1, 0, 0
# STEP 6: build lists c, b, and d
i, j, B, D = 1, 0, np.zeros(n), np.zeros(n)
while i <= n:
j = n-i
C[j] = Z[j] - MU[j]*C[j+1]
B[j] = (A[j+1] - A[j])/H[j] - H[j]*(C[j+1] + 2*C[j])/3
D[j] = (C[j+1] - C[j])/(3*H[j])
i += 1
return Y, A, B, C, D
sym_X, sym_function = "self.X", "self.f"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_f = "Input range, " + sym_function + " was neither function nor expression and not an n x 1 or 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_function + " must be of equal length."
X, f = np.array(self.domain), self.function
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if not isinstance(f, (FunctionType, sp.Expr)):
f = np.array(f)
if np.sum(f.shape) > np.sum(f.shape[0]): raise ValueError("ERROR! " + bad_f)
elif len(X) != len(f): raise ValueError("ERROR! " + bad_data)
else: g = f
elif isinstance(f, (FunctionType, sp.Expr)):
g = make_array(X, f)
m = len(X)
n = m - 1
Y, A, B, C, D = algorithm(g)
j, splines_j = 0, []
while j <= n-1:
xj, aj, bj, cj, dj = X[j], A[j], B[j], C[j], D[j]
sj = aj + bj*(variable - xj) + cj*(variable - xj)**2 + dj*(variable - xj)**3
splines_j.append(sj)
j += 1
spline = sp.simplify(sum(splines_j))
return Y, splines_j, spline
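# A minimal usage sketch for the natural cubic spline above. The enclosing class is
# defined earlier in this module; the constructor name and signature shown here are
# assumptions for illustration only.
#   X = np.array([0.0, 1.0, 2.0, 3.0])
#   f = np.array([1.0, 2.7, 7.4, 20.1])
#   spl = cubic_spline(X, f)              # hypothetical constructor name
#   Y, splines_j, spline = spl.natural()  # one cubic per sub-interval with S''(x0) = S''(xn) = 0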
def hermite(X, FX, x=sp.Symbol("x"), FP=0):
"""Given a domain and range, construct a Hermetic polynomial.
Parameters
----------
X : array
Input domain.
FX : array
Desired/Found range of interest.
x : symbol
Variable with respect to which the polynomial is constructed and differentiated. Assumed to be `'x'` if not stated.
FP : array or expression
Derivative at each point in `FX`.
Returns
-------
polynomial : expression
Lambdified Hermite polynomial.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_FX : string
If {`FX`} is neither n x 1 nor 1 x n array.
bad_data : string
If {`X`} and {`FX`} are of unequal length.
bad_FP : string
If `FP` is not an expression or function and is not an n x 1 or 1 x n array.
bad_FP_data : string
If {`X`}, {`FX`}, or {`FP`} are of unequal lengths.
missing_FP : string
If `FP = 0` and `FX` is not an expression, then missing derivative data or expression.
Warns
-----
made_poly : string
Displays the string form of the equation.
See Also
--------
make_array() : Prints string that expression was used to make array.
Notes
-----
`FP` calculated if not specified.
Slow computation time for larger data sets.
The osculating curve incorporates Taylor and Lagrangian polynomials to kiss the data and match each data point's derivatives, which fits the curve to the shape of the data and its trend.
"""
sym_X, sym_FX, sym_FP = "X", "FX", "FP"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_FX = "Input range, " + sym_FX + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_FX + " must be of equal length."
bad_FP = "Derivative range was neither function nor expression and not an n x 1 or 1 x n array."
bad_FP_data = "Arrays " + sym_X + ", " + sym_FX + ", and " + sym_FP + " must be of equal length."
missing_FP = "Missing derivative data or expression."
made_poly = "I have found your requested polynomial! P = "
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if not isinstance(FX, (FunctionType, sp.Expr)):
if np.sum(FX.shape) > np.sum(FX.shape[0]): raise ValueError("ERROR! " + bad_FX)
elif len(X) != len(FX): raise ValueError("ERROR! " + bad_data)
else: g = FX
elif isinstance(FX,(FunctionType, sp.Expr)): g = make_array(X, FX)
if isinstance(FP, (FunctionType, sp.Expr)) or np.sum(np.array(FP).shape) != 0:
if not isinstance(FP, (FunctionType, sp.Expr)):
if np.sum(FP.shape) > np.sum(FP.shape[0]): raise ValueError("ERROR! " + bad_FP)
if len(X) != len(FP): raise ValueError("ERROR! " + bad_FP_data)
gp = FP
elif isinstance(FP,(FunctionType, sp.Expr)): gp = make_array(X, FP)
elif FP == 0:
if isinstance(FX,(FunctionType, sp.Expr)):
fp = sp.lambdify(x, sp.diff(FX(x)))
gp = make_array(X, fp)
else: print("Warning! " + missing_FP)
n = len(X)
i, Q, Z = 0, np.zeros((2*n+1,2*n+1)), np.zeros((2*n+1,1))
while i < n:
Z[2*i], Z[2*i + 1] = X[i], X[i]
Q[2*i][0], Q[2*i + 1][0] = g[i], g[i]
Q[2*i + 1][1] = gp[i]
if i != 0: Q[2*i][1] = (Q[2*i][0] - Q[2*i - 1][0]) \
/ (Z[2*i] - Z[2*i - 1])
i += 1
i = 2
while i < 2*n + 1:
j = 2
while j <= i:
Q[i][j] = (Q[i][j - 1] - Q[i - 1][j - 1]) \
/ (Z[i] - Z[i - j])
j += 1
i += 1
i, y, terms = 0, 1, []
while i < n:
j, xi = 2*i, (x - X[i])
qjj, qj1 = Q[j][j], Q[j + 1][j + 1]
terms.append(qjj*y)
y = y*xi
terms.append(qj1*y)
y = y*xi
i += 1
polynomial = sp.lambdify(x, sp.simplify(sum(terms)))
print("Congratulations! ", made_poly + str(polynomial(x)))
return polynomial
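# A minimal sketch of hermite(), assuming numpy and sympy are imported as np and sp
# elsewhere in this module. With FX given as a callable and FP omitted, the derivative
# data is generated symbolically; three nodes plus derivatives reproduce any cubic exactly.
#   X = np.array([1.0, 1.5, 2.0])
#   P = hermite(X, lambda t: t**3)  # FP defaults to 0, so FX is differentiated symbolically
#   P(1.25)                         # approximately 1.953125 == 1.25**3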
def lagrange(X, Y, x=sp.Symbol("x")):
"""Given a domain and range, construct a Lagrangian polynomial.
Parameters
----------
X : array
Input domain.
Y : array or expression
Desired/Found range of interest.
x : symbol
Variable with respect to which the polynomial is constructed. Assumed to be `'x'` if not stated.
Returns
-------
yn : list
Aggregate of Lagrangian terms.
sp.lambdify(x, polynomial) : expression
Lambdified Lagrangian polynomial.
bound : list
Propagation of error through construction.
sum(bound) : float
Total error.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_Y : string
If {`Y`} is neither n x 1 nor 1 x n array.
bad_data : string
If {`X`} and {`Y`} are of unequal length.
Warns
-----
made_poly : string
Displays the string form of the equation.
See Also
--------
make_array() : Prints string that expression was used to make array.
Notes
--------
Polynomial will quickly begin to oscillate for larger data sets.
Finds a polynomial of degree n-1.
Polynomial is of the following form:
P(x) = f(x0)L_(n,0)(x) + ... + f(xn)L_(n,n)(x), where
L_(n,k) = prod_(i=0, i!=k)^(n) (x - xi)/(xk - xi)
Examples
--------
A Lagrange polynomial between (2,4) and (5,1) would be found as follows:
L_(0)(x) = (x - 5)/(2 - 5) = -(x - 5)/3
L_(1)(x) = (x - 2)/(5 - 2) = (x - 2)/3
=> P(x) = (4)*(-(x - 5)/3) + (1)*((x - 2)/3)
= -x + 6
"""
def term(xk, yk, x):
num, den, L_k = [], [], []
for xl in X:
if xl != xk:
num.append(x-xl)
den.append(xk-xl)
L_k = (np.divide(np.prod(num), np.prod(den)))
return L_k * yk
def error(n, xi, x):
i, roots, g, xi_error = 0, [], [], []
while i <= n:
root = X[i]
roots.append(x - root)
g = np.prod(roots)
k = 0
while k <= n:
xi = sp.simplify(sp.diff(xi))
k += 1
dxi = np.abs(xi.evalf(subs={x: root})/(math.factorial(k)))
xi_error.append(np.abs(dxi))
xi_err = np.max(xi_error)
g_prime = sp.diff(g)
r = sp.solve(g_prime)
if i == 0:
r = g_prime
gx = g.evalf(subs={x: r})
elif i == 1:
gx = g.evalf(subs={x: r[0]})
else:
R = []
for s in r:
if not isinstance(s, complex):
R.append(g.evalf(subs={x: s}))
gx = np.amax(np.abs(R))
i += 1
return np.abs(xi_err*gx)
sym_X, sym_Y = "X", "Y"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_Y = "Input range, " + sym_Y + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_Y + " must be of equal length."
made_poly = "I have found your requested polynomial! P = "
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if not isinstance(Y,(FunctionType, sp.Expr)):
if np.sum(Y.shape) > np.sum(Y.shape[0]): raise ValueError("ERROR! " + bad_Y)
elif len(X) != len(Y): raise ValueError("ERROR! " + bad_data)
elif isinstance(Y,(FunctionType, sp.Expr)): Y = make_array(X, Y)
k, yn, bound = 0, [], []
for xk in X:
yn.append(term(xk, Y[k], x))
bound.append(error(k, sp.simplify(sum(yn)), x))
k += 1
polynomial = sp.simplify(sum(yn))
print("Congratulations! ", made_poly, str(polynomial))
return yn, sp.lambdify(x, polynomial), bound, sum(bound)
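# A minimal sketch of lagrange() on the two-point data from the docstring example above;
# the recovered polynomial simplifies to -x + 6 and interpolates the data exactly.
#   X, Y = np.array([2.0, 5.0]), np.array([4.0, 1.0])
#   yn, P, bound, total_error = lagrange(X, Y)
#   P(2), P(5)  # 4.0 and 1.0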
class least_squares:
def linear(X_i, Y_i, n, variable=sp.Symbol("x")):
"""Given a domain and range, construct some polynomial.
Parameters
----------
X_i : array
Input domain.
Y_i : array or expression
Desired/Found range of interest.
n : int
Degree of polynomial.
Returns
-------
P : expression
Lambdified linear least square polynomial.
E : float
Total error.
Raises
------
bad_X : string
If {`X_i`} is neither n x 1 nor 1 x n array.
bad_Y : string
If {`Y_i`} is neither n x 1 nor 1 x n array.
bad_data : string
If {`X_i`} and {`Y_i`} are of unequal length.
bad_n : string
If prescribed `n` is not an integer or is zero.
Warns
-----
made_poly : string
Displays the string form of the equation.
"""
def poly(X):
terms, k = [], 0
for x in X:
terms.append(x*(variable**k))
k += 1
p = sp.simplify(sum(terms))
err, i = 0, 0
for x_i in X_i:
px = p.subs(variable, x_i)
err += (Y_i[i] - px)**2
i += 1
return p, err
sym_X_i, sym_Y_i = "X_i", "Y_i"
bad_X = "Input domain, " + sym_X_i + " was neither an n x 1 nor a 1 x n array."
bad_Y = "Input range, " + sym_Y_i + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X_i + " and " + sym_Y_i + " must be of equal length."
bad_n = "Degree of polynomial must be integer and non-zero."
made_poly = "I have found your requested polynomial! P = "
if np.sum(X_i.shape) > np.sum(X_i.shape[0]): raise ValueError("ERROR! " + bad_X)
if isinstance(Y_i, (FunctionType, sp.Expr)):
Y_i = make_array(X_i, Y_i)
if np.sum(Y_i.shape) > np.sum(Y_i.shape[0]): raise ValueError("ERROR! " + bad_Y)
if len(X_i) != len(Y_i): raise ValueError("ERROR! " + bad_data)
if not isinstance(n,(int)) or n == 0: raise ValueError("ERROR! " + bad_n)
m = len(X_i)
A, x = np.zeros((n+1, n+1)), np.zeros((n+1,1))
i, b = 0, np.zeros_like(x)
while i <= n:
j = 0
while j <= n:
a_ij, k = 0, 0
while k < m:
a_ij += (X_i[k])**(i + j)
k += 1
A[i][j] = a_ij
j += 1
b_i, k = 0, 0
while k < m:
b_i += Y_i[k]*(X_i[k]**(i))
k += 1
b[i] = b_i
i += 1
x = np.transpose(np.linalg.solve(A, b))
k, X, terms = 0, x[0], []
for x in X:
terms.append(x*(variable**k))
k += 1
polynomial = sp.simplify(sum(terms))
print("Congratulations! ", made_poly, str(polynomial))
P = sp.lambdify(variable, polynomial)
i, E = 0, 0
for x_i in X_i:
E += (Y_i[i] - P(x_i))**2
i += 1
return P, E
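# A minimal sketch of least_squares.linear(): fitting a first-degree polynomial to data
# already lying on y = 2x + 1 recovers that line (up to round-off) with residual E near zero.
#   X_i = np.array([0.0, 1.0, 2.0, 3.0])
#   Y_i = np.array([1.0, 3.0, 5.0, 7.0])
#   P, E = least_squares.linear(X_i, Y_i, 1)
#   P(4)  # approximately 9.0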
def power(X, Y):
"""Given a domain and range, yield the coefficients for an equation of the form `y = A*(x^B)`.
Parameters
----------
X : array
Input domain.
Y : array or expression
Desired/Found range of interest.
Returns
-------
A : float
Leading coefficient.
B : float
Exponent.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_Y : string
If {`Y`} is neither n x 1 nor 1 x n array.
bad_data : string
If {`X`} and {`Y`} are of unequal length.
Warns
-----
made_poly : string
Displays the string form of the equation.
"""
sym_X, sym_Y = "X", "Y"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_Y = "Input range, " + sym_Y + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_Y + " must be of equal length."
bad_n = "Degree of polynomial must be integer and non-zero."
made_poly = "I have found your requested polynomial! P = "
X, Y = np.array(X), np.array(Y)
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if isinstance(Y, (FunctionType, sp.Expr)):
Y = make_array(X, Y)
if np.sum(Y.shape) > np.sum(Y.shape[0]): raise ValueError("ERROR! " + bad_Y)
if len(X) != len(Y): raise ValueError("ERROR! " + bad_data)
n = len(X)
q1, q2, q3, q4 = [], [], [], []
for i in range(n):
xi, yi = X[i], Y[i]
q1.append(np.log(xi)*np.log(yi))
q2.append(np.log(xi))
q3.append(np.log(yi))
q4.append(np.log(xi)**2)
num = n*np.sum(q1) - np.sum(q2)*np.sum(q3)
den = n*np.sum(q4) - (np.sum(q2))**2
b = num/den
a = math.exp((np.sum(q3) - b*np.sum(q2))/n)
return a, b
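# A minimal sketch of least_squares.power(): the fit linearizes y = A*(x^B) with
# logarithms, so data generated from y = 2*x**2 returns A ~ 2 and B ~ 2.
#   A, B = least_squares.power([1.0, 2.0, 3.0, 4.0], [2.0, 8.0, 18.0, 32.0])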
def linear_interpolation(x0, y0, x1, y1, x):
return y0 + (x - x0)*(y1 - y0)/(x1 - x0)
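# For example, interpolating between (0, 0) and (10, 5) at x = 4:
#   linear_interpolation(0, 0, 10, 5, 4)  # returns 2.0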
def newton_difference(X, FX, x0, variable=sp.Symbol("x"), direction=0):
"""Given a domain and range, construct some polynomial by Newton's Divided Difference.
Parameters
----------
X : array
Input domain.
FX : array or expression
Desired/Found range of interest.
x0 : float
Point about which polynomial is evaluated.
direction : string
`'forward'` or `'backward'` construction. Will be chosen automatically if not specified.
Returns
-------
p : expression
Lambdified constructed polynomial.
p(x0) : float
Evaluation of `p` at `x`.
Raises
------
bad_X : string
If {`X_i`} is neither n x 1 nor 1 x n array.
bad_FX : string
If {`FX`} is neither n x 1 nor 1 x n array.
bad_data : string
If {`X`} and {`FX`} are of unequal length.
bad_direction : string
If `direction` is neither `'forward'` nor `'backward'`.
Warns
-----
made_poly : string
Displays the string form of the equation.
See Also
--------
make_array() : Prints string that expression was used to make array.
Notes
-----
Direction will be chosen if not specified.
Polynomials are best made with even spacing in `X`, although this is not strictly necessary.
"""
def fterm(i, j):
fij = (fxn[i][j] - fxn[i-1][j])/(fxn[i][0] - fxn[i-j][0])
return fij
sym_X, sym_FX = "X", "FX"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_FX = "Input range, " + sym_FX + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_FX + " must be of equal length."
bad_direction = "Supplied direction was not understood. Please specify 'forward' or 'backward', or let me choose."
made_poly = "I have found your requested polynomial! P = "
X, x0 = np.array(X), float(x0)
if not isinstance(FX,(FunctionType, sp.Expr)):
FX = np.array(FX)
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if np.sum(FX.shape) > np.sum(FX.shape[0]): raise ValueError("ERROR! " + bad_FX)
if len(X) != len(FX): raise ValueError("ERROR! " + bad_data)
if isinstance(FX,(FunctionType, sp.Expr)): FX = make_array(X, FX)
if direction == 0:
if x0 <= np.median(X): direction = "forward"
else: direction = "backward"
elif direction != "forward" and direction != "backward": raise ValueError(bad_direction)
m = len(X)
n = m + 1
fxn, coeff, term, poly = np.zeros((m,n)), [], [], []
m, n = m - 1, n - 1 # change m and n from length to index
j, fxn[:,0], fxn[:,1] = 1, X, FX
while j < m:
i = 1
while i < m:
fk = fterm(i, j)
fxn[i][j+1] = fk
if direction == "forward" and i == j:
coeff.append(fk)
if direction == "backward" and i == m - 1:
coeff.append(fk)
i += 1
j += 1
for c in coeff:
k = coeff.index(c)
term.append(variable - X[k])
poly.append(c*np.prod(term))
if direction == "forward": polynomial = sp.simplify(sum(poly) + FX[0])
if direction == "backward": polynomial = sp.simplify(sum(poly) + FX[m])
print("Congratulations! ", made_poly, str(polynomial))
p = sp.lambdify(variable, polynomial)
return p, p(x0)
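# A minimal sketch of newton_difference(). On data sampled from x**2 the divided
# differences vanish beyond second order, so the constructed polynomial reproduces x**2
# and the evaluation at x0 = 2.5 is 6.25.
#   p, p_x0 = newton_difference([1.0, 2.0, 3.0, 4.0], [1.0, 4.0, 9.0, 16.0], 2.5)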
# --------------------
# --------------------
# numerical differentiation and integration
class simpson:
def open(f, X, h=0, a=0, b=0, variable=sp.Symbol("x")):
"""Find the integral of a function within some interval, using Simpson's Rule.
Parameters
----------
f : expression
Polynomial equation that defines graphical curve.
X : list
Domain over which `f` is evaluated.
h : float
Step-size through interval.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
Returns
-------
XJ : list
Values of domain at which `f` was analyzed.
YJ : list
Evaluations of `f` from domain.
F : float
Total area under curve, `f`.
Raises
------
bad_X : string
If {`X_i`} is neither n x 1 nor 1 x n array.
bad_f : string
If {`f`} is not an expression.
Warns
-----
__func_func : string
Evaluate input expression for Newton difference approximation.
Notes
-----
`X = 0` if not a list nor n x 1 or 1 x n array.
Unless specified and if `X` is defined, `a` and `b` will be the minimum and maximum, respectively, of `X`.
Theorem:
Let f be in C4[a,b], n be even, h = (b-a)/n, and xj = a + jh for j = 0, 1, ..., n. There exists a mu in (a,b) for which the quadrature for n sub-intervals can be written with its error term as:
int_(a)^(b)f(x)dx = h[f(a) + 2*[sum_(j=1)^(n/2 - 1){f(x_(2j))}] + 4*[sum_(j=1)^(n/2){f(x_(2j-1))}] + f(b)]/3 - (b-a)*(h^4)f''''(mu)/180.
Where: (b-a)*(h^4)f''''(mu)/180 -> O(h^4)
"""
X = np.array(X)
sym_X, sym_function = "X", "f"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_f = "Input range, " + sym_function + " must be expression, not list or tuple."
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if not isinstance(f,(FunctionType, sp.Expr)):
if np.sum(f.shape) > np.sum(f.shape[0]): raise ValueError("ERROR! " + bad_X)
else: raise ValueError("ERROR! " + bad_f)
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(variable)))
f = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
if h == 0: h = X[1]-X[0]
if a == 0: a = min(X)
if b == 0: b = max(X)
h, a, b = float(h), float(a), float(b)
n = math.ceil((b-a)/h)
XJ1, XJ2, XJ, = [], [], []
YJ1, YJ2, YJ, = [], [], []
XJ.append(a); YJ.append(f(a))
j, z1 = 1, 0
while j <= (n/2)-1:
xj = a + 2*j*h
yj = f(xj)
XJ1.append(xj); YJ1.append(yj)
z1 += yj
j += 1
k, z2 = 1, 0
while k <= n/2:
xj = a + (2*k - 1)*h
yj = f(xj)
XJ2.append(xj); YJ2.append(yj)
z2 += yj
k += 1
l = 0
while l < np.array(XJ1).shape[0]:
XJ.append(XJ2[l]); YJ.append(YJ2[l])
XJ.append(XJ1[l]); YJ.append(YJ1[l])
l += 1
XJ.append(XJ2[l]); YJ.append(YJ2[l])
XJ.append(b); YJ.append(f(b))
F = h/3*(f(a) + 2*z1 + 4*z2 + f(b))
return XJ, YJ, F
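# A minimal sketch of simpson.open(), assuming numpy/sympy are imported as np/sp.
# Composite Simpson's rule is exact for cubics, so integrating x**2 over [0, 1] with
# h = 0.25 gives F = 1/3 up to round-off.
#   XJ, YJ, F = simpson.open(lambda t: t**2, [0.0, 0.25, 0.5, 0.75, 1.0])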
def closed(f, X, h=0, a=0, b=0, variable=sp.Symbol("x")):
"""Find the integral of a function within some interval, using Simpson's Rule.
Parameters
----------
f : expression
Polynomial equation that defines graphical curve.
X : list
Domain over which `f` is evaluated.
h : float
Step-size through interval.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
Returns
-------
XJ : list
Values of domain at which `f` was analyzed.
YJ : list
Evaluations of `f` from domain.
F : float
Total area under curve, `f`.
Raises
------
bad_X : string
If {`X_i`} is neither n x 1 nor 1 x n array.
bad_f : string
If {`f`} is not an expression.
Warns
-----
__func_func : string
Evaluate input expression for Newton difference approximation.
Notes
-----
`X = 0` if not a list nor n x 1 or 1 x n array.
Unless specified and if `X` is defined, `a` and `b` will be the minimum and maximum, respectively, of `X`.
Theorem:
Let f be in C4[a,b], n be even, h = (b-a)/n, and xj = a + jh for j = 0, 1, ..., n. There exists a mu in (a,b) for which the quadrature for n sub-intervals can be written with its error term as:
int_(a)^(b)f(x)dx = h[f(a) + 2*[sum_(j=1)^(n/2 - 1){f(x_(2j))}] + 4*[sum_(j=1)^(n/2){f(x_(2j-1))}] + f(b)]/3 - (b-a)*(h^4)f''''(mu)/180.
Where: (b-a)*(h^4)f''''(mu)/180 -> O(h^4)
"""
X = np.array(X)
sym_X, sym_function = "X", "f"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
other_bad_X = "Input domain, " + sym_X + " must be only 4 elements!"
bad_f = "Input range, " + sym_function + " must be expression, not list or tuple."
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if np.sum(X.shape[0]) != 4: raise ValueError("ERROR! " + other_bad_X)
if not isinstance(f,(FunctionType, sp.Expr)):
f = np.array(f)
if np.sum(f.shape) == np.sum(f.shape[0]) and np.sum(f.shape) == 4: Y = np.array(f)
elif np.sum(f.shape) > np.sum(f.shape[0]): raise ValueError("ERROR! " + bad_X)
else: raise ValueError("ERROR! " + bad_f)
if h == 0: h = X[1]-X[0]
if a == 0: a = min(X)
if b == 0: b = max(X)
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(variable)))
f = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
Y = make_array(X, f)
if a < np.min(X): Y[0] = f(a)
if b > np.max(X): Y[3] = f(b)
h, a, b = float(h), float(a), float(b)
F = 3*h/8*(Y[0] + 3*(Y[1] + Y[2]) + Y[3])
return X, Y, F
class trapezoidal:
def open(f, X, h=0, a=0, b=0, variable=sp.Symbol("x")):
"""Find the integral of a function within some interval, using Trapezoidal Rule.
Parameters
----------
f : expression
Polynomial equation that defines graphical curve.
X : list
Domain over which `f` is evaluated.
h : float
Step-size through interval.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
Returns
-------
XJ : list
Values of domain at which `f` was analyzed.
YJ : list
Evaluations of `f` from domain.
F : float
Total area under curve, `f`.
Raises
------
bad_X : string
If {`X_i`} is neither n x 1 nor 1 x n array.
bad_f : string
If {`f`} is not an expression.
Warns
-----
__func_func : string
Evaluate input expression for Newton difference approximation.
Notes
-----
`X = 0` if not a list nor n x 1 or 1 x n array.
Unless specified and if `X` is defined, `a` and `b` will be the minimum and maximum, respectively, of `X`.
Theorem:
Let f be in C2[a,b], h = (b-a)/n, and xj = a + jh for j = 0, 1, ..., n. There exists a mu in (a,b) for which the quadrature for n sub-intervals can be written with its error term as:
int_(a)^(b)f(x)dx = h[f(a) + 2*[sum_(j=1)^(n - 1){f(xj)}] + f(b)]/2 - (b-a)*(h^2)f''(mu)/12.
Where: (b-a)*(h^2)f''(mu)/12 -> O(h^2)
"""
X = np.array(X)
sym_X, sym_function = "X", "f"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_f = "Input range, " + sym_function + " must be expression, not list or tuple."
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if not isinstance(f,(FunctionType, sp.Expr)):
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
else: raise ValueError("ERROR! " + bad_f)
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(variable)))
f = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
if h == 0: h = X[1]-X[0]
if a == 0: a = min(X)
if b == 0: b = max(X)
h, a, b = float(h), float(a), float(b)
XJ, YJ = [], []
XJ.append(a); YJ.append(f(a))
j, n, z = 1, math.ceil((b-a)/h), 0
while j <= n-1:
x_j = a + j*h
XJ.append(x_j)
y_j = f(x_j)
YJ.append(y_j)
z += y_j
j += 1
XJ.append(b); YJ.append(f(b))
F = h/2*(f(a) + 2*z + f(b))
return XJ, YJ, F
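# A minimal sketch of trapezoidal.open() on the same integrand as the Simpson example:
# with h = 0.25 the composite trapezoid rule gives F = 0.34375 against the exact 1/3,
# consistent with its O(h^2) error term.
#   XJ, YJ, F = trapezoidal.open(lambda t: t**2, [0.0, 0.25, 0.5, 0.75, 1.0])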
def closed(f, X, h=0, a=0, b=0, variable=sp.Symbol("x")):
"""Find the integral of a function within some interval, using Trapezoidal Rule.
Parameters
----------
f : expression
Polynomial equation that defines graphical curve.
X : list
Domain over which `f` is evaluated.
h : float
Step-size through interval.
a : float
Left-hand bound of interval.
b : float
Right-hand bound of interval.
Returns
-------
XJ : list
Values of domain at which `f` was analyzed.
YJ : list
Evaluations of `f` from domain.
F : float
Total area under curve, `f`.
Raises
------
bad_X : string
If {`X_i`} is neither n x 1 nor 1 x n array.
bad_f : string
If {`f`} is not an expression.
Warns
-----
__func_func : string
Evaluate input expression for Newton difference approximation.
Notes
-----
`X = 0` if not a list nor n x 1 or 1 x n array.
Unless specified and if `X` is defined, `a` and `b` will be the minimum and maximum, respectively, of `X`.
Theorem:
Let f be in C2[a,b], h = (b-a)/n, and xj = a + jh for j = 0, 1, ..., n. There exists a mu in (a,b) for which the quadrature for n sub-intervals can be written with its error term as:
int_(a)^(b)f(x)dx = h[f(a) + 2*[sum_(j=1)^(n - 1){f(xj)}] + f(b)]/2 - (b-a)*(h^2)f''(mu)/12.
Where: (b-a)*(h^2)f''(mu)/12 -> O(h^2)
"""
X = np.array(X)
sym_X, sym_function = "X", "f"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
other_bad_X = "Input domain, " + sym_X + " must be only 2 elements!"
bad_f = "Input range, " + sym_function + " must be expression, not list or tuple."
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if np.sum(X.shape[0]) != 2: raise ValueError("ERROR! " + other_bad_X)
if not isinstance(f,(FunctionType, sp.Expr)):
f = np.array(f)
if np.sum(f.shape) == np.sum(f.shape[0]) and np.sum(f.shape) == 2: Y = np.array(f)
elif np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
else: raise ValueError("ERROR! " + bad_f)
if h == 0: h = X[1]-X[0]
if a == 0: a = min(X)
if b == 0: b = max(X)
if isinstance(f,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(f(variable)))
f = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
Y = make_array(X, f)
if a < np.min(X): Y[0] = f(a)
if b > np.max(X): Y[1] = f(b)
h, a, b = float(h), float(a), float(b)
F = h/2*(Y[0] + Y[1])
return X, Y, F
def endpoint(X, Y, h, point_type, which_end):
"""Find the derivative at an endpoint of data set.
Parameters
----------
X : list
Domain of collected data.
Y : array or expression
Range of collected data.
h : float
Step-size through interval.
point_type : string
Determines if 3 or 5 pt. method is used.
which_end : string
Dictates whether evaluated point is left or right most data point.
Returns
-------
dY : float
Evaluated derivative at point.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_Y : string
If {`Y`} is not an expression.
bad_data : string
If `X` and `Y` are of unequal length.
See Also
--------
make_array() : Prints string that expression was used to make array.
Notes
-----
5 point is more accurate than 3 point; however, round-off error increases.
"""
sym_X, sym_Y = "X", "Y"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_Y = "Input range, " + sym_Y + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_Y + " must be of equal length."
if not isinstance(Y,(FunctionType, sp.Expr)):
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if np.sum(Y.shape) > np.sum(Y.shape[0]): raise ValueError("ERROR! " + bad_Y)
if len(X) != len(Y): raise ValueError("ERROR! " + bad_data)
if isinstance(Y,(FunctionType, sp.Expr)): Y = make_array(X, Y)
h, dY = float(h), 0
if which_end == "left":
i = 0
if point_type == "three":
dY = (-3*Y[i] + 4*Y[i+1] - Y[i+2])/(2*h)
if point_type == "five":
dY = (-25*Y[i] + 48*Y[i+1] \
- 36*Y[i+2] + 16*Y[i+3] \
- 3*Y[i+4])/(12*h)
if which_end == "right":
i = -1
if point_type == "three":
dY = (-3*Y[i] + 4*Y[i-1] - Y[i-2])/(2*h)
if point_type == "five":
dY = (-25*Y[i] + 48*Y[i-1] \
- 36*Y[i-2] + 16*Y[i-3] \
- 3*Y[i-4])/(12*h)
return dY
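# A minimal sketch of endpoint(), assuming numpy is imported as np. The three-point
# endpoint formula is exact for quadratics, so the derivative of x**2 at the left end
# of this grid evaluates to 0.0 (up to round-off).
#   X = np.array([0.0, 0.1, 0.2, 0.3, 0.4])
#   endpoint(X, X**2, 0.1, "three", "left")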
def gaussian_legendre(function, a, b):
return sc.integrate.quad(function, a, b)
def integrate(function, a, b):
return sc.integrate.quad(function, a, b)
def midpoint(X, Y, h, point_type, i):
"""Find derivative information at some point within data set.
Parameters
----------
X : list
Domain of collected data.
Y : array or expression
Range of collected data.
h : float
Step-size through interval.
point_type : string
Determines if 3 or 5 pt. method is used.
i : int
Index at which point is to be evaluated.
Returns
-------
dY : float
Evaluated derivative at point.
Raises
------
bad_X : string
If {`X`} is neither n x 1 nor 1 x n array.
bad_Y : string
If {`Y`} is not an expression.
bad_data : string
If `X` and `Y` are of unequal length.
bad_i : string
`i` must be an integer and non-zero for indexing.
bad_type : string
If `point_type` was not an acceptable option.
See Also
--------
make_array() : Prints string that expression was used to make array.
Notes
-----
5 point is more accurate than 3 point; however, round-off error increases.
"""
sym_X, sym_Y = "X", "Y"
bad_X = "Input domain, " + sym_X + " was neither an n x 1 nor a 1 x n array."
bad_Y = "Input range, " + sym_Y + " was neither an n x 1 nor a 1 x n array."
bad_data = "Arrays " + sym_X + " and " + sym_Y + " must be of equal length."
bad_i = "Index must be an integer."
bad_type = "I am sorry. The selected type was not understood. Please select: 'three', 'five', or '2nd_derivative'."
if not isinstance(Y,(FunctionType, sp.Expr)):
if np.sum(X.shape) > np.sum(X.shape[0]): raise ValueError("ERROR! " + bad_X)
if np.sum(Y.shape) > np.sum(Y.shape[0]): raise ValueError("ERROR! " + bad_Y)
if len(X) != len(Y): raise ValueError("ERROR! " + bad_data)
if isinstance(Y,(FunctionType, sp.Expr)): Y = make_array(X, Y)
if not isinstance(i,int): raise ValueError("ERROR! " + bad_i)
h, dY = float(h), 0
if point_type == "three":
dY = (Y[i+1] - Y[i-1])/(2*h)
if point_type == "five":
dY = (Y[i-2] - 8*Y[i-1] \
+ 8*Y[i+1] - Y[i+2])/(12*h)
if point_type == "2nd_derivative":
dY = (Y[i-1] - 2*Y[i] + Y[i+1])/(h**2)
else: raise ValueError("ERROR! " + bad_type)
return dY
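# A minimal sketch of midpoint(), assuming numpy is imported as np. The three-point
# midpoint formula is exact for quadratics, so the derivative of x**2 at X[2] = 0.2
# evaluates to approximately 0.4.
#   X = np.array([0.0, 0.1, 0.2, 0.3, 0.4])
#   midpoint(X, X**2, 0.1, "three", 2)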
def richard_extrapolation(function, x0, h, order, direction=0, variable=sp.Symbol("x")):
"""Results in higher-accuracy of derivative at point in function with lower-order formulas to minimize round-off error and increase O(h) of truncation error.
Parameters
----------
function : expression
Polynomial over which derivative must be calculated.
x0 : float
Point about which the extrapolation is centered.
h : float
Step-size through interval.
order : int
Order for rate of convergence.
direction : string
`'forward'` or `'backward'` construction.
Returns
-------
p : expression
Lambdified constructed polynomial.
p(x0) : float
Evaluation of `p` at `x`.
Raises
------
bad_function : string
If `function` is not an expression.
bad_order : string
`order` must be an integer and non-zero.
bad_direction : string
If `direction` is neither `'forward'` nor `'backward'`.
Warns
-----
__func_func : string
Evaluate input expression for Newton difference approximation.
See Also
--------
newton_difference() : Newton Difference method to build extrapolation for function's derivative and order of error.
"""
sym_function = "function"
bad_function = "Function, " + sym_function + " must be expression."
bad_order = "Expected integer."
bad_direction = "Supplied direction was not understood. Please specify 'forward' or 'backward'."
made_poly = "I have found your requested polynomial! P = "
if not isinstance(function,(FunctionType, sp.Expr)):
raise TypeError("ERROR! " + bad_function)
if isinstance(function,(FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(function(variable)))
function = sp.lambdify(variable, sym_function)
print(f"Information: Input expression, {sym_function} used.")
if not isinstance(order,int): raise TypeError("ERROR! " + bad_order)
if direction != 0 and direction != "forward" and direction != "backward": raise ValueError("ERROR! " + bad_direction)
def f(h):
x = x0 + h
return x, function(x)
x0, h = float(x0), float(h)
i, X, FX = 0, [], []
while i < order:
dx = h / (2**order) * (2**i)
x_i, fx_i = f(dx)
X.append(x_i); FX.append(fx_i)
i += 1
return newton_difference(X, FX, x0, variable=variable, direction=direction)
# --------------------
# --------------------
# differential equations
class __ode(object):
"""Assign common attributes to objects.
"""
def __init__(self, function, a, b, alpha, variables=(sp.Symbol("t"), sp.Symbol("y")), steps=100):
"""
Parameters
----------
function : expression
Time derivative of function to approximate.
a : float
Initial time.
b : float
Final time.
alpha : float
Initial value at a.
variables : tuple, optional
Collection of symbolic or string variables to respect in function.
steps : int or float, optional
Maximum number of time steps to discretize domain.
Yields
------
self.function : expression
Time derivative of function to approximate.
self.a : float
Initial time.
self.b : float
Final time.
self.alpha : float
Initial value at a.
self.variables : tuple, optional
Collection of symbolic or string variables to respect in function.
self.steps : int or float, optional
Maximum number of time steps to discretize domain.
Raises
------
ValueError
If time steps constraint is not an integer.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Notes
-----
Make sure the independent variable is the first element of `variables`!
"""
if steps <= 0 or not isinstance(steps, (int, float)): raise ValueError(f"ERROR! Number of time steps, N must be an integer greater than zero. {steps} was given and not understood.")
if np.sum(np.array(function).shape) > 0:
F = []
for f in function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(variables[0])))
f = sp.lambdify(variables[0], sym_function)
print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*variables)))
f = sp.lambdify(variables, sym_function)
print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*variables, g(*variables))
print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(function, (FunctionType, sp.Expr)):
sym_function = sp.N(sp.sympify(function(*variables)))
function = sp.lambdify(variables, sym_function)
print(f"Information: Input expression, {sym_function} used.")
elif isinstance(function, (str)):
g = lambda x: eval(function)
function = sp.lambdify(*variables, g(*variables))
print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
self.function = function
self.a, self.b = a, b
self.alpha = alpha
self.variables = np.array(variables)
self.steps = int(steps + 1)
class ivp(__ode):
"""Class containing Initial Value Problem methods.
"""
def __init__(self, function, a, b, alpha, variables=(sp.Symbol("t"), sp.Symbol("y")), steps=100):
"""
Parameters
----------
function : expression
Time derivative of function to approximate.
a : float
Initial time.
b : float
Final time.
alpha : float
Initial value at a.
variables : tuple, optional
Collection of symbolic or string variables to respect in function.
steps : int or float, optional
Maximum number of time steps to discretize domain.
Attributes
----------
forward_euler()
improved_euler()
backward_euler()
trapezoidal()
runge_kutta()
Yields
------
self.function : expression
Time derivative of function to approximate.
self.a : float
Initial time.
self.b : float
Final time.
self.alpha : float
Initial value at a.
self.variables : tuple, optional
Collection of symbolic or string variables to respect in function.
self.steps : int or float, optional
Maximum number of time steps to discretize domain.
Raises
------
ValueError
If time steps constraint is not an integer.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Notes
-----
Make sure the independent variable is the first element of `variables`!
"""
super().__init__(function, a, b, alpha, variables=variables, steps=steps)
def forward_euler(self):
"""March forward through time to approximate Initial Value Problem differential equation between endpoints a and b.
Returns
-------
pandas.Dataframe() : dataframe
Dataframe of method iterations and time domains, range of approximations for input function, and iterative increments.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped from method through discretized domain between endpoints a and b for so many steps.
self.increments : tuple
Collection of increments between steps.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
"""
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha = self.a, self.b, self.alpha
variables, N = self.variables, self.steps
h, t, w0 = float((b - a)/N), a, alpha
self.step_size = h
Y, increments = [w0], [0]
for i in range(1, N):
w = w0 + h*function(t, w0)
Y.append(w)
increments.append(w - w0)
t, w0 = a + i*h, w
self.iterations = np.array(range(N))
self.domain = np.array(np.arange(a, t+h, h))
self.range = np.array(Y)
self.increments = np.array(increments)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range, "Increments": self.increments})
def improved_euler(self):
"""Approximate solution of Initial Value Problem differential equation given initial time, initial value, and final time.
Returns
-------
pandas.Dataframe() : dataframe
Dataframe of method iterations and time domains, range of approximations for input function, and iterative increments.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped from method through discretized domain between endpoints a and b for so many steps.
self.increments : tuple
Collection of increments between steps.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
See Also
--------
runge_kutta()
Notes
-----
This is the 2nd-order Runge-Kutta method with endpoint weights a = b = 0.5 and lambda = 1.
"""
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha = self.a, self.b, self.alpha
variables, N = self.variables, self.steps
h, t, w0 = float((b - a)/N), a, alpha
self.step_size = h
ea, eb, lam = 1/2, 1/2, 1
Y, increments = [w0], [0]
for i in range(1, N):
w = w0 + h*(ea*function(t, w0) + eb*function(t + lam*h, w0 + lam*h*function(t, w0)))
Y.append(w)
increments.append(np.abs(w - w0))
t, w0 = a + i*h, w
self.iterations = np.array(range(N))
self.domain = np.array(np.arange(a, t+h, h))
self.range = np.array(Y)
self.increments = np.array(increments)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range, "Increments": self.increments})
def backward_euler(self):
"""Use information at next time step to approximate Initial Value Problem differential equation between endpoints a and b.
Returns
-------
pandas.Dataframe() : dataframe
Dataframe of method iterations and time domains, range of approximations for input function, and iterative increments.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped from method through discretized domain between endpoints a and b for so many steps.
self.increments : tuple
Collection of increments between steps.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
See Also
--------
SingleVariableIteration.newton_raphson()
"""
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha = self.a, self.b, self.alpha
variables, N = self.variables, self.steps
h, t, w0 = float((b - a)/N), a, alpha
self.step_size = h
Y, increments = [w0], [0]
for i in range(1, N):
t = a + i*h
# w = w0 + h*function(t + h, w0 + h*function(t, w0))
w = lambda x: x - (w0 + h*function(t + h, x))
sys.stdout = open(os.devnull, "w")
foo = SingleVariableIteration(w, t, t+h, iter_guess=100)
w = foo.newton_raphson(w0)["Approximations"].values[-1]
sys.stdout = sys.__stdout__
Y.append(w)
increments.append(np.abs(w - w0))
# t, w0 = a + i*h, w
w0 = w
self.iterations = np.array(range(N))
self.domain = np.array(np.arange(a, t+h, h))
self.range = np.array(Y)
self.increments = np.array(increments)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range, "Increments": self.increments})
def trapezoidal(self, power=-6, M=100):
"""Use information at next time step to approximate Initial Value Problem differential equation between endpoints a and b.
Parameters
----------
power : int or float, optional
Signed power to which function error must be within.
M : int or float, optional
Maximum iterations for Newton-Raphson loop.
Returns
-------
pandas.Dataframe() : dataframe
Dataframe of method iterations and time domains, range of approximations for input function, and iterative increments.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped from method through discretized domain between endpoints a and b for so many steps.
self.increments : tuple
Collection of increments between steps.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
"""
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha = self.a, self.b, self.alpha
variables, N = self.variables, self.steps
h, t, w0, tol = float((b - a)/N), a, alpha, 10**power
self.step_size = h
fpy = sp.lambdify(variables, sp.diff(function(*variables), variables[0]))
Y, increments = [w0], [0]
for i in range(1, N):
k1 = w0 + h*function(t, w0)/2
j, wj0, FLAG = 1, k1, False
while FLAG == False:
wj1 = wj0 - (wj0 - h/2*function(t + h, wj0) - k1)/(\
1 - h/2*fpy(t + h, wj0))
if np.abs(wj1 - wj0) <= tol:
w = wj1
FLAG = True
else:
wj0 = wj1
j += 1
if j > M: FLAG = True
# f = lambda x: x - h/2*function(t + h, x) - k1
# foo = SingleVariableIteration(f, a, b, power, variable=variables, iter_guess=M)
# w = foo.newton_raphson(k1)["Approximations"][-1]
Y.append(w)
increments.append(np.abs(w - w0))
t, w0 = a + i*h, w
self.iterations = np.array(range(N))
self.domain = np.array(np.arange(a, t+h, h))
self.range = np.array(Y)
self.increments = np.array(increments)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range, "Increments": self.increments})
def runge_kutta(self):
"""Approximate solution of initial value problem.
Returns
-------
pandas.Dataframe() : dataframe
Dataframe of method iterations and time domains, range of approximations for input function, and iterative increments.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped from method through discretized domain between endpoints a and b for so many steps.
self.increments : tuple
Collection of increments between steps.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
"""
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha = self.a, self.b, self.alpha
variables, N = self.variables, self.steps
h, t, w0 = float((b - a)/N), a, alpha
self.step_size = h
Y, increments = [w0], [0]
for i in range(1, N):
k1 = h*function(t, w0)
k2 = h*function(t + h/2, w0 + k1/2)
k3 = h*function(t + h/2, w0 + k2/2)
k4 = h*function(t + h, w0 + k3)
w = w0 + (k1 + 2*k2 + 2*k3 + k4) / 6
Y.append(w)
increments.append(w - w0)
t, w0 = a + i*h, w
self.iterations = np.array(range(N))
self.domain = np.array(np.arange(a, t+h, h))
self.range = np.array(Y)
self.increments = np.array(increments)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range, "Increments": self.increments})
class bvp(__ode):
"""Class containing Boundary Value Problem methods.
"""
def __init__(self, function, a, b, alpha, beta, variables=(sp.Symbol("x"), sp.Symbol("y"), sp.Symbol("yp")), steps=100):
"""
Parameters
----------
function : expression
Time derivative of function to approximate.
a : float
Initial time.
b : float
Final time.
alpha : float
Initial value at a.
beta : float
Initial value at b.
variables : tuple, optional
Collection of symbolic or string variables to respect in function.
steps : int or float, optional
Maximum number of time steps to discretize domain.
Attributes
----------
linear_shooting_method()
finite_difference_method()
Yields
------
self.function : expression
Time derivative of function to approximate.
self.a : float
Initial time.
self.b : float
Final time.
self.alpha : float
Initial value at a.
self.beta : float
Initial value at b.
self.variables : tuple, optional
Collection of symbolic or string variables to respect in function.
self.steps : int or float, optional
Maximum number of time steps to discretize domain.
Raises
------
ValueError
If time steps constraint is not an integer.
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
Notes
-----
Make sure the independent variable is the first element of `variables`!
"""
super().__init__(function, a, b, alpha, variables=variables, steps=steps)
self.beta = beta
def linear_shooting_method(self):
"""Solve a Boundary Value Problem differential equation with 2 Initial Value Problem differential equations.
Returns
-------
pandas.Dataframe() : dataframe
Dataframe of method iterations and time domains, range of approximations for input function, and iterative increments.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped from method through discretized domain between endpoints a and b for so many steps.
self.derivatives : tuple
Collection of derivatives at each step.
Raises
------
TypeError
If input expression cannot be understood as lambda or sympy expression nor as string.
"""
# Parameters
# ----------
# f : expression
# Equation to which derivative will be made.
# a : int or float
# Initial time.
# b : int or float
# Final time.
# alpha : float
# Initial value of solution y(t = a).
# beta : float
# Initial value of solution y(t = b).
# h : float
# Domain step-size.
# Returns
# -------
# pandas.Dataframe() : dataframe
# Dataframe of method iterations and time domains & range of approximations for input function and its time derivative.
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha, beta = self.a, self.b, self.alpha, self.beta
variables, N = self.variables, self.steps
h = float((b - a)/N)
self.step_size = h
u1, u2, v1, v2 = [alpha], [0], [0], [1]
p, q, r, ypp = function
for i in range(N):
x = a + i*h
k11 = h*u2[i]
k12 = h*(p(x)*u2[i] + q(x)*u1[i] + r(x))
k21 = h*(u2[i] + k12/2)
k22 = h*(p(x + h/2)*(u2[i] + k12/2) + q(x + h/2)*(u1[i] + k11/2) + r(x + h/2))
k31 = h*(u2[i] + k22/2)
k32 = h*(p(x + h/2)*(u2[i] + k22/2) + q(x + h/2)*(u1[i] + k21/2) + r(x + h/2))
k41 = h*(u2[i] + k32)
k42 = h*(p(x + h)*(u2[i] + k32) + q(x + h)*(u1[i] + k31) + r(x + h))
u1.append(u1[i] + (k11 + 2*k21 + 2*k31 + k41)/6)
u2.append(u2[i] + (k12 + 2*k22 + 2*k32 + k42)/6)
###############################
k11 = h*v2[i]
k12 = h*(p(x)*v2[i] + q(x)*v1[i])
k21 = h*(v2[i] + k12/2)
k22 = h*(p(x + h/2)*(v2[i] + k12/2) + q(x + h/2)*(v1[i] + k11/2))
k31 = h*(v2[i] + k22/2)
k32 = h*(p(x + h/2)*(v2[i] + k22/2) + q(x + h/2)*(v1[i] + k21/2))
k41 = h*(v2[i] + k32)
k42 = h*(p(x + h)*(v2[i] + k32) + q(x + h)*(v1[i] + k31))
v1.append(v1[i] + (k11 + 2*k21 + 2*k31 + k41)/6)
v2.append(v2[i] + (k12 + 2*k22 + 2*k32 + k42)/6)
w1, w2 = [alpha], [(beta - u1[-1])/v1[-1]]
for i in range(1, N+1):
w1.append(u1[i] + w2[0]*v1[i])
w2.append(u2[i] + w2[0]*v2[i])
x = a + i*h
# return pd.DataFrame(data={"Iterations": range(N+1), "Domain": np.linspace(a, b, N+1), "Range": w1, "W2": w2})
self.iterations = np.array(range(N+1))
self.domain = np.array(np.linspace(a, b, N+1))
self.range = np.array(w1)
self.derivatives = np.array(w2)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range, "Derivatives": self.derivatives})
def finite_difference_method(self, solver_method="gauss_seidel"):
"""Solve a Boundary Value Problem differential equation with 2 Initial Value Problem differential equations.
Parameters
----------
solver_method : str, optional
Unless specified, system of equations will be solved by the 'gauss_seidel' method.
Returns
-------
pandas.DataFrame : dataframe
Dataframe of iterations, the discretized domain, and the range of approximations; returned together with the linear solver's iteration count and errors.
Yields
------
self.step_size : float
Domain step size.
self.iterations : tuple
Collection of steps through the method.
self.domain : tuple
Discretized domain between endpoints a and b for so many steps.
self.range : tuple
Range mapped by the method over the discretized domain between endpoints a and b.
Raises
------
TypeError
If the input expression cannot be interpreted as a lambda or SymPy expression, nor as a string.
ValueError
The prescribed solver method is not an available option.
See Also
--------
MultiVariableIteration.gauss_seidel()
MultiVariableIteration.successive_relaxation()
MultiVariableIteration.jacobi()
"""
# Parameters
# ----------
# f : expression
# Equation to which derivative will be made.
# a : int or float
# Initial time.
# b : int or float
# Final time.
# alpha : float
# Initial value of solution y(t = a).
# beta : float
# Initial value of solution y(t = b).
# h : float
# Domain step-size.
# Returns
# -------
# pandas.Dataframe() : dataframe
# Dataframe of method iterations and time domains & range of approximations for input function and its time derivative.
if np.sum(np.array(self.function).shape) > 0:
F = []
for f in self.function:
if isinstance(f, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(f(self.variables[0])))
f = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(f(*self.variables)))
f = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(f, (str)):
g = lambda x: eval(f)
f = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
F.append(f)
function = F
else:
if isinstance(self.function, (FunctionType, sp.Expr)):
try:
sym_function = sp.N(sp.sympify(self.function(self.variables[0])))
function = sp.lambdify(self.variables[0], sym_function)
# print(f"Information: Input expression, {sym_function} used.")
except:
sym_function = sp.N(sp.sympify(self.function(*self.variables)))
function = sp.lambdify(self.variables, sym_function)
# print(f"Information: Input expression, {sym_function} used.")
elif isinstance(self.function, (str)):
g = lambda x: eval(self.function)
function = sp.lambdify(*self.variables, g(*self.variables))
# print("String expression converted to lambda function.")
else: raise TypeError("Unknown input.")
a, b, alpha, beta = self.a, self.b, self.alpha, self.beta
variables, N = self.variables, self.steps
h = float((b - a)/(N + 1))  # mesh width for N interior nodes; matches np.linspace(a, b, N+2) below
self.step_size = h
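# Assemble the tridiagonal system: ai = main diagonal, bi = super-diagonal,
# ci = sub-diagonal, di = right-hand side (boundary values folded into the first and last rows).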
ai, bi, ci, di = [], [], [], []
p, q, r, ypp = function
x = a + h
ai.append(2 + (h**2)*q(x))
bi.append(-1 + (h/2)*p(x))
di.append(-(h**2)*r(x) + (1 + (h/2)*p(x))*alpha)
for i in range(2, N):
x = a + i*h
ai.append(2 + (h**2)*q(x))
bi.append(-1 + (h/2)*p(x))
ci.append(-1 - (h/2)*p(x))
di.append(-(h**2)*r(x))
x = b - h
ai.append(2 + (h**2)*q(x))
ci.append(-1 - (h/2)*p(x))
di.append(-(h**2)*r(x) + (1 - (h/2)*p(x))*beta)
A = np.zeros((N, N))
np.fill_diagonal(A, ai)
A = A + np.diagflat(bi, 1)
A = A + np.diagflat(ci, -1)
x0 = np.zeros(N)
c = np.array(di)
foo = MultiVariableIteration(A, x0, c, max_iter=1000)
if solver_method == "gauss_seidel":
foo.gauss_seidel()
elif solver_method == "successive_relaxation":
foo.successive_relaxation()
elif solver_method == "jacobi":
foo.jacobi()
else: raise ValueError("ERROR! The desired method must be: 'gauss_seidel', 'successive_relaxation', or 'jacobi'.")
approximations = foo.approximations[-1]
approximations = np.insert(approximations, 0, alpha)
approximations = np.append(approximations, beta)
# return pd.DataFrame(data={"Iterations": range(len(np.linspace(a, b, N+2))), "Domain": np.linspace(a, b, N+2), "Range": approximations}), foo.iterations, foo.errors
self.iterations = np.array(range(N+2))
self.domain = np.array(np.linspace(a, b, N+2))
self.range = np.array(approximations)
return pd.DataFrame(data={"Iterations": self.iterations, "Domain": self.domain, "Range": self.range}), foo.iterations, foo.errors
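    # --- Illustrative aside (not part of the original module, not a method of this class) ---
    # A standalone sketch of the finite-difference discretization assembled above, solved
    # directly with NumPy instead of the iterative solvers: y'' = p(x)y' + q(x)y + r(x),
    # y(a) = alpha, y(b) = beta becomes a tridiagonal system over the interior mesh points.
    # The helper name `_finite_difference_sketch` is hypothetical and only for illustration.
def _finite_difference_sketch(p, q, r, a, b, alpha, beta, n=100):
    import numpy as np  # local import keeps the sketch self-contained
    h = (b - a) / float(n)
    xs = [a + i * h for i in range(1, n)]                 # interior nodes x_1 .. x_{n-1}
    main = [2.0 + h ** 2 * q(x) for x in xs]              # diagonal:        2 + h^2 q(x_i)
    upper = [-1.0 + (h / 2.0) * p(x) for x in xs[:-1]]    # super-diagonal: -1 + (h/2) p(x_i)
    lower = [-1.0 - (h / 2.0) * p(x) for x in xs[1:]]     # sub-diagonal:   -1 - (h/2) p(x_i)
    d = [-h ** 2 * r(x) for x in xs]                      # right-hand side
    d[0] += (1.0 + (h / 2.0) * p(xs[0])) * alpha          # fold in y(a) = alpha
    d[-1] += (1.0 - (h / 2.0) * p(xs[-1])) * beta         # fold in y(b) = beta
    A = np.diag(main) + np.diag(upper, 1) + np.diag(lower, -1)
    w = np.linalg.solve(A, np.array(d))                   # direct tridiagonal solve
    return np.linspace(a, b, n + 1), np.concatenate(([alpha], w, [beta]))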
# --------------------
# # # # # # # # #
#################################
## Test
# test compile of module.
class test: # test class
def test(): # test function
"""Was the module loaded correctly?
Raises
------
SystemExit
Raised (via sys.exit) with the message "Test complete." to signal a successful call.
"""
success = "Test complete."
sys.exit(success)
# # # # # # # # #
#################################
## End of Code
# test.test() # "Test complete."
# # # # # # # # #
| 30.365736
| 390
| 0.633831
|
857c24f0249e3afd05bda2523a9b87bd9e7b77c7
| 1,476
|
py
|
Python
|
wblib/data_models.py
|
AartGoossens/wattbikelib
|
3b0500ff47abb331e9d425cd1a9ef205236156f4
|
[
"MIT"
] | null | null | null |
wblib/data_models.py
|
AartGoossens/wattbikelib
|
3b0500ff47abb331e9d425cd1a9ef205236156f4
|
[
"MIT"
] | 3
|
2017-11-25T19:07:25.000Z
|
2020-11-18T21:14:47.000Z
|
wblib/data_models.py
|
AartGoossens/wblib
|
3b0500ff47abb331e9d425cd1a9ef205236156f4
|
[
"MIT"
] | null | null | null |
from .exceptions import RideSessionException
from .tools import build_hub_files_url
class RideSessionResponseModel:
def __init__(self, data):
self._validate(data)
self.sessions = [RideSessionModel(s) for s in data['results']]
def _validate(self, response):
sessions = response['results']
if not len(sessions):
raise RideSessionException('No results returned')
class RideSessionModel(dict):
def get_user_id(self):
return self['user']['objectId']
def get_session_id(self):
return self['objectId']
def _build_url(self, extension):
return build_hub_files_url(
user_id=self.get_user_id(),
session_id=self.get_session_id(),
extension=extension)
def get_tcx_url(self):
return self._build_url('tcx')
def get_wbs_url(self):
return self._build_url('wbs')
def get_wbsr_url(self):
return self._build_url('wbsr')
class LoginResponseModel(dict):
def get_user_id(self):
return self['objectId']
def get_session_token(self):
return self['sessionToken']
class PerformanceStateModel:
def __init__(self, data):
self.data = data['results'][0]['performanceState']
def get_max_minute_power(self):
return self.data.get('mmp', None)
def get_max_hr(self):
return self.data.get('mhr', None)
def get_ftp(self):
return self.data.get('ftp', None)
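# A minimal usage sketch (illustrative only): the payload shape is inferred from the
# accessors above, the ids are placeholders, and the exact URL format is whatever
# build_hub_files_url produces.
if __name__ == '__main__':
    session = RideSessionModel({
        'user': {'objectId': 'u123'},   # hypothetical user id
        'objectId': 's456',             # hypothetical session id
    })
    print(session.get_user_id())        # -> 'u123'
    print(session.get_tcx_url())        # hub file URL with the 'tcx' extension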
| 25.016949
| 70
| 0.649051
|
a9dba2fc1a42526f8359f553575dbabd2c79f11c
| 6,846
|
py
|
Python
|
bindings/python/ensmallen_graph/datasets/string/desulfotomaculumhydrothermale.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/desulfotomaculumhydrothermale.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
bindings/python/ensmallen_graph/datasets/string/desulfotomaculumhydrothermale.py
|
caufieldjh/ensmallen_graph
|
14e98b1cdbc73193a84a913d7d4f2b2b3eb2c43a
|
[
"MIT"
] | null | null | null |
"""
This file offers the methods to automatically retrieve the graph Desulfotomaculum hydrothermale.
The graph is automatically retrieved from the STRING repository.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:36:22.638624
The undirected graph Desulfotomaculum hydrothermale has 2619 nodes and
181433 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.05292 and has 8 connected components, where the
component with most nodes has 2602 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 109, the mean node degree
is 138.55, and the node degree mode is 5. The top 5 most central nodes
are 1121428.DESHY_60166___1 (degree 996), 1121428.DESHY_110225___1 (degree
969), 1121428.DESHY_80042___1 (degree 868), 1121428.DESHY_60191___1 (degree
824) and 1121428.DESHY_160145___1 (degree 765).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import DesulfotomaculumHydrothermale
# Then load the graph
graph = DesulfotomaculumHydrothermale()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen_graph import EnsmallenGraph # pylint: disable=import-error
def DesulfotomaculumHydrothermale(
directed: bool = False,
verbose: int = 2,
cache_path: str = "graphs/string",
**additional_graph_kwargs: Dict
) -> EnsmallenGraph:
"""Return new instance of the Desulfotomaculum hydrothermale graph.
The graph is automatically retrieved from the STRING repository.
Parameters
-------------------
directed: bool = False,
Whether to load the graph as directed or undirected.
By default false.
verbose: int = 2,
Whether to show loading bars during the retrieval and building
of the graph.
cache_path: str = "graphs/string",
Where to store the downloaded graphs.
additional_graph_kwargs: Dict,
Additional graph kwargs.
Returns
-----------------------
Instance of the Desulfotomaculum hydrothermale graph.
Report
---------------------
At the time of rendering these methods (please see datetime below), the graph
had the following characteristics:
Datetime: 2021-02-02 22:36:22.638624
The undirected graph Desulfotomaculum hydrothermale has 2619 nodes and
181433 weighted edges, of which none are self-loops. The graph is dense
as it has a density of 0.05292 and has 8 connected components, where the
component with most nodes has 2602 nodes and the component with the least
nodes has 2 nodes. The graph median node degree is 109, the mean node degree
is 138.55, and the node degree mode is 5. The top 5 most central nodes
are 1121428.DESHY_60166___1 (degree 996), 1121428.DESHY_110225___1 (degree
969), 1121428.DESHY_80042___1 (degree 868), 1121428.DESHY_60191___1 (degree
824) and 1121428.DESHY_160145___1 (degree 765).
References
---------------------
Please cite the following if you use the data:
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
Usage example
----------------------
The usage of this graph is relatively straightforward:
.. code:: python
# First import the function to retrieve the graph from the datasets
from ensmallen_graph.datasets.string import DesulfotomaculumHydrothermale
# Then load the graph
graph = DesulfotomaculumHydrothermale()
# Finally, you can do anything with it, for instance, compute its report:
print(graph)
# If you need to run a link prediction task with validation,
# you can split the graph using a connected holdout as follows:
train_graph, validation_graph = graph.connected_holdout(
# You can use an 80/20 split for the holdout, for example.
train_size=0.8,
# The random state is used to reproduce the holdout.
random_state=42,
# Whether to show a loading bar.
verbose=True
)
# Remember that, if you need, you can enable the memory-time trade-offs:
train_graph.enable(
vector_sources=True,
vector_destinations=True,
vector_outbounds=True
)
# Consider using the methods made available in the Embiggen package
# to run graph embedding or link prediction tasks.
"""
return AutomaticallyRetrievedGraph(
graph_name="DesulfotomaculumHydrothermale",
dataset="string",
directed=directed,
verbose=verbose,
cache_path=cache_path,
additional_graph_kwargs=additional_graph_kwargs
)()
| 35.842932
| 223
| 0.710488
|
fcfdd6b333d94c3c2623d1b7ad825f342134d6eb
| 452
|
py
|
Python
|
app/marks/urls.py
|
CFelipe/marks
|
cc7e42eca9599f7e9df96f93c764faadf15f9bea
|
[
"MIT"
] | 21
|
2019-05-13T12:54:47.000Z
|
2022-01-10T19:51:26.000Z
|
app/marks/urls.py
|
CFelipe/marks
|
cc7e42eca9599f7e9df96f93c764faadf15f9bea
|
[
"MIT"
] | 19
|
2018-11-18T20:10:09.000Z
|
2019-11-16T02:47:38.000Z
|
app/marks/urls.py
|
CFelipe/marks
|
cc7e42eca9599f7e9df96f93c764faadf15f9bea
|
[
"MIT"
] | 1
|
2018-06-12T21:33:19.000Z
|
2018-06-12T21:33:19.000Z
|
from django.conf.urls import include, url
from django.contrib import admin
from django.shortcuts import get_object_or_404, get_list_or_404
from django.db.models import Count
from django.conf import settings
from marksapp.models import Bookmark, Tag
urlpatterns = [url(r"^", include("marksapp.urls")), url(r"^admin/", admin.site.urls)]
if settings.DEBUG:
import debug_toolbar
urlpatterns += [url(r"^__debug__/", include(debug_toolbar.urls))]
| 32.285714
| 85
| 0.772124
|
e607d7ba3ec7fa67fe5d10e9821ad0f46b919731
| 4,377
|
py
|
Python
|
examples/enclave_manager/tcf_enclave_manager/tcf_enclave_helper.py
|
manojsalunke85/avalon
|
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
|
[
"Apache-2.0"
] | null | null | null |
examples/enclave_manager/tcf_enclave_manager/tcf_enclave_helper.py
|
manojsalunke85/avalon
|
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
|
[
"Apache-2.0"
] | null | null | null |
examples/enclave_manager/tcf_enclave_manager/tcf_enclave_helper.py
|
manojsalunke85/avalon
|
99fc49ac215ac3dfcfb0547f8abebc0b131dfad1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This file defines the Enclave class to simplify integration of the SGX
enclave module into the rest of the tcf flow. Typically, an application
will call the initialize_enclave function first,
then will call create_enclave_signup_data.
"""
import random
import logging
import tcf_enclave_bridge as tcf_enclave
import crypto_utils.keys as keys
logger = logging.getLogger(__name__)
# -----------------------------------------------------------------
def initialize_enclave(enclave_config):
"""initialize_enclave -- Call the initialization function on the
enclave module
"""
return tcf_enclave.initialize_with_configuration(enclave_config)
# -----------------------------------------------------------------
class EnclaveHelper(object):
"""
Wraps calls to the client for symmetry with the enclave service client
"""
# -------------------------------------------------------
@classmethod
def create_enclave_signup_data(cls, tcf_instance_keys=None):
"""create_enclave_signup_data -- Create enclave signup data
:param tcf_instance_keys: Object of type TransactionKeys
"""
if tcf_instance_keys is None:
tcf_instance_keys = keys.TransactionKeys()
nonce = '{0:016X}'.format(random.getrandbits(64))
hashed_identity = tcf_instance_keys.hashed_identity
logger.debug("tx hashed identity: %s", hashed_identity)
try:
enclave_data = tcf_enclave.create_signup_info(
hashed_identity, nonce)
except Exception as err:
raise Exception('failed to create enclave signup data; {}'
.format(str(err)))
enclave_info = dict()
enclave_info['nonce'] = nonce
enclave_info['sealed_data'] = enclave_data.sealed_signup_data
enclave_info['verifying_key'] = enclave_data.verifying_key
enclave_info['encryption_key'] = enclave_data.encryption_key
enclave_info['enclave_id'] = enclave_data.verifying_key
enclave_info['proof_data'] = ''
if not tcf_enclave.enclave.is_sgx_simulator():
enclave_info['proof_data'] = enclave_data.proof_data
return cls(enclave_info, tcf_instance_keys)
# -------------------------------------------------------
def __init__(self, enclave_info, tcf_instance_keys):
# Initialize the keys that can be used later to
# register the enclave
self.tcf_instance_keys = tcf_instance_keys
try:
self.nonce = enclave_info['nonce']
self.sealed_data = enclave_info['sealed_data']
self.verifying_key = enclave_info['verifying_key']
self.encryption_key = enclave_info['encryption_key']
self.proof_data = enclave_info['proof_data']
self.enclave_id = enclave_info['enclave_id']
except KeyError as ke:
raise Exception("missing enclave initialization parameter; {}"
.format(str(ke)))
self.enclave_keys = \
keys.EnclaveKeys(self.verifying_key, self.encryption_key)
# -------------------------------------------------------
def send_to_sgx_worker(self, encrypted_request):
"""
Submit workorder request to the SGX Worker enclave
:param encrypted_request: base64 encoded encrypted workorder request
"""
return tcf_enclave.send_to_sgx_worker(
self.sealed_data,
encrypted_request)
# -------------------------------------------------------
def get_enclave_public_info(self):
"""
Return information about the enclave
"""
return tcf_enclave.get_enclave_public_info(self.sealed_data)
# -------------------------------------------------------
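# A minimal sketch of the call flow described in the module docstring (illustrative
# only): initialize the enclave once, create signup data, then submit work orders.
# The configuration contents and the encrypted work-order payload are caller-supplied
# and are not specified by this module; an SGX-capable tcf_enclave build is assumed,
# and the helper name `_example_enclave_flow` is hypothetical.
def _example_enclave_flow(enclave_config, encrypted_request):
    initialize_enclave(enclave_config)
    enclave = EnclaveHelper.create_enclave_signup_data()
    logger.debug("enclave id: %s", enclave.enclave_id)
    return enclave.send_to_sgx_worker(encrypted_request)
# -------------------------------------------------------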
| 37.410256
| 76
| 0.620059
|
7c4602834646f2539046fad341f08bb592084b23
| 1,503
|
py
|
Python
|
src/obstools/image/orient.py
|
astromancer/obstools
|
c83f562a4f602cbe4420bec07308159761818a79
|
[
"MIT"
] | null | null | null |
src/obstools/image/orient.py
|
astromancer/obstools
|
c83f562a4f602cbe4420bec07308159761818a79
|
[
"MIT"
] | null | null | null |
src/obstools/image/orient.py
|
astromancer/obstools
|
c83f562a4f602cbe4420bec07308159761818a79
|
[
"MIT"
] | null | null | null |
# from astropy.io.fits import PrimaryHDU
class ImageOrienter(object):
"""
Simple base class that stores the orientation state. Images are
re-oriented upon item access.
"""
def __init__(self, hdu, flip=(), x=False, y=False):
"""
Wrap an HDU so that its image data is flipped on item access.
Parameters
----------
hdu : HDU-like object
Object exposing `ndim`, `shape`, `data` and, for data with more than two dimensions, `section`.
flip : tuple, optional
Axis names to flip; may contain 'x' and/or 'y'. By default ().
x : bool, optional
Whether to flip the x (last) axis. By default False.
y : bool, optional
Whether to flip the y (second-to-last) axis. By default False.
"""
# assert isinstance(hdu, PrimaryHDU)
assert hdu.ndim >= 2
self.hdu = hdu
# set some array-like attributes
self.ndim = self.hdu.ndim
self.shape = self.hdu.shape
# setup tuple of slices for array
# orient = [slice(None)] * self.hdu.ndim
orient = [..., slice(None), slice(None)]
for i, (s, t) in enumerate(zip('xy', (x, y)), 1):
if (s in flip) or t:
orient[-i] = slice(None, None, -1)
self.orient = tuple(orient)
def __call__(self, data):
return data[self.orient]
def __getitem__(self, item):
if self.hdu.ndim == 2:
# `section` fails with 2d data
return self.hdu.data[self.orient][item]
# reading section for performance
return self.hdu.section[item][self.orient]
def __array__(self):
return self.hdu.data[self.orient]
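# A minimal usage sketch (illustrative only): any object exposing `ndim`, `shape`,
# `data` and, for >2-D data, `section` can be wrapped, as the assertions above
# require. A FITS HDU is the intended input; a tiny stand-in keeps this self-contained.
if __name__ == '__main__':
    import numpy as np
    from types import SimpleNamespace
    data = np.arange(12).reshape(3, 4)
    hdu = SimpleNamespace(ndim=data.ndim, shape=data.shape, data=data)
    oriented = ImageOrienter(hdu, flip=('x',))  # reverse the x (last) axis on access
    print(np.array(oriented))                   # same as data[..., ::-1]
    print(oriented[0])                          # first row, re-oriented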
| 27.833333
| 67
| 0.547572
|
086ab65dbccf964a7bab6fc25ef4c29abc6e3ffa
| 1,327
|
py
|
Python
|
apps/blog/urls.py
|
HengCodes/izone
|
1423f40c49914217690a1be18909de36702acf0e
|
[
"MIT"
] | null | null | null |
apps/blog/urls.py
|
HengCodes/izone
|
1423f40c49914217690a1be18909de36702acf0e
|
[
"MIT"
] | null | null | null |
apps/blog/urls.py
|
HengCodes/izone
|
1423f40c49914217690a1be18909de36702acf0e
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from django.urls import path
# from .views import goview
from .views import (IndexView, DetailView, CategoryView, TagView, AboutView,
SilianView, MySearchView, ArchiveView, TimelineView, CourseView, CourseListView)
urlpatterns = [
# path('go/', goview, name='go'), # test-only page
path('', IndexView.as_view(), name='index'), # home page, default ordering
path('hot/', IndexView.as_view(), {'sort': 'v'}, name='index_hot'), # home page, sorted by view count
path('article/<slug:slug>/', DetailView.as_view(), name='detail'), # article detail page
path('course/<slug:slug>/', CourseView.as_view(), name='course'),
path('category/<slug:slug>/', CategoryView.as_view(), name='category'),
path('category/<slug:slug>/hot/', CategoryView.as_view(), {'sort': 'v'},
name='category_hot'),
path('tag/<slug:slug>/', TagView.as_view(), name='tag'),
path('tag/<slug:slug>/hot/', TagView.as_view(), {'sort': 'v'}, name='tag_hot'),
path('about/', AboutView, name='about'), # About page
path('timeline/', TimelineView.as_view(), name='timeline'), # timeline page
path('archive/', ArchiveView.as_view(), name='archive'), # archive page
path('silian.xml', SilianView.as_view(content_type='application/xml'), name='silian'), # dead-link (silian) submission page
path('search/', MySearchView.as_view(), name='search_view'), # full-text search
]
| 51.038462
| 100
| 0.639035
|
e9eb8afa599164175f9fd1f3db996207a5f67409
| 23
|
py
|
Python
|
src/psd_tools/version.py
|
tannerhelland/psd-tools
|
f4d3fbdcb2164ab745046f9663ae2275a5b2b1ba
|
[
"MIT"
] | 1
|
2021-07-30T03:41:21.000Z
|
2021-07-30T03:41:21.000Z
|
src/psd_tools/version.py
|
tannerhelland/psd-tools
|
f4d3fbdcb2164ab745046f9663ae2275a5b2b1ba
|
[
"MIT"
] | null | null | null |
src/psd_tools/version.py
|
tannerhelland/psd-tools
|
f4d3fbdcb2164ab745046f9663ae2275a5b2b1ba
|
[
"MIT"
] | 2
|
2021-03-31T07:58:12.000Z
|
2021-07-30T03:41:22.000Z
|
__version__ = '1.9.13'
| 11.5
| 22
| 0.652174
|
ef48d2c14bf9709b9886e97c673d64bd21f40839
| 20,761
|
py
|
Python
|
ucscsdk/mometa/nfs/NfsExportDef.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 9
|
2016-12-22T08:39:25.000Z
|
2019-09-10T15:36:19.000Z
|
ucscsdk/mometa/nfs/NfsExportDef.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 10
|
2017-01-31T06:59:56.000Z
|
2021-11-09T09:14:37.000Z
|
ucscsdk/mometa/nfs/NfsExportDef.py
|
parag-may4/ucscsdk
|
2ea762fa070330e3a4e2c21b46b157469555405b
|
[
"Apache-2.0"
] | 13
|
2016-11-14T07:42:58.000Z
|
2022-02-10T17:32:05.000Z
|
"""This module contains the general information for NfsExportDef ManagedObject."""
from ...ucscmo import ManagedObject
from ...ucsccoremeta import UcscVersion, MoPropertyMeta, MoMeta
from ...ucscmeta import VersionMeta
class NfsExportDefConsts():
CONFIG_STATE_ADDED = "added"
CONFIG_STATE_NONE = "none"
CONFIG_STATE_REMOVE = "remove"
FSM_PREV_ADD_REPO_BEGIN = "AddRepoBegin"
FSM_PREV_ADD_REPO_FAIL = "AddRepoFail"
FSM_PREV_ADD_REPO_LOCAL = "AddRepoLocal"
FSM_PREV_ADD_REPO_SUCCESS = "AddRepoSuccess"
FSM_PREV_IMAGE_SYNC_BEGIN = "ImageSyncBegin"
FSM_PREV_IMAGE_SYNC_FAIL = "ImageSyncFail"
FSM_PREV_IMAGE_SYNC_LOCAL = "ImageSyncLocal"
FSM_PREV_IMAGE_SYNC_SUCCESS = "ImageSyncSuccess"
FSM_PREV_NOP = "nop"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_CANCELLED = "ERR-DIAG-cancelled"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_FSM_RESTARTED = "ERR-DIAG-fsm-restarted"
FSM_RMT_INV_ERR_CODE_ERR_DIAG_TEST_FAILED = "ERR-DIAG-test-failed"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_AUTHENTICATION_FAILURE = "ERR-DNLD-authentication-failure"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_ERROR = "ERR-DNLD-error"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_HOSTKEY_MISMATCH = "ERR-DNLD-hostkey-mismatch"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_INVALID_IMAGE = "ERR-DNLD-invalid-image"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_FILE = "ERR-DNLD-no-file"
FSM_RMT_INV_ERR_CODE_ERR_DNLD_NO_SPACE = "ERR-DNLD-no-space"
FSM_RMT_INV_ERR_CODE_ERR_DNS_DELETE_ERROR = "ERR-DNS-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_GET_ERROR = "ERR-DNS-get-error"
FSM_RMT_INV_ERR_CODE_ERR_DNS_SET_ERROR = "ERR-DNS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_DIGEST_VALIDATION_ERROR = "ERR-Digest-Validation-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GEN_CERT_ERROR = "ERR-Exec-Gen-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_EXEC_GET_CA_CERT_ERROR = "ERR-Exec-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_FILTER_ILLEGAL_FORMAT = "ERR-FILTER-illegal-format"
FSM_RMT_INV_ERR_CODE_ERR_FSM_NO_SUCH_STATE = "ERR-FSM-no-such-state"
FSM_RMT_INV_ERR_CODE_ERR_GET_CA_CERT_ERROR = "ERR-Get-CA-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_CERT_ERROR = "ERR-Get-Cert-error"
FSM_RMT_INV_ERR_CODE_ERR_GET_OUT_DIGET_MESSAGE_ERROR = "ERR-Get-Out-Diget-Message-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_REQUEST_ERROR = "ERR-HTTP-Request-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_SET_ERROR = "ERR-HTTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_HTTPS_SET_ERROR = "ERR-HTTPS-set-error"
FSM_RMT_INV_ERR_CODE_ERR_IPV6_ADDR_CONFIGURED = "ERR-Ipv6-addr-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_CONFIG_CHILD_OBJECT_CANT_BE_CONFIGURED = "ERR-MO-CONFIG-child-object-cant-be-configured"
FSM_RMT_INV_ERR_CODE_ERR_MO_META_NO_SUCH_OBJECT_CLASS = "ERR-MO-META-no-such-object-class"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_NO_SUCH_PROPERTY = "ERR-MO-PROPERTY-no-such-property"
FSM_RMT_INV_ERR_CODE_ERR_MO_PROPERTY_VALUE_OUT_OF_RANGE = "ERR-MO-PROPERTY-value-out-of-range"
FSM_RMT_INV_ERR_CODE_ERR_MO_ACCESS_DENIED = "ERR-MO-access-denied"
FSM_RMT_INV_ERR_CODE_ERR_MO_DELETION_RULE_VIOLATION = "ERR-MO-deletion-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_DUPLICATE_OBJECT = "ERR-MO-duplicate-object"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CONTAINMENT = "ERR-MO-illegal-containment"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_CREATION = "ERR-MO-illegal-creation"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_ITERATOR_STATE = "ERR-MO-illegal-iterator-state"
FSM_RMT_INV_ERR_CODE_ERR_MO_ILLEGAL_OBJECT_LIFECYCLE_TRANSITION = "ERR-MO-illegal-object-lifecycle-transition"
FSM_RMT_INV_ERR_CODE_ERR_MO_NAMING_RULE_VIOLATION = "ERR-MO-naming-rule-violation"
FSM_RMT_INV_ERR_CODE_ERR_MO_OBJECT_NOT_FOUND = "ERR-MO-object-not-found"
FSM_RMT_INV_ERR_CODE_ERR_MO_RESOURCE_ALLOCATION = "ERR-MO-resource-allocation"
FSM_RMT_INV_ERR_CODE_ERR_NTP_DELETE_ERROR = "ERR-NTP-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_GET_ERROR = "ERR-NTP-get-error"
FSM_RMT_INV_ERR_CODE_ERR_NTP_SET_ERROR = "ERR-NTP-set-error"
FSM_RMT_INV_ERR_CODE_ERR_POLICY_RESOLUTION_IN_PROGRESS = "ERR-Policy-resolution-in-progress"
FSM_RMT_INV_ERR_CODE_ERR_TOKEN_REQUEST_DENIED = "ERR-TOKEN-request-denied"
FSM_RMT_INV_ERR_CODE_ERR_UPDATE_VM_IP_MASK_GATEWAY_ERROR = "ERR-Update-VM-IP-Mask-Gateway-error"
FSM_RMT_INV_ERR_CODE_ERR_AAA_CONFIG_MODIFY_ERROR = "ERR-aaa-config-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_ACCT_REALM_SET_ERROR = "ERR-acct-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ADMIN_PASSWD_SET = "ERR-admin-passwd-set"
FSM_RMT_INV_ERR_CODE_ERR_AUTH_REALM_SET_ERROR = "ERR-auth-realm-set-error"
FSM_RMT_INV_ERR_CODE_ERR_AUTHENTICATION = "ERR-authentication"
FSM_RMT_INV_ERR_CODE_ERR_AUTHORIZATION_REQUIRED = "ERR-authorization-required"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_CHASSISPACK_UNDER_DG = "ERR-create-chassispack-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_HFP_UNDER_DG = "ERR-create-hfp-under-dg"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_KEYRING = "ERR-create-keyring"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_LOCALE = "ERR-create-locale"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_ROLE = "ERR-create-role"
FSM_RMT_INV_ERR_CODE_ERR_CREATE_USER = "ERR-create-user"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_LOCALE = "ERR-delete-locale"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_ROLE = "ERR-delete-role"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_SESSION = "ERR-delete-session"
FSM_RMT_INV_ERR_CODE_ERR_DELETE_USER = "ERR-delete-user"
FSM_RMT_INV_ERR_CODE_ERR_ESTIMATE_IMPACT_ON_RECONNECT = "ERR-estimate-impact-on-reconnect"
FSM_RMT_INV_ERR_CODE_ERR_GET_MAX_HTTP_USER_SESSIONS = "ERR-get-max-http-user-sessions"
FSM_RMT_INV_ERR_CODE_ERR_HTTP_INITIALIZING = "ERR-http-initializing"
FSM_RMT_INV_ERR_CODE_ERR_INTERNAL_ERROR = "ERR-internal-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_DELETE_ERROR = "ERR-ldap-delete-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GET_ERROR = "ERR-ldap-get-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_MODIFY_ERROR = "ERR-ldap-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_GROUP_SET_ERROR = "ERR-ldap-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LDAP_SET_ERROR = "ERR-ldap-set-error"
FSM_RMT_INV_ERR_CODE_ERR_LOCALE_SET_ERROR = "ERR-locale-set-error"
FSM_RMT_INV_ERR_CODE_ERR_MAX_USERID_SESSIONS_REACHED = "ERR-max-userid-sessions-reached"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_LOCALE = "ERR-modify-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_ROLE = "ERR-modify-role"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER = "ERR-modify-user"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_LOCALE = "ERR-modify-user-locale"
FSM_RMT_INV_ERR_CODE_ERR_MODIFY_USER_ROLE = "ERR-modify-user-role"
FSM_RMT_INV_ERR_CODE_ERR_NFS_DOWN = "ERR-nfs-down"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_MODIFY_ERROR = "ERR-provider-group-modify-error"
FSM_RMT_INV_ERR_CODE_ERR_PROVIDER_GROUP_SET_ERROR = "ERR-provider-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GLOBAL_SET_ERROR = "ERR-radius-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_GROUP_SET_ERROR = "ERR-radius-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_RADIUS_SET_ERROR = "ERR-radius-set-error"
FSM_RMT_INV_ERR_CODE_ERR_ROLE_SET_ERROR = "ERR-role-set-error"
FSM_RMT_INV_ERR_CODE_ERR_SERVICE_NOT_READY = "ERR-service-not-ready"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_CACHE_FULL = "ERR-session-cache-full"
FSM_RMT_INV_ERR_CODE_ERR_SESSION_NOT_FOUND = "ERR-session-not-found"
FSM_RMT_INV_ERR_CODE_ERR_SET_PASSWORD_STRENGTH_CHECK = "ERR-set-password-strength-check"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_ENABLE_ERROR = "ERR-tacacs-enable-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GLOBAL_SET_ERROR = "ERR-tacacs-global-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_GROUP_SET_ERROR = "ERR-tacacs-group-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TACACS_SET_ERROR = "ERR-tacacs-set-error"
FSM_RMT_INV_ERR_CODE_ERR_TIMEZONE_SET_ERROR = "ERR-timezone-set-error"
FSM_RMT_INV_ERR_CODE_ERR_USER_ACCOUNT_EXPIRED = "ERR-user-account-expired"
FSM_RMT_INV_ERR_CODE_ERR_USER_SET_ERROR = "ERR-user-set-error"
FSM_RMT_INV_ERR_CODE_NONE = "none"
FSM_STAMP_NEVER = "never"
FSM_STATUS_ADD_REPO_BEGIN = "AddRepoBegin"
FSM_STATUS_ADD_REPO_FAIL = "AddRepoFail"
FSM_STATUS_ADD_REPO_LOCAL = "AddRepoLocal"
FSM_STATUS_ADD_REPO_SUCCESS = "AddRepoSuccess"
FSM_STATUS_IMAGE_SYNC_BEGIN = "ImageSyncBegin"
FSM_STATUS_IMAGE_SYNC_FAIL = "ImageSyncFail"
FSM_STATUS_IMAGE_SYNC_LOCAL = "ImageSyncLocal"
FSM_STATUS_IMAGE_SYNC_SUCCESS = "ImageSyncSuccess"
FSM_STATUS_NOP = "nop"
INT_ID_NONE = "none"
MONITOR_STATE_FAILED = "failed"
MONITOR_STATE_MONITORED = "monitored"
MONITOR_STATE_MONITORING = "monitoring"
MONITOR_STATE_UNREGISTERED = "unregistered"
OPER_STATE_FAILED = "failed"
OPER_STATE_SYNCED = "synced"
OPER_STATE_SYNCING = "syncing"
OPER_STATE_UNREGISTERED = "unregistered"
POLICY_OWNER_LOCAL = "local"
POLICY_OWNER_PENDING_POLICY = "pending-policy"
POLICY_OWNER_POLICY = "policy"
POLICY_OWNER_UNSPECIFIED = "unspecified"
class NfsExportDef(ManagedObject):
"""This is NfsExportDef class."""
consts = NfsExportDefConsts()
naming_props = set([u'name'])
mo_meta = MoMeta("NfsExportDef", "nfsExportDef", "nfs-export-def-[name]", VersionMeta.Version101a, "InputOutput", 0x7ff, [], ["admin"], [u'nfsEp'], [u'eventInst', u'faultInst', u'nfsClientDef', u'nfsExportDefFsm', u'nfsExportDefFsmTask', u'storageItem'], [None])
prop_meta = {
"child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
"config_state": MoPropertyMeta("config_state", "configState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["added", "none", "remove"], []),
"descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x2, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
"dir": MoPropertyMeta("dir", "dir", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x4, 0, 510, None, [], []),
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
"flt_aggr": MoPropertyMeta("flt_aggr", "fltAggr", "ulong", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_descr": MoPropertyMeta("fsm_descr", "fsmDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_prev": MoPropertyMeta("fsm_prev", "fsmPrev", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["AddRepoBegin", "AddRepoFail", "AddRepoLocal", "AddRepoSuccess", "ImageSyncBegin", "ImageSyncFail", "ImageSyncLocal", "ImageSyncSuccess", "nop"], []),
"fsm_progr": MoPropertyMeta("fsm_progr", "fsmProgr", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], ["0-100"]),
"fsm_rmt_inv_err_code": MoPropertyMeta("fsm_rmt_inv_err_code", "fsmRmtInvErrCode", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["ERR-DIAG-cancelled", "ERR-DIAG-fsm-restarted", "ERR-DIAG-test-failed", "ERR-DNLD-authentication-failure", "ERR-DNLD-error", "ERR-DNLD-hostkey-mismatch", "ERR-DNLD-invalid-image", "ERR-DNLD-no-file", "ERR-DNLD-no-space", "ERR-DNS-delete-error", "ERR-DNS-get-error", "ERR-DNS-set-error", "ERR-Digest-Validation-error", "ERR-Exec-Gen-Cert-error", "ERR-Exec-Get-CA-Cert-error", "ERR-FILTER-illegal-format", "ERR-FSM-no-such-state", "ERR-Get-CA-Cert-error", "ERR-Get-Cert-error", "ERR-Get-Out-Diget-Message-error", "ERR-HTTP-Request-error", "ERR-HTTP-set-error", "ERR-HTTPS-set-error", "ERR-Ipv6-addr-configured", "ERR-MO-CONFIG-child-object-cant-be-configured", "ERR-MO-META-no-such-object-class", "ERR-MO-PROPERTY-no-such-property", "ERR-MO-PROPERTY-value-out-of-range", "ERR-MO-access-denied", "ERR-MO-deletion-rule-violation", "ERR-MO-duplicate-object", "ERR-MO-illegal-containment", "ERR-MO-illegal-creation", "ERR-MO-illegal-iterator-state", "ERR-MO-illegal-object-lifecycle-transition", "ERR-MO-naming-rule-violation", "ERR-MO-object-not-found", "ERR-MO-resource-allocation", "ERR-NTP-delete-error", "ERR-NTP-get-error", "ERR-NTP-set-error", "ERR-Policy-resolution-in-progress", "ERR-TOKEN-request-denied", "ERR-Update-VM-IP-Mask-Gateway-error", "ERR-aaa-config-modify-error", "ERR-acct-realm-set-error", "ERR-admin-passwd-set", "ERR-auth-realm-set-error", "ERR-authentication", "ERR-authorization-required", "ERR-create-chassispack-under-dg", "ERR-create-hfp-under-dg", "ERR-create-keyring", "ERR-create-locale", "ERR-create-role", "ERR-create-user", "ERR-delete-locale", "ERR-delete-role", "ERR-delete-session", "ERR-delete-user", "ERR-estimate-impact-on-reconnect", "ERR-get-max-http-user-sessions", "ERR-http-initializing", "ERR-internal-error", "ERR-ldap-delete-error", "ERR-ldap-get-error", "ERR-ldap-group-modify-error", "ERR-ldap-group-set-error", "ERR-ldap-set-error", "ERR-locale-set-error", "ERR-max-userid-sessions-reached", "ERR-modify-locale", "ERR-modify-role", "ERR-modify-user", "ERR-modify-user-locale", "ERR-modify-user-role", "ERR-nfs-down", "ERR-provider-group-modify-error", "ERR-provider-group-set-error", "ERR-radius-global-set-error", "ERR-radius-group-set-error", "ERR-radius-set-error", "ERR-role-set-error", "ERR-service-not-ready", "ERR-session-cache-full", "ERR-session-not-found", "ERR-set-password-strength-check", "ERR-tacacs-enable-error", "ERR-tacacs-global-set-error", "ERR-tacacs-group-set-error", "ERR-tacacs-set-error", "ERR-timezone-set-error", "ERR-user-account-expired", "ERR-user-set-error", "none"], ["0-4294967295"]),
"fsm_rmt_inv_err_descr": MoPropertyMeta("fsm_rmt_inv_err_descr", "fsmRmtInvErrDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, 0, 510, None, [], []),
"fsm_rmt_inv_rslt": MoPropertyMeta("fsm_rmt_inv_rslt", "fsmRmtInvRslt", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""((defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout),){0,32}(defaultValue|not-applicable|resource-unavailable|service-unavailable|intermittent-error|sw-defect|service-not-implemented-ignore|extend-timeout|capability-not-implemented-failure|illegal-fru|end-point-unavailable|failure|resource-capacity-exceeded|service-protocol-error|fw-defect|service-not-implemented-fail|task-reset|unidentified-fail|capability-not-supported|end-point-failed|fru-state-indeterminate|resource-dependency|fru-identity-indeterminate|internal-error|hw-defect|service-not-supported|fru-not-supported|end-point-protocol-error|capability-unavailable|fru-not-ready|capability-not-implemented-ignore|fru-info-malformed|timeout){0,1}""", [], []),
"fsm_stage_descr": MoPropertyMeta("fsm_stage_descr", "fsmStageDescr", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"fsm_stamp": MoPropertyMeta("fsm_stamp", "fsmStamp", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", ["never"], []),
"fsm_status": MoPropertyMeta("fsm_status", "fsmStatus", "string", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, ["AddRepoBegin", "AddRepoFail", "AddRepoLocal", "AddRepoSuccess", "ImageSyncBegin", "ImageSyncFail", "ImageSyncLocal", "ImageSyncSuccess", "nop"], []),
"fsm_try": MoPropertyMeta("fsm_try", "fsmTry", "byte", VersionMeta.Version111a, MoPropertyMeta.INTERNAL, None, None, None, None, [], []),
"int_id": MoPropertyMeta("int_id", "intId", "string", VersionMeta.Version101a, MoPropertyMeta.INTERNAL, None, None, None, None, ["none"], ["0-4294967295"]),
"ipv6server": MoPropertyMeta("ipv6server", "ipv6server", "string", VersionMeta.Version112a, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""^([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([0-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])\.([1-9]|[1-9][0-9]|1[0-9][0-9]|2[0-4][0-9]|25[0-5])$|[a-fA-F0-9:\[\]]{1,39}""", [], []),
"link_speed": MoPropertyMeta("link_speed", "linkSpeed", "uint", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x20, None, None, None, [], []),
"monitor_state": MoPropertyMeta("monitor_state", "monitorState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["failed", "monitored", "monitoring", "unregistered"], []),
"name": MoPropertyMeta("name", "name", "string", VersionMeta.Version101a, MoPropertyMeta.NAMING, 0x40, None, None, r"""[\-\.:_a-zA-Z0-9]{1,16}""", [], []),
"oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version111a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["failed", "synced", "syncing", "unregistered"], []),
"password": MoPropertyMeta("password", "password", "string", VersionMeta.Version111a, MoPropertyMeta.READ_WRITE, 0x80, None, None, r"""[!""#%&'\(\)\*\+,\-\./:;<>@\[\\\]\^_`\{\|\}~a-zA-Z0-9]{0,64}""", [], []),
"policy_level": MoPropertyMeta("policy_level", "policyLevel", "uint", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
"policy_owner": MoPropertyMeta("policy_owner", "policyOwner", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, None, None, None, None, ["local", "pending-policy", "policy", "unspecified"], []),
"rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101a, MoPropertyMeta.READ_ONLY, 0x100, 0, 256, None, [], []),
"server": MoPropertyMeta("server", "server", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x200, 0, 510, None, [], []),
"status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101a, MoPropertyMeta.READ_WRITE, 0x400, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
}
prop_map = {
"childAction": "child_action",
"configState": "config_state",
"descr": "descr",
"dir": "dir",
"dn": "dn",
"fltAggr": "flt_aggr",
"fsmDescr": "fsm_descr",
"fsmPrev": "fsm_prev",
"fsmProgr": "fsm_progr",
"fsmRmtInvErrCode": "fsm_rmt_inv_err_code",
"fsmRmtInvErrDescr": "fsm_rmt_inv_err_descr",
"fsmRmtInvRslt": "fsm_rmt_inv_rslt",
"fsmStageDescr": "fsm_stage_descr",
"fsmStamp": "fsm_stamp",
"fsmStatus": "fsm_status",
"fsmTry": "fsm_try",
"intId": "int_id",
"ipv6server": "ipv6server",
"linkSpeed": "link_speed",
"monitorState": "monitor_state",
"name": "name",
"operState": "oper_state",
"password": "password",
"policyLevel": "policy_level",
"policyOwner": "policy_owner",
"rn": "rn",
"server": "server",
"status": "status",
}
def __init__(self, parent_mo_or_dn, name, **kwargs):
self._dirty_mask = 0
self.name = name
self.child_action = None
self.config_state = None
self.descr = None
self.dir = None
self.flt_aggr = None
self.fsm_descr = None
self.fsm_prev = None
self.fsm_progr = None
self.fsm_rmt_inv_err_code = None
self.fsm_rmt_inv_err_descr = None
self.fsm_rmt_inv_rslt = None
self.fsm_stage_descr = None
self.fsm_stamp = None
self.fsm_status = None
self.fsm_try = None
self.int_id = None
self.ipv6server = None
self.link_speed = None
self.monitor_state = None
self.oper_state = None
self.password = None
self.policy_level = None
self.policy_owner = None
self.server = None
self.status = None
ManagedObject.__init__(self, "NfsExportDef", parent_mo_or_dn, **kwargs)
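# A minimal usage sketch (illustrative only; the parent object and property values
# are placeholders). Per mo_meta above the parent must be an nfsEp managed object,
# and "name", "server" and "dir" are writable properties defined in prop_meta:
#
#     export = NfsExportDef(parent_mo_or_dn=nfs_ep_mo, name="images",
#                           server="10.0.0.1", dir="/exports/images")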
| 85.789256
| 2,742
| 0.737151
|
76af2b5ed0d440dbcd1c23ef410775e16705b97b
| 50,340
|
py
|
Python
|
tests/functional_tests.py
|
sonatagreen/lbry
|
f6473e3383956b7823a76516622bba6c172619e1
|
[
"BSD-2-Clause"
] | null | null | null |
tests/functional_tests.py
|
sonatagreen/lbry
|
f6473e3383956b7823a76516622bba6c172619e1
|
[
"BSD-2-Clause"
] | null | null | null |
tests/functional_tests.py
|
sonatagreen/lbry
|
f6473e3383956b7823a76516622bba6c172619e1
|
[
"BSD-2-Clause"
] | null | null | null |
import shutil
from multiprocessing import Process, Event, Queue
import logging
import sys
import random
import io
from Crypto.PublicKey import RSA
from Crypto import Random
from Crypto.Hash import MD5
from lbrynet.conf import MIN_BLOB_DATA_PAYMENT_RATE
from lbrynet.conf import MIN_BLOB_INFO_PAYMENT_RATE
from lbrynet.lbrylive.LiveStreamCreator import FileLiveStreamCreator
from lbrynet.lbrylive.PaymentRateManager import BaseLiveStreamPaymentRateManager
from lbrynet.lbrylive.PaymentRateManager import LiveStreamPaymentRateManager
from lbrynet.lbrylive.LiveStreamMetadataManager import DBLiveStreamMetadataManager
from lbrynet.lbrylive.LiveStreamMetadataManager import TempLiveStreamMetadataManager
from lbrynet.lbryfile.LBRYFileMetadataManager import TempLBRYFileMetadataManager, DBLBRYFileMetadataManager
from lbrynet.lbryfilemanager.LBRYFileManager import LBRYFileManager
from lbrynet.core.PaymentRateManager import PaymentRateManager
from lbrynet.core.PTCWallet import PointTraderKeyQueryHandlerFactory, PointTraderKeyExchanger
from lbrynet.core.Session import LBRYSession
from lbrynet.core.client.StandaloneBlobDownloader import StandaloneBlobDownloader
from lbrynet.core.StreamDescriptor import BlobStreamDescriptorWriter
from lbrynet.core.StreamDescriptor import StreamDescriptorIdentifier
from lbrynet.core.StreamDescriptor import download_sd_blob
from lbrynet.lbryfilemanager.LBRYFileCreator import create_lbry_file
from lbrynet.lbryfile.client.LBRYFileOptions import add_lbry_file_to_sd_identifier
from lbrynet.lbryfile.StreamDescriptor import get_sd_info
from twisted.internet import defer, threads, task
from twisted.trial.unittest import TestCase
from twisted.python.failure import Failure
import os
from lbrynet.core.PeerManager import PeerManager
from lbrynet.core.RateLimiter import DummyRateLimiter, RateLimiter
from lbrynet.core.server.BlobAvailabilityHandler import BlobAvailabilityHandlerFactory
from lbrynet.core.server.BlobRequestHandler import BlobRequestHandlerFactory
from lbrynet.core.server.ServerProtocol import ServerProtocolFactory
from lbrynet.lbrylive.server.LiveBlobInfoQueryHandler import CryptBlobInfoQueryHandlerFactory
from lbrynet.lbrylive.client.LiveStreamOptions import add_live_stream_to_sd_identifier
from lbrynet.lbrylive.client.LiveStreamDownloader import add_full_live_stream_downloader_to_sd_identifier
log_format = "%(funcName)s(): %(message)s"
logging.basicConfig(level=logging.WARNING, format=log_format)
class FakeNode(object):
def __init__(self, *args, **kwargs):
pass
def joinNetwork(self, *args):
pass
def stop(self):
pass
class FakeWallet(object):
def __init__(self):
self.private_key = RSA.generate(1024)
self.encoded_public_key = self.private_key.publickey().exportKey()
def start(self):
return defer.succeed(True)
def stop(self):
return defer.succeed(True)
def get_info_exchanger(self):
return PointTraderKeyExchanger(self)
def get_wallet_info_query_handler_factory(self):
return PointTraderKeyQueryHandlerFactory(self)
def reserve_points(self, *args):
return True
def cancel_point_reservation(self, *args):
pass
def send_points(self, *args):
return defer.succeed(True)
def add_expected_payment(self, *args):
pass
def get_balance(self):
return defer.succeed(1000)
def set_public_key_for_peer(self, peer, public_key):
pass
class FakePeerFinder(object):
def __init__(self, start_port, peer_manager, num_peers):
self.start_port = start_port
self.peer_manager = peer_manager
self.num_peers = num_peers
self.count = 0
def find_peers_for_blob(self, *args):
peer_port = self.start_port + self.count
self.count += 1
if self.count >= self.num_peers:
self.count = 0
return defer.succeed([self.peer_manager.get_peer("127.0.0.1", peer_port)])
def run_manage_loop(self):
pass
def stop(self):
pass
class FakeAnnouncer(object):
def __init__(self, *args):
pass
def add_supplier(self, supplier):
pass
def immediate_announce(self, *args):
pass
def run_manage_loop(self):
pass
def stop(self):
pass
class GenFile(io.RawIOBase):
def __init__(self, size, pattern):
io.RawIOBase.__init__(self)
self.size = size
self.pattern = pattern
self.read_so_far = 0
self.buff = b''
self.last_offset = 0
def readable(self):
return True
def writable(self):
return False
def read(self, n=-1):
if n > -1:
bytes_to_read = min(n, self.size - self.read_so_far)
else:
bytes_to_read = self.size - self.read_so_far
output, self.buff = self.buff[:bytes_to_read], self.buff[bytes_to_read:]
bytes_to_read -= len(output)
while bytes_to_read > 0:
self.buff = self._generate_chunk()
new_output, self.buff = self.buff[:bytes_to_read], self.buff[bytes_to_read:]
bytes_to_read -= len(new_output)
output += new_output
self.read_so_far += len(output)
return output
def readall(self):
return self.read()
def _generate_chunk(self, n=2**10):
output = self.pattern[self.last_offset:self.last_offset + n]
n_left = n - len(output)
whole_patterns = n_left // len(self.pattern)  # whole copies of the pattern still needed
output += self.pattern * whole_patterns
self.last_offset = n - len(output)
output += self.pattern[:self.last_offset]
return output
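# A minimal usage sketch of GenFile (illustrative only): it behaves like a read-only
# file of `size` deterministic bytes built from a repeating pattern, so the upload
# tests below never need a real file on disk.
#
#     test_file = GenFile(2 ** 20, b''.join([chr(i) for i in xrange(0, 64, 6)]))
#     first_kb = test_file.read(1024)   # first kilobyte of the generated stream
#     rest = test_file.readall()        # the remaining bytes, generated on demand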
test_create_stream_sd_file = {
'stream_name': '746573745f66696c65',
'blobs': [
{'length': 2097152, 'blob_num': 0,
'blob_hash':
'dc4708f76a5e7af0f1cae0ee96b824e2ed9250c9346c093b441f0a20d3607c17948b6fcfb4bc62020fe5286693d08586',
'iv': '30303030303030303030303030303031'},
{'length': 2097152, 'blob_num': 1,
'blob_hash':
'f4067522c1b49432a2a679512e3917144317caa1abba0c041e0cd2cf9f635d4cf127ce1824fa04189b63916174951f70',
'iv': '30303030303030303030303030303032'},
{'length': 1015056, 'blob_num': 2,
'blob_hash':
'305486c434260484fcb2968ce0e963b72f81ba56c11b08b1af0789b55b44d78422600f9a38e3cf4f2e9569897e5646a9',
'iv': '30303030303030303030303030303033'},
{'length': 0, 'blob_num': 3, 'iv': '30303030303030303030303030303034'}],
'stream_type': 'lbryfile',
'key': '30313233343536373031323334353637',
'suggested_file_name': '746573745f66696c65',
'stream_hash': '6d27fbe10c86d81aacfb897c7a426d0a2214f5a299455a6d315c0f998c4b3545c2dc60906122d94653c23b1898229e3f'}
def start_lbry_uploader(sd_hash_queue, kill_event, dead_event, file_size, ul_rate_limit=None):
sys.modules = sys.modules.copy()
del sys.modules['twisted.internet.reactor']
import twisted.internet
twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor()
sys.modules['twisted.internet.reactor'] = twisted.internet.reactor
from twisted.internet import reactor
logging.debug("Starting the uploader")
Random.atfork()
r = random.Random()
r.seed("start_lbry_uploader")
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = RateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "server"
os.mkdir(db_dir)
session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
stream_info_manager = TempLBRYFileMetadataManager()
lbry_file_manager = LBRYFileManager(session, stream_info_manager, sd_identifier)
if ul_rate_limit is not None:
session.rate_limiter.set_ul_limit(ul_rate_limit)
def start_all():
d = session.setup()
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: lbry_file_manager.setup())
d.addCallback(lambda _: start_server())
d.addCallback(lambda _: create_stream())
d.addCallback(create_stream_descriptor)
d.addCallback(put_sd_hash_on_queue)
def print_error(err):
logging.critical("Server error: %s", err.getErrorMessage())
d.addErrback(print_error)
return d
def start_server():
server_port = None
query_handler_factories = {
BlobAvailabilityHandlerFactory(session.blob_manager): True,
BlobRequestHandlerFactory(session.blob_manager, session.wallet,
PaymentRateManager(session.base_payment_rate_manager)): True,
session.wallet.get_wallet_info_query_handler_factory(): True,
}
server_factory = ServerProtocolFactory(session.rate_limiter,
query_handler_factories,
session.peer_manager)
server_port = reactor.listenTCP(5553, server_factory)
logging.debug("Started listening")
def kill_server():
ds = []
ds.append(session.shut_down())
ds.append(lbry_file_manager.stop())
if server_port:
ds.append(server_port.stopListening())
kill_check.stop()
dead_event.set()
dl = defer.DeferredList(ds)
dl.addCallback(lambda _: reactor.stop())
return dl
def check_for_kill():
if kill_event.is_set():
kill_server()
kill_check = task.LoopingCall(check_for_kill)
kill_check.start(1.0)
return True
def create_stream():
test_file = GenFile(file_size, b''.join([chr(i) for i in xrange(0, 64, 6)]))
d = create_lbry_file(session, lbry_file_manager, "test_file", test_file)
return d
def create_stream_descriptor(stream_hash):
descriptor_writer = BlobStreamDescriptorWriter(session.blob_manager)
d = get_sd_info(lbry_file_manager.stream_info_manager, stream_hash, True)
d.addCallback(descriptor_writer.create_descriptor)
return d
def put_sd_hash_on_queue(sd_hash):
sd_hash_queue.put(sd_hash)
reactor.callLater(1, start_all)
reactor.run()
def start_lbry_reuploader(sd_hash, kill_event, dead_event, ready_event, n, ul_rate_limit=None):
sys.modules = sys.modules.copy()
del sys.modules['twisted.internet.reactor']
import twisted.internet
twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor()
sys.modules['twisted.internet.reactor'] = twisted.internet.reactor
from twisted.internet import reactor
logging.debug("Starting the uploader")
Random.atfork()
r = random.Random()
r.seed("start_lbry_uploader")
wallet = FakeWallet()
peer_port = 5553 + n
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = RateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "server_" + str(n)
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd" + str(n),
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=None, peer_port=peer_port,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
stream_info_manager = TempLBRYFileMetadataManager()
lbry_file_manager = LBRYFileManager(session, stream_info_manager, sd_identifier)
if ul_rate_limit is not None:
session.rate_limiter.set_ul_limit(ul_rate_limit)
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
return factories[0].make_downloader(metadata, chosen_options, prm)
def download_file():
prm = PaymentRateManager(session.base_payment_rate_manager)
d = download_sd_blob(session, sd_hash, prm)
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
d.addCallback(make_downloader, prm)
d.addCallback(lambda downloader: downloader.start())
return d
def start_transfer():
logging.debug("Starting the transfer")
d = session.setup()
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: lbry_file_manager.setup())
d.addCallback(lambda _: download_file())
return d
def start_server():
server_port = None
query_handler_factories = {
BlobAvailabilityHandlerFactory(session.blob_manager): True,
BlobRequestHandlerFactory(session.blob_manager, session.wallet,
PaymentRateManager(session.base_payment_rate_manager)): True,
session.wallet.get_wallet_info_query_handler_factory(): True,
}
server_factory = ServerProtocolFactory(session.rate_limiter,
query_handler_factories,
session.peer_manager)
server_port = reactor.listenTCP(peer_port, server_factory)
logging.debug("Started listening")
def kill_server():
ds = []
ds.append(session.shut_down())
ds.append(lbry_file_manager.stop())
if server_port:
ds.append(server_port.stopListening())
kill_check.stop()
dead_event.set()
dl = defer.DeferredList(ds)
dl.addCallback(lambda _: reactor.stop())
return dl
def check_for_kill():
if kill_event.is_set():
kill_server()
kill_check = task.LoopingCall(check_for_kill)
kill_check.start(1.0)
ready_event.set()
logging.debug("set the ready event")
d = task.deferLater(reactor, 1.0, start_transfer)
d.addCallback(lambda _: start_server())
reactor.run()
def start_live_server(sd_hash_queue, kill_event, dead_event):
sys.modules = sys.modules.copy()
del sys.modules['twisted.internet.reactor']
import twisted.internet
twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor()
sys.modules['twisted.internet.reactor'] = twisted.internet.reactor
from twisted.internet import reactor
logging.debug("In start_server.")
Random.atfork()
r = random.Random()
r.seed("start_live_server")
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "server"
os.mkdir(db_dir)
session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
base_payment_rate_manager = BaseLiveStreamPaymentRateManager(MIN_BLOB_INFO_PAYMENT_RATE)
data_payment_rate_manager = PaymentRateManager(session.base_payment_rate_manager)
payment_rate_manager = LiveStreamPaymentRateManager(base_payment_rate_manager,
data_payment_rate_manager)
stream_info_manager = DBLiveStreamMetadataManager(session.db_dir, hash_announcer)
logging.debug("Created the session")
server_port = []
def start_listening():
logging.debug("Starting the server protocol")
query_handler_factories = {
CryptBlobInfoQueryHandlerFactory(stream_info_manager, session.wallet,
payment_rate_manager): True,
BlobAvailabilityHandlerFactory(session.blob_manager): True,
BlobRequestHandlerFactory(session.blob_manager, session.wallet,
payment_rate_manager): True,
session.wallet.get_wallet_info_query_handler_factory(): True,
}
server_factory = ServerProtocolFactory(session.rate_limiter,
query_handler_factories,
session.peer_manager)
server_port.append(reactor.listenTCP(5553, server_factory))
logging.debug("Server protocol has started")
def create_stream():
logging.debug("Making the live stream")
test_file = GenFile(5209343, b''.join([chr(i + 2) for i in xrange(0, 64, 6)]))
stream_creator_helper = FileLiveStreamCreator(session.blob_manager, stream_info_manager,
"test_file", test_file)
d = stream_creator_helper.setup()
d.addCallback(lambda _: stream_creator_helper.publish_stream_descriptor())
d.addCallback(put_sd_hash_on_queue)
d.addCallback(lambda _: stream_creator_helper.start_streaming())
return d
def put_sd_hash_on_queue(sd_hash):
logging.debug("Telling the client to start running. Stream hash: %s", str(sd_hash))
sd_hash_queue.put(sd_hash)
logging.debug("sd hash has been added to the queue")
def set_dead_event():
logging.debug("Setting the dead event")
dead_event.set()
def print_error(err):
logging.debug("An error occurred during shutdown: %s", err.getTraceback())
def stop_reactor():
logging.debug("Server is stopping its reactor")
reactor.stop()
def shut_down(arg):
logging.debug("Shutting down")
if isinstance(arg, Failure):
logging.error("Shut down is due to an error: %s", arg.getTraceback())
d = defer.maybeDeferred(server_port[0].stopListening)
d.addErrback(print_error)
d.addCallback(lambda _: session.shut_down())
d.addCallback(lambda _: stream_info_manager.stop())
d.addErrback(print_error)
d.addCallback(lambda _: set_dead_event())
d.addErrback(print_error)
d.addCallback(lambda _: reactor.callLater(0, stop_reactor))
d.addErrback(print_error)
return d
def wait_for_kill_event():
d = defer.Deferred()
def check_for_kill():
if kill_event.is_set():
logging.debug("Kill event has been found set")
kill_check.stop()
d.callback(True)
kill_check = task.LoopingCall(check_for_kill)
kill_check.start(1.0)
return d
def enable_live_stream():
base_live_stream_payment_rate_manager = BaseLiveStreamPaymentRateManager(
MIN_BLOB_INFO_PAYMENT_RATE
)
add_live_stream_to_sd_identifier(sd_identifier, base_live_stream_payment_rate_manager)
add_full_live_stream_downloader_to_sd_identifier(session, stream_info_manager, sd_identifier,
base_live_stream_payment_rate_manager)
def run_server():
d = session.setup()
d.addCallback(lambda _: stream_info_manager.setup())
d.addCallback(lambda _: enable_live_stream())
d.addCallback(lambda _: start_listening())
d.addCallback(lambda _: create_stream())
d.addCallback(lambda _: wait_for_kill_event())
d.addBoth(shut_down)
return d
reactor.callLater(1, run_server)
reactor.run()
def start_blob_uploader(blob_hash_queue, kill_event, dead_event, slow):
sys.modules = sys.modules.copy()
del sys.modules['twisted.internet.reactor']
import twisted.internet
twisted.internet.reactor = twisted.internet.epollreactor.EPollReactor()
sys.modules['twisted.internet.reactor'] = twisted.internet.reactor
from twisted.internet import reactor
logging.debug("Starting the uploader")
Random.atfork()
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = RateLimiter()
if slow is True:
peer_port = 5553
db_dir = "server1"
else:
peer_port = 5554
db_dir = "server2"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="efgh",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=peer_port,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
if slow is True:
session.rate_limiter.set_ul_limit(2**11)
def start_all():
d = session.setup()
d.addCallback(lambda _: start_server())
d.addCallback(lambda _: create_single_blob())
d.addCallback(put_blob_hash_on_queue)
def print_error(err):
logging.critical("Server error: %s", err.getErrorMessage())
d.addErrback(print_error)
return d
def start_server():
server_port = None
query_handler_factories = {
BlobAvailabilityHandlerFactory(session.blob_manager): True,
BlobRequestHandlerFactory(session.blob_manager, session.wallet,
PaymentRateManager(session.base_payment_rate_manager)): True,
session.wallet.get_wallet_info_query_handler_factory(): True,
}
server_factory = ServerProtocolFactory(session.rate_limiter,
query_handler_factories,
session.peer_manager)
server_port = reactor.listenTCP(peer_port, server_factory)
logging.debug("Started listening")
def kill_server():
ds = []
ds.append(session.shut_down())
if server_port:
ds.append(server_port.stopListening())
kill_check.stop()
dead_event.set()
dl = defer.DeferredList(ds)
dl.addCallback(lambda _: reactor.stop())
return dl
def check_for_kill():
if kill_event.is_set():
kill_server()
kill_check = task.LoopingCall(check_for_kill)
kill_check.start(1.0)
return True
def create_single_blob():
blob_creator = session.blob_manager.get_blob_creator()
blob_creator.write("0" * 2**21)
return blob_creator.close()
def put_blob_hash_on_queue(blob_hash):
logging.debug("Telling the client to start running. Blob hash: %s", str(blob_hash))
blob_hash_queue.put(blob_hash)
logging.debug("blob hash has been added to the queue")
reactor.callLater(1, start_all)
reactor.run()
class TestTransfer(TestCase):
def setUp(self):
self.server_processes = []
self.session = None
self.stream_info_manager = None
self.lbry_file_manager = None
self.addCleanup(self.take_down_env)
def take_down_env(self):
d = defer.succeed(True)
if self.lbry_file_manager is not None:
d.addCallback(lambda _: self.lbry_file_manager.stop())
if self.session is not None:
d.addCallback(lambda _: self.session.shut_down())
if self.stream_info_manager is not None:
d.addCallback(lambda _: self.stream_info_manager.stop())
def delete_test_env():
dirs = ['server', 'server1', 'server2', 'client']
files = ['test_file']
for di in dirs:
if os.path.exists(di):
shutil.rmtree(di)
for f in files:
if os.path.exists(f):
os.remove(f)
for p in self.server_processes:
p.terminate()
return True
d.addCallback(lambda _: threads.deferToThread(delete_test_env))
return d
@staticmethod
def wait_for_event(event, timeout):
from twisted.internet import reactor
d = defer.Deferred()
def stop():
set_check.stop()
if stop_call.active():
stop_call.cancel()
d.callback(True)
def check_if_event_set():
if event.is_set():
logging.debug("Dead event has been found set")
stop()
def done_waiting():
logging.warning("Event has not been found set and timeout has expired")
stop()
set_check = task.LoopingCall(check_if_event_set)
set_check.start(.1)
stop_call = reactor.callLater(timeout, done_waiting)
return d
@staticmethod
def wait_for_hash_from_queue(hash_queue):
logging.debug("Waiting for the sd_hash to come through the queue")
d = defer.Deferred()
def check_for_start():
if hash_queue.empty() is False:
logging.debug("Client start event has been found set")
start_check.stop()
d.callback(hash_queue.get(False))
else:
logging.debug("Client start event has NOT been found set")
start_check = task.LoopingCall(check_for_start)
start_check.start(1.0)
return d
def test_lbry_transfer(self):
sd_hash_queue = Queue()
kill_event = Event()
dead_event = Event()
uploader = Process(target=start_lbry_uploader, args=(sd_hash_queue, kill_event, dead_event, 5209343))
uploader.start()
self.server_processes.append(uploader)
logging.debug("Testing transfer")
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
self.stream_info_manager = TempLBRYFileMetadataManager()
self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier)
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
return factories[0].make_downloader(metadata, chosen_options, prm)
def download_file(sd_hash):
prm = PaymentRateManager(self.session.base_payment_rate_manager)
d = download_sd_blob(self.session, sd_hash, prm)
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
d.addCallback(make_downloader, prm)
d.addCallback(lambda downloader: downloader.start())
return d
def check_md5_sum():
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
def start_transfer(sd_hash):
logging.debug("Starting the transfer")
d = self.session.setup()
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: download_file(sd_hash))
d.addCallback(lambda _: check_md5_sum())
return d
def stop(arg):
if isinstance(arg, Failure):
logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
else:
logging.debug("Client is stopping normally.")
kill_event.set()
logging.debug("Set the kill event")
d = self.wait_for_event(dead_event, 15)
def print_shutting_down():
logging.info("Client is shutting down")
d.addCallback(lambda _: print_shutting_down())
d.addCallback(lambda _: arg)
return d
d = self.wait_for_hash_from_queue(sd_hash_queue)
d.addCallback(start_transfer)
d.addBoth(stop)
return d
def test_live_transfer(self):
sd_hash_queue = Queue()
kill_event = Event()
dead_event = Event()
server_args = (sd_hash_queue, kill_event, dead_event)
server = Process(target=start_live_server, args=server_args)
server.start()
self.server_processes.append(server)
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
os.mkdir(db_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer, blob_dir=None,
peer_port=5553, use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
self.stream_info_manager = TempLiveStreamMetadataManager(hash_announcer)
d = self.wait_for_hash_from_queue(sd_hash_queue)
def create_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
return factories[0].make_downloader(metadata, chosen_options, prm)
def start_lbry_file(lbry_file):
logging.debug("Calling lbry_file.start()")
return lbry_file.start()
def download_stream(sd_blob_hash):
logging.debug("Downloaded the sd blob. Reading it now")
prm = PaymentRateManager(self.session.base_payment_rate_manager)
d = download_sd_blob(self.session, sd_blob_hash, prm)
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
d.addCallback(create_downloader, prm)
d.addCallback(start_lbry_file)
return d
def do_download(sd_blob_hash):
logging.debug("Starting the download")
d = self.session.setup()
d.addCallback(lambda _: enable_live_stream())
d.addCallback(lambda _: download_stream(sd_blob_hash))
return d
def enable_live_stream():
base_live_stream_payment_rate_manager = BaseLiveStreamPaymentRateManager(
MIN_BLOB_INFO_PAYMENT_RATE
)
add_live_stream_to_sd_identifier(sd_identifier,
base_live_stream_payment_rate_manager)
add_full_live_stream_downloader_to_sd_identifier(self.session, self.stream_info_manager,
sd_identifier,
base_live_stream_payment_rate_manager)
d.addCallback(do_download)
def check_md5_sum():
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "215b177db8eed86d028b37e5cbad55c7")
d.addCallback(lambda _: check_md5_sum())
def stop(arg):
if isinstance(arg, Failure):
logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
else:
logging.debug("Client is stopping normally.")
kill_event.set()
logging.debug("Set the kill event")
d = self.wait_for_event(dead_event, 15)
def print_shutting_down():
logging.info("Client is shutting down")
d.addCallback(lambda _: print_shutting_down())
d.addCallback(lambda _: arg)
return d
d.addBoth(stop)
return d
def test_last_blob_retrieval(self):
kill_event = Event()
dead_event_1 = Event()
blob_hash_queue_1 = Queue()
blob_hash_queue_2 = Queue()
fast_uploader = Process(target=start_blob_uploader,
args=(blob_hash_queue_1, kill_event, dead_event_1, False))
fast_uploader.start()
self.server_processes.append(fast_uploader)
dead_event_2 = Event()
slow_uploader = Process(target=start_blob_uploader,
args=(blob_hash_queue_2, kill_event, dead_event_2, True))
slow_uploader.start()
self.server_processes.append(slow_uploader)
logging.debug("Testing transfer")
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 2)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
d1 = self.wait_for_hash_from_queue(blob_hash_queue_1)
d2 = self.wait_for_hash_from_queue(blob_hash_queue_2)
d = defer.DeferredList([d1, d2], fireOnOneErrback=True)
def get_blob_hash(results):
self.assertEqual(results[0][1], results[1][1])
return results[0][1]
d.addCallback(get_blob_hash)
def download_blob(blob_hash):
prm = PaymentRateManager(self.session.base_payment_rate_manager)
downloader = StandaloneBlobDownloader(blob_hash, self.session.blob_manager, peer_finder,
rate_limiter, prm, wallet)
d = downloader.download()
return d
def start_transfer(blob_hash):
logging.debug("Starting the transfer")
d = self.session.setup()
d.addCallback(lambda _: download_blob(blob_hash))
return d
d.addCallback(start_transfer)
def stop(arg):
if isinstance(arg, Failure):
logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
else:
logging.debug("Client is stopping normally.")
kill_event.set()
logging.debug("Set the kill event")
d1 = self.wait_for_event(dead_event_1, 15)
d2 = self.wait_for_event(dead_event_2, 15)
dl = defer.DeferredList([d1, d2])
def print_shutting_down():
logging.info("Client is shutting down")
dl.addCallback(lambda _: print_shutting_down())
dl.addCallback(lambda _: arg)
return dl
d.addBoth(stop)
return d
def test_double_download(self):
sd_hash_queue = Queue()
kill_event = Event()
dead_event = Event()
uploader = Process(target=start_lbry_uploader, args=(sd_hash_queue, kill_event, dead_event, 5209343))
uploader.start()
self.server_processes.append(uploader)
logging.debug("Testing double download")
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 1)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
downloaders = []
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553, use_upnp=False,
rate_limiter=rate_limiter, wallet=wallet)
self.stream_info_manager = DBLBRYFileMetadataManager(self.session.db_dir)
self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier)
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
return factories[0].make_downloader(metadata, chosen_options, prm)
def append_downloader(downloader):
downloaders.append(downloader)
return downloader
def download_file(sd_hash):
prm = PaymentRateManager(self.session.base_payment_rate_manager)
d = download_sd_blob(self.session, sd_hash, prm)
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
d.addCallback(make_downloader, prm)
d.addCallback(append_downloader)
d.addCallback(lambda downloader: downloader.start())
return d
def check_md5_sum():
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "4ca2aafb4101c1e42235aad24fbb83be")
def delete_lbry_file():
logging.debug("deleting the file...")
d = self.lbry_file_manager.delete_lbry_file(downloaders[0])
d.addCallback(lambda _: self.lbry_file_manager.get_count_for_stream_hash(downloaders[0].stream_hash))
d.addCallback(lambda c: self.stream_info_manager.delete_stream(downloaders[1].stream_hash) if c == 0 else True)
return d
def check_lbry_file():
d = downloaders[1].status()
d.addCallback(lambda _: downloaders[1].status())
def check_status_report(status_report):
self.assertEqual(status_report.num_known, status_report.num_completed)
self.assertEqual(status_report.num_known, 3)
d.addCallback(check_status_report)
return d
def start_transfer(sd_hash):
logging.debug("Starting the transfer")
d = self.session.setup()
d.addCallback(lambda _: self.stream_info_manager.setup())
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: download_file(sd_hash))
d.addCallback(lambda _: check_md5_sum())
d.addCallback(lambda _: download_file(sd_hash))
d.addCallback(lambda _: delete_lbry_file())
d.addCallback(lambda _: check_lbry_file())
return d
def stop(arg):
if isinstance(arg, Failure):
logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
else:
logging.debug("Client is stopping normally.")
kill_event.set()
logging.debug("Set the kill event")
d = self.wait_for_event(dead_event, 15)
def print_shutting_down():
logging.info("Client is shutting down")
d.addCallback(lambda _: print_shutting_down())
d.addCallback(lambda _: arg)
return d
d = self.wait_for_hash_from_queue(sd_hash_queue)
d.addCallback(start_transfer)
d.addBoth(stop)
return d
def test_multiple_uploaders(self):
sd_hash_queue = Queue()
num_uploaders = 3
kill_event = Event()
dead_events = [Event() for _ in range(num_uploaders)]
ready_events = [Event() for _ in range(1, num_uploaders)]
uploader = Process(target=start_lbry_uploader, args=(sd_hash_queue, kill_event, dead_events[0],
9373419, 2**22))
uploader.start()
self.server_processes.append(uploader)
logging.debug("Testing multiple uploaders")
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, num_uploaders)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=None, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
self.stream_info_manager = TempLBRYFileMetadataManager()
self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier)
def start_additional_uploaders(sd_hash):
for i in range(1, num_uploaders):
uploader = Process(target=start_lbry_reuploader,
args=(sd_hash, kill_event, dead_events[i], ready_events[i-1], i, 2**10))
uploader.start()
self.server_processes.append(uploader)
return defer.succeed(True)
def wait_for_ready_events():
return defer.DeferredList([self.wait_for_event(ready_event, 60) for ready_event in ready_events])
def make_downloader(metadata, prm):
info_validator = metadata.validator
options = metadata.options
factories = metadata.factories
chosen_options = [o.default_value for o in options.get_downloader_options(info_validator, prm)]
return factories[0].make_downloader(metadata, chosen_options, prm)
def download_file(sd_hash):
prm = PaymentRateManager(self.session.base_payment_rate_manager)
d = download_sd_blob(self.session, sd_hash, prm)
d.addCallback(sd_identifier.get_metadata_for_sd_blob)
d.addCallback(make_downloader, prm)
d.addCallback(lambda downloader: downloader.start())
return d
def check_md5_sum():
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "e5941d615f53312fd66638239c1f90d5")
def start_transfer(sd_hash):
logging.debug("Starting the transfer")
d = start_additional_uploaders(sd_hash)
d.addCallback(lambda _: wait_for_ready_events())
d.addCallback(lambda _: self.session.setup())
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: download_file(sd_hash))
d.addCallback(lambda _: check_md5_sum())
return d
def stop(arg):
if isinstance(arg, Failure):
logging.debug("Client is stopping due to an error. Error: %s", arg.getTraceback())
else:
logging.debug("Client is stopping normally.")
kill_event.set()
logging.debug("Set the kill event")
d = defer.DeferredList([self.wait_for_event(dead_event, 15) for dead_event in dead_events])
def print_shutting_down():
logging.info("Client is shutting down")
d.addCallback(lambda _: print_shutting_down())
d.addCallback(lambda _: arg)
return d
d = self.wait_for_hash_from_queue(sd_hash_queue)
d.addCallback(start_transfer)
d.addBoth(stop)
return d
class TestStreamify(TestCase):
def setUp(self):
self.session = None
self.stream_info_manager = None
self.lbry_file_manager = None
self.addCleanup(self.take_down_env)
def take_down_env(self):
d = defer.succeed(True)
if self.lbry_file_manager is not None:
d.addCallback(lambda _: self.lbry_file_manager.stop())
if self.session is not None:
d.addCallback(lambda _: self.session.shut_down())
if self.stream_info_manager is not None:
d.addCallback(lambda _: self.stream_info_manager.stop())
def delete_test_env():
shutil.rmtree('client')
if os.path.exists("test_file"):
os.remove("test_file")
d.addCallback(lambda _: threads.deferToThread(delete_test_env))
return d
def test_create_stream(self):
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 2)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
self.stream_info_manager = TempLBRYFileMetadataManager()
self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier)
d = self.session.setup()
d.addCallback(lambda _: self.stream_info_manager.setup())
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
def verify_equal(sd_info):
self.assertEqual(sd_info, test_create_stream_sd_file)
def verify_stream_descriptor_file(stream_hash):
d = get_sd_info(self.lbry_file_manager.stream_info_manager, stream_hash, True)
d.addCallback(verify_equal)
return d
def iv_generator():
iv = 0
while 1:
iv += 1
yield "%016d" % iv
def create_stream():
test_file = GenFile(5209343, b''.join([chr(i + 3) for i in xrange(0, 64, 6)]))
d = create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file,
key="0123456701234567", iv_generator=iv_generator())
return d
d.addCallback(lambda _: create_stream())
d.addCallback(verify_stream_descriptor_file)
return d
def test_create_and_combine_stream(self):
wallet = FakeWallet()
peer_manager = PeerManager()
peer_finder = FakePeerFinder(5553, peer_manager, 2)
hash_announcer = FakeAnnouncer()
rate_limiter = DummyRateLimiter()
sd_identifier = StreamDescriptorIdentifier()
db_dir = "client"
blob_dir = os.path.join(db_dir, "blobfiles")
os.mkdir(db_dir)
os.mkdir(blob_dir)
self.session = LBRYSession(MIN_BLOB_DATA_PAYMENT_RATE, db_dir=db_dir, lbryid="abcd",
peer_finder=peer_finder, hash_announcer=hash_announcer,
blob_dir=blob_dir, peer_port=5553,
use_upnp=False, rate_limiter=rate_limiter, wallet=wallet)
self.stream_info_manager = DBLBRYFileMetadataManager(self.session.db_dir)
self.lbry_file_manager = LBRYFileManager(self.session, self.stream_info_manager, sd_identifier)
def start_lbry_file(lbry_file):
logging.debug("Calling lbry_file.start()")
d = lbry_file.start()
return d
def combine_stream(stream_hash):
prm = PaymentRateManager(self.session.base_payment_rate_manager)
d = self.lbry_file_manager.add_lbry_file(stream_hash, prm)
d.addCallback(start_lbry_file)
def check_md5_sum():
f = open('test_file')
hashsum = MD5.new()
hashsum.update(f.read())
self.assertEqual(hashsum.hexdigest(), "68959747edc73df45e45db6379dd7b3b")
d.addCallback(lambda _: check_md5_sum())
return d
def create_stream():
test_file = GenFile(53209343, b''.join([chr(i + 5) for i in xrange(0, 64, 6)]))
return create_lbry_file(self.session, self.lbry_file_manager, "test_file", test_file,
suggested_file_name="test_file")
d = self.session.setup()
d.addCallback(lambda _: self.stream_info_manager.setup())
d.addCallback(lambda _: add_lbry_file_to_sd_identifier(sd_identifier))
d.addCallback(lambda _: self.lbry_file_manager.setup())
d.addCallback(lambda _: create_stream())
d.addCallback(combine_stream)
return d
| 36.690962
| 123
| 0.640862
|
58d232426f07807a62e7fe5640e5f811478bd18d
| 1,014
|
py
|
Python
|
adapters/repository.py
|
alenasf/Architecture_Patterns
|
ca5ca53639368e447d9d049c772ef737386a3d1e
|
[
"Apache-2.0"
] | null | null | null |
adapters/repository.py
|
alenasf/Architecture_Patterns
|
ca5ca53639368e447d9d049c772ef737386a3d1e
|
[
"Apache-2.0"
] | null | null | null |
adapters/repository.py
|
alenasf/Architecture_Patterns
|
ca5ca53639368e447d9d049c772ef737386a3d1e
|
[
"Apache-2.0"
] | null | null | null |
import abc
from domain import model
class AbstractRepository(abc.ABC):
@abc.abstractmethod
def add(self, batch: model.Batch):
raise NotImplementedError
@abc.abstractmethod
def get(self, reference) -> model.Batch:
raise NotImplementedError
class SqlAlchemyRepository(AbstractRepository):
def __init__(self, session):
self.session = session
def add(self, batch):
self.session.add(batch)
def get(self, reference):
return self.session.query(model.Batch).filter_by(reference=reference).one()
def list(self):
return self.session.query(model.Batch).all()
# A simple fake repository using a set
class FakeRepository(AbstractRepository):
def __init__(self, batches):
self._batches = set(batches)
def add(self, batch):
self._batches.add(batch)
def get(self, reference):
return next(b for b in self._batches if b.reference == reference)
def list(self):
return list(self._batches)
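# Usage sketch (illustrative): because both implementations share the
# AbstractRepository interface, service-layer code can be exercised against
# FakeRepository instead of a real database. The model.Batch constructor
# arguments below are assumed from the domain model and may differ.
if __name__ == "__main__":
    batch = model.Batch("batch-001", "SMALL-TABLE", 20, eta=None)  # assumed signature
    repo = FakeRepository([])
    repo.add(batch)
    assert repo.get("batch-001") is batch
    assert repo.list() == [batch]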
| 22.043478
| 83
| 0.677515
|
2aebe9bdc6292497cf6d896c32f6d47a623b92ab
| 4,835
|
py
|
Python
|
agglomerate/items.py
|
martinber/agglomerate
|
f326e72a9dac328fac3cd46519a048ed861b71e3
|
[
"MIT"
] | null | null | null |
agglomerate/items.py
|
martinber/agglomerate
|
f326e72a9dac328fac3cd46519a048ed861b71e3
|
[
"MIT"
] | null | null | null |
agglomerate/items.py
|
martinber/agglomerate
|
f326e72a9dac328fac3cd46519a048ed861b71e3
|
[
"MIT"
] | null | null | null |
import PIL.Image
import os
import agglomerate.math
class Item:
"""
Represents a sprite or a group of sprites, with a rectangular shape.
Something that will be placed by an algorithm.
    Sprites can be cropped but groups cannot.
    An algorithm works with items, placing them and rotating them if necessary.
    Algorithms can crop sprites but can't crop groups, so they need to check the
    type field.
**Fields**
position
Vector2 position in the container (sheet or group) in pixels, top-left
corner regardless of rotation.
size
Vector2 in pixels of the sprite in the sheet
rotated
true if the item was rotated 90 degrees clockwise by the algorithm
type
string, can be "sprite", "group", "parameters" or None
"""
def __init__(self, position=agglomerate.math.Vector2(),
size=agglomerate.math.Vector2()):
"""
Creates an item with the optional given position and size Vector2
"""
self.position = position
self.size = size
self.rotated = False
self.type = None
class Sprite(Item):
"""
    Item that contains a Pillow image and its metadata.
Sprites can be cropped analysing first the images.
**Fields**
image
PIL image
name
name string to be used when creating the coordinates file
rotated
true if the sprite was rotated 90 degrees clockwise by the algorithm
cropped
true if the sprite was cropped
position
Vector2 position in the sheet in pixels, top-left corner regardless of
rotation.
size
        Vector2 in pixels of the sprite in the sheet; this isn't the original
        size if the sprite was cropped
original_size
Vector2 in pixels of the original size of the sprite
crop_l
Amount of pixels cropped in the left
crop_t
Amount of pixels cropped in the top
crop_r
Amount of pixels cropped in the right
crop_b
Amount of pixels cropped in the bottom
"""
def __init__(self, path):
"""
Opens the image in the specified file and processes it
:param str path: path to image file
"""
self.image = PIL.Image.open(path)
self.name = self.get_name_from_path(path)
self.rotated = False
self.cropped = False
self.position = None
self.size = agglomerate.math.Vector2.from_tuple(self.image.size)
self.original_size = self.size
self.crop_l = 0
self.crop_t = 0
self.crop_r = 0
        self.crop_b = 0
self.type = "sprite"
def get_name_from_path(self, path):
"""
Generates a name from the file name
The name is the file name
:param str path: path to file
:return: file name
:rtype: str
"""
return os.path.basename(path)
class Group(Item):
"""
Has a list of items, a settings instance, and the inherited attributes
from Item.
Having a settings instance results in a duplicate size property:
settings.size and the size property inherited from Item. Both point to the
same Vector2 instance.
Example tree::
group
├─ items (list)
| ├─ group1
| | ├─ items (list)
| | └─ settings
| ├─ group2
| | ├─ items (list)
| | └─ settings
| ├─ sprite1
| ├─ sprite2
| └─ ...
|
└─ settings
"""
def __init__(self, items, settings):
self.items = items
self.settings = settings
self.position = None
self.rotated = False
self.type = "group"
@property
def size(self):
return self.settings.size
@size.setter
def size(self, value):
self.settings.size = value
class Parameters(Group):
"""
Contains everything that the packer needs to work, i.e. the sprites
organized in groups, and the settings for the sheet.
    Has an items list and the sheet settings. This is like a group class but
    with an extended settings (SheetSettings instead of Settings) and a different
name. Also the type attribute is "parameters", "group" or None
Example tree::
parameters
├─ items (list)
| ├─ group1
| | ├─ items (list)
| | └─ settings
| ├─ group2
| | ├─ items (list)
| | └─ settings
| ├─ sprite1
| ├─ sprite2
| └─ ...
|
└─ settings
"""
def __init__(self, items, settings):
super().__init__(items, settings)
self.position = agglomerate.math.Vector2(0, 0)
self.size = settings.size
self.rotated = False
self.type = "parameters"
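# Usage sketch (illustrative): how a packing algorithm might consume Item
# objects. Assumes agglomerate.math.Vector2 exposes `x`/`y` components; only
# classes already defined in this module are otherwise used.
def _demo_place_in_row(items):
    """Place items left to right along y=0 and return the total row width."""
    x = 0
    for item in items:
        item.position = agglomerate.math.Vector2(x, 0)
        x += item.size.x
    return x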
| 25.855615
| 80
| 0.591727
|
c764d9c0742aa0cc9406ba2c174804dd9d0484e8
| 3,385
|
py
|
Python
|
day2/feeder/base_microservices.py
|
liang-faan/SmartIOT-Diec
|
8336a4b558295295f10a82cf350d8b7ff3fb9f5c
|
[
"MIT"
] | 5
|
2019-05-30T02:55:16.000Z
|
2020-03-03T14:18:23.000Z
|
day2/feeder/base_microservices.py
|
liang-faan/SmartIOT-Diec
|
8336a4b558295295f10a82cf350d8b7ff3fb9f5c
|
[
"MIT"
] | 3
|
2019-12-27T00:53:23.000Z
|
2020-02-17T05:29:19.000Z
|
day2/feeder/base_microservices.py
|
liang-faan/SmartIOT-Diec
|
8336a4b558295295f10a82cf350d8b7ff3fb9f5c
|
[
"MIT"
] | 4
|
2019-06-04T06:26:14.000Z
|
2021-01-07T04:25:32.000Z
|
#
# Base Microservice
#
# Base microservice classes
# Abstract the communication protocol from derived classes
#
# Author: Lisa Ong, NUS/ISS
#
import argparse
import json
# References: https://www.eclipse.org/paho/clients/python/docs/
import paho.mqtt.client as mqtt
import paho.mqtt.publish as publish
def _on_connect(client, userdata, flags, rc):
userdata.on_connect()
def _on_message(client, userdata, msg):
try:
# JSON requires doublequotes instead of singlequotes
# decode converts byte array to str for JSON parser
payload = json.loads(msg.payload.replace(b"'", b'"').decode('utf-8'))
userdata.on_message(msg.topic, payload)
except Exception as e:
# exceptions tend to get swallowed up in callbacks
# print them here
print('Exception:', e)
class MqttMicroservice:
def __init__(self, channels):
# protocol-specific settings
self.client = mqtt.Client(userdata=self)
self.client.on_connect = _on_connect
self.client.on_message = _on_message
self.channels = channels
self.hostname = 'localhost'
self.port = 1883
def on_connect(self):
"""Called when the MQTT client is connected
"""
for channel in self.channels:
topic = self.topic_id + '/' + channel
self.client.subscribe(topic)
print('Subscribed to:' + topic)
def on_message(self, topic, payload):
"""Called when an MQTT client is received
"""
print(topic, payload)
def publish_message(self, channel, msg):
"""Publishes a message to an MQTT topic
"""
print('pub:', msg)
publish.single(self.topic_id + '/' + channel,
payload=json.dumps(msg), retain=False,
hostname=self.hostname, port=self.port,
protocol=mqtt.MQTTv311)
def connect(self, loop_forever=False):
"""Connect to the MQTT broker
"""
self.client.connect(self.hostname, self.port)
# https://www.eclipse.org/paho/clients/python/docs/#network-loop
if loop_forever:
self.client.loop_forever()
else:
self.client.loop_start()
def disconnect(self):
"""Disconnect from the MQTT broker
"""
self.client.disconnect()
def run(self):
"""Called to run the service
"""
try:
self.connect(loop_forever=True)
finally:
self.disconnect() # cleanly disconnect
def parse_args(self, description):
parser = argparse.ArgumentParser(description=description)
parser.add_argument('topic_id', type=str,
help='Top level topic identifier, e.g. /dev/ttyACM0 or COM4')
parser.add_argument('--hostname', type=str, default='localhost',
help='MQTT broker hostname, defaults to TCP localhost')
parser.add_argument('--port', type=int, default=1883, help='MQTT broker port, defaults to 1883')
args = parser.parse_args()
self.topic_id = args.topic_id
if args.hostname is not None:
self.hostname = args.hostname
if args.port is not None:
self.port = args.port
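# Subclassing sketch (illustrative): a concrete microservice normally only
# overrides on_message (and optionally on_connect); connection handling and
# argument parsing stay in MqttMicroservice. The 'echo' channel used here is
# an arbitrary example name.
class EchoMicroservice(MqttMicroservice):
    def on_message(self, topic, payload):
        # Re-publish whatever arrives on a subscribed channel to 'echo'
        print('received:', topic, payload)
        self.publish_message('echo', payload)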
if __name__ == '__main__':
# for testing purposes only
service = MqttMicroservice(['stream'])
service.parse_args('MQTT Microservice')
service.run()
| 30.495495
| 104
| 0.626883
|
b8d0b4cf63d8d66bf068eedf00a474b9d6c6d771
| 5,063
|
py
|
Python
|
src/radical/cm/utils/calculator/api/resource.py
|
iparask/campaign_manager
|
7d7f3934fdde6acadc7581825697c89009fcf6fd
|
[
"MIT"
] | 1
|
2019-12-18T16:46:35.000Z
|
2019-12-18T16:46:35.000Z
|
src/radical/cm/utils/calculator/api/resource.py
|
radical-project/campaign_manager
|
7d7f3934fdde6acadc7581825697c89009fcf6fd
|
[
"MIT"
] | 22
|
2019-12-12T16:31:36.000Z
|
2020-12-14T22:45:40.000Z
|
src/radical/cm/utils/calculator/api/resource.py
|
iparask/campaign_manager
|
7d7f3934fdde6acadc7581825697c89009fcf6fd
|
[
"MIT"
] | null | null | null |
import radical.utils as ru
import numpy as np
from ..entities.core import Core
from ..exceptions import CalcTypeError
class Resource(object):
def __init__(self, num_cores=1, perf_dist='uniform',
dist_mean=10, temporal_var=0, spatial_var=0,
no_uid=False):
# Initialize
self._uid = None
self._dist_options = ['uniform', 'normal']
self._core_list = list()
if not isinstance(num_cores, int):
raise CalcTypeError(expected_type=int,
actual_type=type(num_cores),
entity='num_cores'
)
        if not isinstance(perf_dist, str):
            raise CalcTypeError(expected_type=str,
                                actual_type=type(perf_dist),
                                entity='perf_dist'
                                )
if perf_dist not in self._dist_options:
raise ValueError("possible distributions are %s" %
(','.join(self._dist_options)))
if not (isinstance(dist_mean, int) or isinstance(dist_mean, float)):
raise CalcTypeError(expected_type='int or float',
actual_type=type(dist_mean),
entity='dist_mean'
)
if not (isinstance(temporal_var, int) or isinstance(temporal_var, float)):
raise CalcTypeError(expected_type='int or float',
actual_type=type(temporal_var),
entity='temporal_var'
)
if not (isinstance(spatial_var, int) or isinstance(spatial_var, float)):
raise CalcTypeError(expected_type='int or float',
actual_type=type(spatial_var),
entity='spatial_var'
)
self._num_cores = num_cores
self._perf_dist = perf_dist
self._dist_mean = dist_mean
self._temp_var = temporal_var
self._spat_var = spatial_var
if not no_uid:
self._uid = ru.generate_id('resource')
@property
def uid(self):
return self._uid
@property
def num_cores(self):
return self._num_cores
@property
def core_list(self):
return self._core_list
def create_core_list(self):
        # Select N samples from the selected distribution. Currently the code
        # creates a set of samples based on the spatial variance. These samples
        # are then used as means for the set of cores used in the emulation.
        # FIXME: Based on a spatial mean, a spatial variance and a distribution, a
        # set of resources is created. If these resources are dynamic and their
        # performance varies over time, then each resource's performance needs
        # to be updated based on a distribution and on different moments in time.
if self._perf_dist == 'uniform':
samples = list(np.random.uniform(low=self._dist_mean - self._spat_var,
high=self._dist_mean + self._spat_var,
size=self._num_cores))
elif self._perf_dist == 'normal':
samples = list(np.random.normal(self._dist_mean, self._spat_var,
self._num_cores))
# Create N execution units with the selected samples
        # Some samples from a non-uniform distribution might be negative. Those
        # samples should be discarded or folded around 0?
if not self._core_list:
self._core_list = [Core(abs(samples[i]), distribution=self._perf_dist,
var=self._temp_var)
for i in range(self._num_cores)]
elif self._temp_var:
for ind, core in enumerate(self._core_list):
core.perf = abs(samples[ind])
def to_dict(self):
core_list_as_dict = list()
for core in self._core_list:
core_list_as_dict.append(core.to_dict())
return {
'uid': self._uid,
'num_cores': self._num_cores,
'perf_dist': self._perf_dist,
'dist_mean': self._dist_mean,
'temp_var': self._temp_var,
'spat_var': self._spat_var,
'core_list': core_list_as_dict
}
def from_dict(self, entry):
self._uid = entry['uid']
self._num_cores = entry['num_cores']
self._perf_dist = entry['perf_dist']
self._dist_mean = entry['dist_mean']
self._temp_var = entry['temp_var']
self._spat_var = entry['spat_var']
for core in entry['core_list']:
c = Core(no_uid=True)
c.from_dict(core)
self._core_list.append(c)
def reset(self):
for n in self._core_list:
n.task = None
n.util = list()
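# Usage sketch (illustrative): build a small resource, generate its cores and
# serialize it. Only the public API defined above is used; the numbers are
# arbitrary.
if __name__ == '__main__':
    res = Resource(num_cores=4, perf_dist='normal', dist_mean=10,
                   temporal_var=0.5, spatial_var=2)
    res.create_core_list()
    print(len(res.core_list))          # -> 4
    print(res.to_dict()['perf_dist'])  # -> 'normal'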
| 36.956204
| 82
| 0.551649
|
7923ea3139e767982e4925638b43a8fb12721259
| 2,625
|
py
|
Python
|
ligh/posts/routes.py
|
madhavan-raja/ligh
|
d97cf32975a0709cda1de1e1c8cd7478c62932a2
|
[
"MIT"
] | null | null | null |
ligh/posts/routes.py
|
madhavan-raja/ligh
|
d97cf32975a0709cda1de1e1c8cd7478c62932a2
|
[
"MIT"
] | null | null | null |
ligh/posts/routes.py
|
madhavan-raja/ligh
|
d97cf32975a0709cda1de1e1c8cd7478c62932a2
|
[
"MIT"
] | null | null | null |
from flask import render_template, url_for, flash, redirect, request, abort, Blueprint
from flask_login import current_user, login_required
from ligh import db
from ligh.models import Post
from ligh.posts.forms import PostForm
from ligh.posts.utils import save_post_picture
posts = Blueprint('posts', __name__)
@posts.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
form = PostForm()
if form.validate_on_submit():
if form.use_default_post_picture.data:
post_picture_filename = "default.jpg"
elif form.post_picture.data:
post_picture_filename = save_post_picture(form.post_picture.data)
else:
post_picture_filename = "default.jpg"
post = Post(title=form.title.data, subtitle=form.subtitle.data, post_picture=post_picture_filename, content=form.content.data, author=current_user)
db.session.add(post)
db.session.commit()
flash('New post created', 'success')
return redirect(url_for('main.home'))
return render_template('create_post.html', title='New Post', form=form)
@posts.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
return render_template('post.html', title=post.title, post=post)
@posts.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
form = PostForm()
if form.validate_on_submit():
post.title = form.title.data
post.subtitle = form.subtitle.data
if form.use_default_post_picture.data:
post.post_picture = "default.jpg"
elif form.post_picture.data:
post_picture_filename = save_post_picture(form.post_picture.data)
post.post_picture = post_picture_filename
post.content = form.content.data
db.session.commit()
flash('Post updated', 'success')
return redirect(url_for('posts.post', post_id=post.id))
elif request.method == 'GET':
form.title.data = post.title
form.subtitle.data = post.subtitle
form.content.data = post.content
return render_template('create_post.html', title='Update Post', form=form)
@posts.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Post deleted', 'success')
return redirect(url_for('main.home'))
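# Registration sketch (illustrative): the blueprint's routes only become
# reachable once it is registered on a Flask app; the real project presumably
# does this in an application factory. Minimal wiring:
if __name__ == '__main__':
    from flask import Flask
    _app = Flask(__name__)
    _app.register_blueprint(posts)
    print(_app.url_map)  # lists the /post/... routes defined above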
| 36.458333
| 155
| 0.688762
|
46246294620371b6f04ad1e1e7b4dea8883a7420
| 564
|
py
|
Python
|
Section 13 - Leitura e Escrita em Arquivos/exercicios/ex10.py
|
thiagofreitascarneiro/Python-avancado-Geek-University
|
861b742ad6b30955fcbe63274b8cf8afc6ca028f
|
[
"MIT"
] | null | null | null |
Section 13 - Leitura e Escrita em Arquivos/exercicios/ex10.py
|
thiagofreitascarneiro/Python-avancado-Geek-University
|
861b742ad6b30955fcbe63274b8cf8afc6ca028f
|
[
"MIT"
] | null | null | null |
Section 13 - Leitura e Escrita em Arquivos/exercicios/ex10.py
|
thiagofreitascarneiro/Python-avancado-Geek-University
|
861b742ad6b30955fcbe63274b8cf8afc6ca028f
|
[
"MIT"
] | null | null | null |
''' Write a program that receives the name of an input file and the name of an output file. Each line of the input
file contains the name of a city (occupying 40 characters) and its number of inhabitants. The program must read
the input file and generate an output file containing the name of the most populous city followed by its number
of inhabitants.
'''
arquivo = input('Digite o nome do arquivo: ')
with open(f'{arquivo}', 'r') as arq:
    with open(f'{arquivo}.populosa', 'w') as arq2:
        maior_cidade, maior_habitantes = None, -1
        for linha in arq:
            cidade, habitantes = linha[:40].strip(), int(linha[40:])
            if habitantes > maior_habitantes:
                maior_cidade, maior_habitantes = cidade, habitantes
        arq2.write(f'{maior_cidade} {maior_habitantes}\n')
| 43.384615
| 118
| 0.693262
|
c66cdccf67aa521551a6ae284c1bd99e35a80a63
| 207
|
py
|
Python
|
problemsets/Codeforces/Python/A1042.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1042.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
problemsets/Codeforces/Python/A1042.py
|
juarezpaulino/coderemite
|
a4649d3f3a89d234457032d14a6646b3af339ac1
|
[
"Apache-2.0"
] | null | null | null |
"""
*
* Author: Juarez Paulino(coderemite)
* Email: juarez.paulino@gmail.com
*
"""
i=input
n=int(i())                   # number of entries
m=int(i())                   # total amount to distribute among them
a=[int(i())for _ in[0]*n]    # initial values
v=max(a)                     # current maximum
u=sum(v-x for x in a)        # amount needed to raise every entry up to v
# minimum possible maximum: level everything up to v, then spread the remainder
# evenly (0--(m-u)//n is ceiling division); maximum possible: give all m to one entry
print(max(0,0--(m-u)//n)+v,m+v)
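# Worked instance: n=4, m=6, a=[1, 1, 1, 1] gives v=1 and u=0, so the minimum
# is 1 + ceil(6/4) = 3 and the maximum is 1 + 6 = 7; the program prints "3 7".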
| 15.923077
| 38
| 0.570048
|
3bc59157055ce4bc4ad6f7850425e20734278eeb
| 172,739
|
py
|
Python
|
pandas/core/groupby/groupby.py
|
deflatSOCO/pandas
|
415012f4f38ca0cf41717c51e49bd2349cba09a8
|
[
"BSD-3-Clause"
] | 1
|
2021-05-12T21:50:27.000Z
|
2021-05-12T21:50:27.000Z
|
pandas/core/groupby/groupby.py
|
fillipe-gsm/pandas
|
72252e1b87a0a06fd1bec9c7c7548bae33d7b738
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | null | null | null |
pandas/core/groupby/groupby.py
|
fillipe-gsm/pandas
|
72252e1b87a0a06fd1bec9c7c7548bae33d7b738
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 2
|
2018-07-21T08:09:14.000Z
|
2021-08-07T09:10:54.000Z
|
import types
from functools import wraps, partial
import numpy as np
import datetime
import collections
import warnings
import copy
from textwrap import dedent
from contextlib import contextmanager
from pandas.compat import (
zip, range, lzip,
callable, map
)
from pandas import compat
from pandas.compat.numpy import function as nv
from pandas.compat import set_function_name
from pandas.core.dtypes.common import (
is_numeric_dtype,
is_timedelta64_dtype, is_datetime64_dtype,
is_categorical_dtype,
is_interval_dtype,
is_datetimelike,
is_datetime64_any_dtype,
is_bool, is_integer_dtype,
is_complex_dtype,
is_bool_dtype,
is_scalar,
is_list_like,
is_hashable,
needs_i8_conversion,
_ensure_float64,
_ensure_platform_int,
_ensure_int64,
_ensure_object,
_ensure_categorical,
_ensure_float)
from pandas.core.dtypes.cast import maybe_downcast_to_dtype
from pandas.core.dtypes.generic import ABCSeries
from pandas.core.dtypes.missing import isna, isnull, notna, _maybe_fill
from pandas.core.base import (PandasObject, SelectionMixin, GroupByError,
DataError, SpecificationError)
from pandas.core.index import (Index, MultiIndex,
CategoricalIndex, _ensure_index)
from pandas.core.arrays import ExtensionArray, Categorical
from pandas.core.frame import DataFrame
from pandas.core.generic import NDFrame, _shared_docs
from pandas.core.internals import BlockManager, make_block
from pandas.core.series import Series
from pandas.core.panel import Panel
from pandas.core.sorting import (get_group_index_sorter, get_group_index,
compress_group_index, get_flattened_iterator,
decons_obs_group_ids, get_indexer_dict)
from pandas.util._decorators import (cache_readonly, Substitution,
Appender, make_signature)
from pandas.io.formats.printing import pprint_thing
from pandas.util._validators import validate_kwargs
import pandas.core.common as com
import pandas.core.algorithms as algorithms
from pandas.core.config import option_context
from pandas.plotting._core import boxplot_frame_groupby
from pandas._libs import (lib, reduction,
groupby as libgroupby,
Timestamp, NaT, iNaT)
from pandas._libs.lib import count_level_2d
_doc_template = """
See also
--------
pandas.Series.%(name)s
pandas.DataFrame.%(name)s
pandas.Panel.%(name)s
"""
_apply_docs = dict(
template="""
Apply function ``func`` group-wise and combine the results together.
The function passed to ``apply`` must take a {input} as its first
argument and return a dataframe, a series or a scalar. ``apply`` will
then take care of combining the results back together into a single
dataframe or series. ``apply`` is therefore a highly flexible
grouping method.
While ``apply`` is a very flexible method, its downside is that
using it can be quite a bit slower than using more specific methods.
    Pandas offers a wide range of methods that will be much faster
than using ``apply`` for their specific purposes, so try to use them
before reaching for ``apply``.
Parameters
----------
func : function
A callable that takes a {input} as its first argument, and
returns a dataframe, a series or a scalar. In addition the
callable may take positional and keyword arguments
args, kwargs : tuple and dict
Optional positional and keyword arguments to pass to ``func``
Returns
-------
applied : Series or DataFrame
Notes
-----
In the current implementation ``apply`` calls func twice on the
first group to decide whether it can take a fast or slow code
path. This can lead to unexpected behavior if func has
side-effects, as they will take effect twice for the first
group.
Examples
--------
{examples}
See also
--------
pipe : Apply function to the full GroupBy object instead of to each
group.
aggregate, transform
""",
dataframe_examples="""
>>> df = pd.DataFrame({'A': 'a a b'.split(), 'B': [1,2,3], 'C': [4,6, 5]})
>>> g = df.groupby('A')
From ``df`` above we can see that ``g`` has two groups, ``a``, ``b``.
Calling ``apply`` in various ways, we can get different grouping results:
Example 1: below the function passed to ``apply`` takes a dataframe as
its argument and returns a dataframe. ``apply`` combines the result for
each group together into a new dataframe:
>>> g.apply(lambda x: x / x.sum())
B C
0 0.333333 0.4
1 0.666667 0.6
2 1.000000 1.0
Example 2: The function passed to ``apply`` takes a dataframe as
its argument and returns a series. ``apply`` combines the result for
each group together into a new dataframe:
>>> g.apply(lambda x: x.max() - x.min())
B C
A
a 1 2
b 0 0
Example 3: The function passed to ``apply`` takes a dataframe as
its argument and returns a scalar. ``apply`` combines the result for
each group together into a series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.C.max() - x.B.min())
A
a 5
b 2
dtype: int64
""",
series_examples="""
>>> ser = pd.Series([0, 1, 2], index='a a b'.split())
>>> g = ser.groupby(ser.index)
From ``ser`` above we can see that ``g`` has two groups, ``a``, ``b``.
Calling ``apply`` in various ways, we can get different grouping results:
Example 1: The function passed to ``apply`` takes a series as
its argument and returns a series. ``apply`` combines the result for
each group together into a new series:
>>> g.apply(lambda x: x*2 if x.name == 'b' else x/2)
0 0.0
1 0.5
2 4.0
dtype: float64
Example 2: The function passed to ``apply`` takes a series as
its argument and returns a scalar. ``apply`` combines the result for
each group together into a series, including setting the index as
appropriate:
>>> g.apply(lambda x: x.max() - x.min())
a 1
b 0
dtype: int64
""")
_pipe_template = """\
Apply a function ``func`` with arguments to this %(klass)s object and return
the function's result.
%(versionadded)s
Use ``.pipe`` when you want to improve readability by chaining together
functions that expect Series, DataFrames, GroupBy or Resampler objects.
Instead of writing
>>> h(g(f(df.groupby('group')), arg1=a), arg2=b, arg3=c)
You can write
>>> (df.groupby('group')
... .pipe(f)
... .pipe(g, arg1=a)
... .pipe(h, arg2=b, arg3=c))
which is much more readable.
Parameters
----------
func : callable or tuple of (callable, string)
Function to apply to this %(klass)s object or, alternatively,
a ``(callable, data_keyword)`` tuple where ``data_keyword`` is a
string indicating the keyword of ``callable`` that expects the
%(klass)s object.
args : iterable, optional
positional arguments passed into ``func``.
kwargs : dict, optional
a dictionary of keyword arguments passed into ``func``.
Returns
-------
object : the return type of ``func``.
Notes
-----
See more `here
<http://pandas.pydata.org/pandas-docs/stable/groupby.html#piping-function-calls>`_
Examples
--------
%(examples)s
See Also
--------
pandas.Series.pipe : Apply a function with arguments to a series
pandas.DataFrame.pipe: Apply a function with arguments to a dataframe
apply : Apply function to each group instead of to the
full %(klass)s object.
"""
_transform_template = """
Call function producing a like-indexed %(klass)s on each group and
return a %(klass)s having the same indexes as the original object
filled with the transformed values
Parameters
----------
f : function
Function to apply to each group
Notes
-----
Each group is endowed with the attribute 'name' in case you need to know
which group you are working on.
The current implementation imposes three requirements on f:
* f must return a value that either has the same shape as the input
subframe or can be broadcast to the shape of the input subframe.
  For example, if f returns a scalar, it will be broadcast to have the
same shape as the input subframe.
* if this is a DataFrame, f must support application column-by-column
in the subframe. If f also supports application to the entire subframe,
then a fast path is used starting from the second chunk.
* f must not mutate groups. Mutation is not supported and may
produce unexpected results.
Returns
-------
%(klass)s
See also
--------
aggregate, transform
Examples
--------
# Same shape
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : ['one', 'one', 'two', 'three',
... 'two', 'two'],
... 'C' : [1, 5, 5, 2, 5, 5],
... 'D' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.transform(lambda x: (x - x.mean()) / x.std())
C D
0 -1.154701 -0.577350
1 0.577350 0.000000
2 0.577350 1.154701
3 -1.154701 -1.000000
4 0.577350 -0.577350
5 0.577350 1.000000
# Broadcastable
>>> grouped.transform(lambda x: x.max() - x.min())
C D
0 4 6.0
1 3 8.0
2 4 6.0
3 3 8.0
4 4 6.0
5 3 8.0
"""
# special case to prevent duplicate plots when catching exceptions when
# forwarding methods from NDFrames
_plotting_methods = frozenset(['plot', 'boxplot', 'hist'])
_common_apply_whitelist = frozenset([
'last', 'first',
'head', 'tail', 'median',
'mean', 'sum', 'min', 'max',
'cumcount', 'ngroup',
'resample',
'rank', 'quantile',
'fillna',
'mad',
'any', 'all',
'take',
'idxmax', 'idxmin',
'shift', 'tshift',
'ffill', 'bfill',
'pct_change', 'skew',
'corr', 'cov', 'diff',
]) | _plotting_methods
_series_apply_whitelist = ((_common_apply_whitelist |
{'nlargest', 'nsmallest',
'is_monotonic_increasing',
'is_monotonic_decreasing'}) -
{'boxplot'}) | frozenset(['dtype', 'unique'])
_dataframe_apply_whitelist = ((_common_apply_whitelist |
frozenset(['dtypes', 'corrwith'])) -
{'boxplot'})
_cython_transforms = frozenset(['cumprod', 'cumsum', 'shift',
'cummin', 'cummax'])
_cython_cast_blacklist = frozenset(['rank', 'count', 'size'])
class Grouper(object):
"""
A Grouper allows the user to specify a groupby instruction for a target
object
This specification will select a column via the key parameter, or if the
level and/or axis parameters are given, a level of the index of the target
object.
These are local specifications and will override 'global' settings,
that is the parameters axis and level which are passed to the groupby
itself.
Parameters
----------
key : string, defaults to None
groupby key, which selects the grouping column of the target
level : name/number, defaults to None
the level for the target index
freq : string / frequency object, defaults to None
This will groupby the specified frequency if the target selection
(via key or level) is a datetime-like object. For full specification
of available frequencies, please see `here
<http://pandas.pydata.org/pandas-docs/stable/timeseries.html#offset-aliases>`_.
axis : number/name of the axis, defaults to 0
sort : boolean, default to False
whether to sort the resulting labels
additional kwargs to control time-like groupers (when ``freq`` is passed)
closed : closed end of interval; 'left' or 'right'
label : interval boundary to use for labeling; 'left' or 'right'
convention : {'start', 'end', 'e', 's'}
If grouper is PeriodIndex
base, loffset
Returns
-------
A specification for a groupby instruction
Examples
--------
Syntactic sugar for ``df.groupby('A')``
>>> df.groupby(Grouper(key='A'))
Specify a resample operation on the column 'date'
>>> df.groupby(Grouper(key='date', freq='60s'))
Specify a resample operation on the level 'date' on the columns axis
with a frequency of 60s
>>> df.groupby(Grouper(level='date', freq='60s', axis=1))
"""
_attributes = ('key', 'level', 'freq', 'axis', 'sort')
def __new__(cls, *args, **kwargs):
if kwargs.get('freq') is not None:
from pandas.core.resample import TimeGrouper
cls = TimeGrouper
return super(Grouper, cls).__new__(cls)
def __init__(self, key=None, level=None, freq=None, axis=0, sort=False):
self.key = key
self.level = level
self.freq = freq
self.axis = axis
self.sort = sort
self.grouper = None
self.obj = None
self.indexer = None
self.binner = None
self._grouper = None
@property
def ax(self):
return self.grouper
def _get_grouper(self, obj, validate=True):
"""
Parameters
----------
obj : the subject object
validate : boolean, default True
if True, validate the grouper
Returns
-------
a tuple of binner, grouper, obj (possibly sorted)
"""
self._set_grouper(obj)
self.grouper, exclusions, self.obj = _get_grouper(self.obj, [self.key],
axis=self.axis,
level=self.level,
sort=self.sort,
validate=validate)
return self.binner, self.grouper, self.obj
def _set_grouper(self, obj, sort=False):
"""
given an object and the specifications, setup the internal grouper
for this particular specification
Parameters
----------
obj : the subject object
sort : bool, default False
whether the resulting grouper should be sorted
"""
if self.key is not None and self.level is not None:
raise ValueError(
"The Grouper cannot specify both a key and a level!")
# Keep self.grouper value before overriding
if self._grouper is None:
self._grouper = self.grouper
# the key must be a valid info item
if self.key is not None:
key = self.key
# The 'on' is already defined
if getattr(self.grouper, 'name', None) == key and \
isinstance(obj, ABCSeries):
ax = self._grouper.take(obj.index)
else:
if key not in obj._info_axis:
raise KeyError(
"The grouper name {0} is not found".format(key))
ax = Index(obj[key], name=key)
else:
ax = obj._get_axis(self.axis)
if self.level is not None:
level = self.level
# if a level is given it must be a mi level or
# equivalent to the axis name
if isinstance(ax, MultiIndex):
level = ax._get_level_number(level)
ax = Index(ax._get_level_values(level),
name=ax.names[level])
else:
if level not in (0, ax.name):
raise ValueError(
"The level {0} is not valid".format(level))
# possibly sort
if (self.sort or sort) and not ax.is_monotonic:
# use stable sort to support first, last, nth
indexer = self.indexer = ax.argsort(kind='mergesort')
ax = ax.take(indexer)
obj = obj._take(indexer, axis=self.axis, is_copy=False)
self.obj = obj
self.grouper = ax
return self.grouper
@property
def groups(self):
return self.grouper.groups
def __repr__(self):
attrs_list = ["{}={!r}".format(attr_name, getattr(self, attr_name))
for attr_name in self._attributes
if getattr(self, attr_name) is not None]
attrs = ", ".join(attrs_list)
cls_name = self.__class__.__name__
return "{}({})".format(cls_name, attrs)
class GroupByPlot(PandasObject):
"""
Class implementing the .plot attribute for groupby objects
"""
def __init__(self, groupby):
self._groupby = groupby
def __call__(self, *args, **kwargs):
def f(self):
return self.plot(*args, **kwargs)
f.__name__ = 'plot'
return self._groupby.apply(f)
def __getattr__(self, name):
def attr(*args, **kwargs):
def f(self):
return getattr(self.plot, name)(*args, **kwargs)
return self._groupby.apply(f)
return attr
@contextmanager
def _group_selection_context(groupby):
"""
set / reset the _group_selection_context
"""
groupby._set_group_selection()
yield groupby
groupby._reset_group_selection()
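# Usage note: this context manager switches on group-based selection
# (excluding the grouping columns from the selected object) for the duration
# of the block and resets it afterwards; several reductions below use it as
# ``with _group_selection_context(self): ...``.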
class _GroupBy(PandasObject, SelectionMixin):
_group_selection = None
_apply_whitelist = frozenset([])
def __init__(self, obj, keys=None, axis=0, level=None,
grouper=None, exclusions=None, selection=None, as_index=True,
sort=True, group_keys=True, squeeze=False,
observed=False, **kwargs):
self._selection = selection
if isinstance(obj, NDFrame):
obj._consolidate_inplace()
self.level = level
if not as_index:
if not isinstance(obj, DataFrame):
raise TypeError('as_index=False only valid with DataFrame')
if axis != 0:
raise ValueError('as_index=False only valid for axis=0')
self.as_index = as_index
self.keys = keys
self.sort = sort
self.group_keys = group_keys
self.squeeze = squeeze
self.observed = observed
self.mutated = kwargs.pop('mutated', False)
if grouper is None:
grouper, exclusions, obj = _get_grouper(obj, keys,
axis=axis,
level=level,
sort=sort,
observed=observed,
mutated=self.mutated)
self.obj = obj
self.axis = obj._get_axis_number(axis)
self.grouper = grouper
self.exclusions = set(exclusions) if exclusions else set()
# we accept no other args
validate_kwargs('group', kwargs, {})
def __len__(self):
return len(self.groups)
def __unicode__(self):
# TODO: Better unicode/repr for GroupBy object
return object.__repr__(self)
def _assure_grouper(self):
"""
we create the grouper on instantiation
sub-classes may have a different policy
"""
pass
@property
def groups(self):
""" dict {group name -> group labels} """
self._assure_grouper()
return self.grouper.groups
@property
def ngroups(self):
self._assure_grouper()
return self.grouper.ngroups
@property
def indices(self):
""" dict {group name -> group indices} """
self._assure_grouper()
return self.grouper.indices
def _get_indices(self, names):
"""
safe get multiple indices, translate keys for
datelike to underlying repr
"""
def get_converter(s):
# possibly convert to the actual key types
# in the indices, could be a Timestamp or a np.datetime64
if isinstance(s, (Timestamp, datetime.datetime)):
return lambda key: Timestamp(key)
elif isinstance(s, np.datetime64):
return lambda key: Timestamp(key).asm8
else:
return lambda key: key
if len(names) == 0:
return []
if len(self.indices) > 0:
index_sample = next(iter(self.indices))
else:
index_sample = None # Dummy sample
name_sample = names[0]
if isinstance(index_sample, tuple):
if not isinstance(name_sample, tuple):
msg = ("must supply a tuple to get_group with multiple"
" grouping keys")
raise ValueError(msg)
if not len(name_sample) == len(index_sample):
try:
# If the original grouper was a tuple
return [self.indices[name] for name in names]
except KeyError:
# turns out it wasn't a tuple
msg = ("must supply a a same-length tuple to get_group"
" with multiple grouping keys")
raise ValueError(msg)
converters = [get_converter(s) for s in index_sample]
names = [tuple(f(n) for f, n in zip(converters, name))
for name in names]
else:
converter = get_converter(index_sample)
names = [converter(name) for name in names]
return [self.indices.get(name, []) for name in names]
def _get_index(self, name):
""" safe get index, translate keys for datelike to underlying repr """
return self._get_indices([name])[0]
@cache_readonly
def _selected_obj(self):
if self._selection is None or isinstance(self.obj, Series):
if self._group_selection is not None:
return self.obj[self._group_selection]
return self.obj
else:
return self.obj[self._selection]
def _reset_group_selection(self):
"""
Clear group based selection. Used for methods needing to return info on
each group regardless of whether a group selection was previously set.
"""
if self._group_selection is not None:
# GH12839 clear cached selection too when changing group selection
self._group_selection = None
self._reset_cache('_selected_obj')
def _set_group_selection(self):
"""
Create group based selection. Used when selection is not passed
directly but instead via a grouper.
NOTE: this should be paired with a call to _reset_group_selection
"""
grp = self.grouper
if not (self.as_index and
getattr(grp, 'groupings', None) is not None and
self.obj.ndim > 1 and
self._group_selection is None):
return
ax = self.obj._info_axis
groupers = [g.name for g in grp.groupings
if g.level is None and g.in_axis]
if len(groupers):
# GH12839 clear selected obj cache when group selection changes
self._group_selection = ax.difference(Index(groupers)).tolist()
self._reset_cache('_selected_obj')
def _set_result_index_ordered(self, result):
# set the result index on the passed values object and
# return the new object, xref 8046
# the values/counts are repeated according to the group index
# shortcut if we have an already ordered grouper
if not self.grouper.is_monotonic:
index = Index(np.concatenate(
self._get_indices(self.grouper.result_index)))
result.set_axis(index, axis=self.axis, inplace=True)
result = result.sort_index(axis=self.axis)
result.set_axis(self.obj._get_axis(self.axis), axis=self.axis,
inplace=True)
return result
def _dir_additions(self):
return self.obj._dir_additions() | self._apply_whitelist
def __getattr__(self, attr):
if attr in self._internal_names_set:
return object.__getattribute__(self, attr)
if attr in self.obj:
return self[attr]
if hasattr(self.obj, attr):
return self._make_wrapper(attr)
raise AttributeError("%r object has no attribute %r" %
(type(self).__name__, attr))
@Substitution(klass='GroupBy',
versionadded='.. versionadded:: 0.21.0',
examples="""\
>>> df = pd.DataFrame({'A': 'a b a b'.split(), 'B': [1, 2, 3, 4]})
>>> df
A B
0 a 1
1 b 2
2 a 3
3 b 4
To get the difference between each groups maximum and minimum value in one
pass, you can do
>>> df.groupby('A').pipe(lambda x: x.max() - x.min())
B
A
a 2
b 2""")
@Appender(_pipe_template)
def pipe(self, func, *args, **kwargs):
return com._pipe(self, func, *args, **kwargs)
plot = property(GroupByPlot)
def _make_wrapper(self, name):
if name not in self._apply_whitelist:
is_callable = callable(getattr(self._selected_obj, name, None))
kind = ' callable ' if is_callable else ' '
msg = ("Cannot access{0}attribute {1!r} of {2!r} objects, try "
"using the 'apply' method".format(kind, name,
type(self).__name__))
raise AttributeError(msg)
self._set_group_selection()
# need to setup the selection
# as are not passed directly but in the grouper
f = getattr(self._selected_obj, name)
if not isinstance(f, types.MethodType):
return self.apply(lambda self: getattr(self, name))
f = getattr(type(self._selected_obj), name)
def wrapper(*args, **kwargs):
# a little trickery for aggregation functions that need an axis
# argument
kwargs_with_axis = kwargs.copy()
if 'axis' not in kwargs_with_axis or \
kwargs_with_axis['axis'] is None:
kwargs_with_axis['axis'] = self.axis
def curried_with_axis(x):
return f(x, *args, **kwargs_with_axis)
def curried(x):
return f(x, *args, **kwargs)
# preserve the name so we can detect it when calling plot methods,
# to avoid duplicates
curried.__name__ = curried_with_axis.__name__ = name
# special case otherwise extra plots are created when catching the
# exception below
if name in _plotting_methods:
return self.apply(curried)
try:
return self.apply(curried_with_axis)
except Exception:
try:
return self.apply(curried)
except Exception:
# related to : GH3688
# try item-by-item
                    # this can be called recursively, so we need to raise
                    # ValueError if we don't have this method, to indicate
                    # to aggregate that this column should be marked as an
                    # error
try:
return self._aggregate_item_by_item(name,
*args, **kwargs)
except (AttributeError):
raise ValueError
return wrapper
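    # Dispatch note: whitelisted attributes reach the underlying object
    # through the wrapper above, e.g. (illustrative, using the whitelisted
    # 'dtypes' attribute):
    # >>> df.groupby('A').dtypes
    # while non-whitelisted names raise AttributeError and point the user at
    # ``.apply`` instead.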
def get_group(self, name, obj=None):
"""
Constructs NDFrame from group with provided name
Parameters
----------
name : object
the name of the group to get as a DataFrame
obj : NDFrame, default None
the NDFrame to take the DataFrame out of. If
it is None, the object groupby was called on will
be used
Returns
-------
group : type of obj
"""
if obj is None:
obj = self._selected_obj
inds = self._get_index(name)
if not len(inds):
raise KeyError(name)
return obj._take(inds, axis=self.axis)
def __iter__(self):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
return self.grouper.get_iterator(self.obj, axis=self.axis)
@Appender(_apply_docs['template']
.format(input="dataframe",
examples=_apply_docs['dataframe_examples']))
def apply(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
# this is needed so we don't try and wrap strings. If we could
# resolve functions to their callable functions prior, this
# wouldn't be needed
if args or kwargs:
if callable(func):
@wraps(func)
def f(g):
with np.errstate(all='ignore'):
return func(g, *args, **kwargs)
else:
raise ValueError('func must be a callable if args or '
'kwargs are supplied')
else:
f = func
# ignore SettingWithCopy here in case the user mutates
with option_context('mode.chained_assignment', None):
try:
result = self._python_apply_general(f)
except Exception:
# gh-20949
# try again, with .apply acting as a filtering
# operation, by excluding the grouping column
# This would normally not be triggered
# except if the udf is trying an operation that
# fails on *some* columns, e.g. a numeric operation
# on a string grouper column
with _group_selection_context(self):
return self._python_apply_general(f)
return result
def _python_apply_general(self, f):
keys, values, mutated = self.grouper.apply(f, self._selected_obj,
self.axis)
return self._wrap_applied_output(
keys,
values,
not_indexed_same=mutated or self.mutated)
def _iterate_slices(self):
yield self._selection_name, self._selected_obj
def transform(self, func, *args, **kwargs):
raise com.AbstractMethodError(self)
def _cumcount_array(self, ascending=True):
"""
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Notes
-----
this is currently implementing sort=False
(though the default is sort=True) for groupby in general
"""
ids, _, ngroups = self.grouper.group_info
sorter = get_group_index_sorter(ids, ngroups)
ids, count = ids[sorter], len(ids)
if count == 0:
return np.empty(0, dtype=np.int64)
run = np.r_[True, ids[:-1] != ids[1:]]
rep = np.diff(np.r_[np.nonzero(run)[0], count])
out = (~run).cumsum()
if ascending:
out -= np.repeat(out[run], rep)
else:
out = np.repeat(out[np.r_[run[1:], True]], rep) - out
rev = np.empty(count, dtype=np.intp)
rev[sorter] = np.arange(count, dtype=np.intp)
return out[rev].astype(np.int64, copy=False)
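    # Worked example: for group labels [a, a, a, b, b, a] this returns
    # [0, 1, 2, 0, 1, 3] when ascending and [3, 2, 1, 1, 0, 0] otherwise
    # (compare the ``cumcount`` docstring further below).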
def _try_cast(self, result, obj, numeric_only=False):
"""
try to cast the result to our obj original type,
we may have roundtripped thru object in the mean-time
if numeric_only is True, then only try to cast numerics
and not datetimelikes
"""
if obj.ndim > 1:
dtype = obj.values.dtype
else:
dtype = obj.dtype
if not is_scalar(result):
if numeric_only and is_numeric_dtype(dtype) or not numeric_only:
result = maybe_downcast_to_dtype(result, dtype)
return result
def _transform_should_cast(self, func_nm):
"""
Parameters:
-----------
func_nm: str
The name of the aggregation function being performed
Returns:
--------
bool
Whether transform should attempt to cast the result of aggregation
"""
return (self.size().fillna(0) > 0).any() and (func_nm not in
_cython_cast_blacklist)
def _cython_transform(self, how, numeric_only=True, **kwargs):
output = collections.OrderedDict()
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.transform(obj.values, how,
**kwargs)
except NotImplementedError:
continue
except AssertionError as e:
raise GroupByError(str(e))
if self._transform_should_cast(how):
output[name] = self._try_cast(result, obj)
else:
output[name] = result
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_transformed_output(output, names)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
min_count=-1):
output = {}
for name, obj in self._iterate_slices():
is_numeric = is_numeric_dtype(obj.dtype)
if numeric_only and not is_numeric:
continue
try:
result, names = self.grouper.aggregate(obj.values, how,
min_count=min_count)
except AssertionError as e:
raise GroupByError(str(e))
output[name] = self._try_cast(result, obj)
if len(output) == 0:
raise DataError('No numeric types to aggregate')
return self._wrap_aggregated_output(output, names)
def _python_agg_general(self, func, *args, **kwargs):
func = self._is_builtin_func(func)
f = lambda x: func(x, *args, **kwargs)
# iterate through "columns" ex exclusions to populate output dict
output = {}
for name, obj in self._iterate_slices():
try:
result, counts = self.grouper.agg_series(obj, f)
output[name] = self._try_cast(result, obj, numeric_only=True)
except TypeError:
continue
if len(output) == 0:
return self._python_apply_general(f)
if self.grouper._filter_empty_groups:
mask = counts.ravel() > 0
for name, result in compat.iteritems(output):
# since we are masking, make sure that we have a float object
values = result
if is_numeric_dtype(values.dtype):
values = _ensure_float(values)
output[name] = self._try_cast(values[mask], result)
return self._wrap_aggregated_output(output)
def _wrap_applied_output(self, *args, **kwargs):
raise com.AbstractMethodError(self)
def _concat_objects(self, keys, values, not_indexed_same=False):
from pandas.core.reshape.concat import concat
def reset_identity(values):
# reset the identities of the components
# of the values to prevent aliasing
for v in com._not_none(*values):
ax = v._get_axis(self.axis)
ax._reset_identity()
return values
if not not_indexed_same:
result = concat(values, axis=self.axis)
ax = self._selected_obj._get_axis(self.axis)
if isinstance(result, Series):
result = result.reindex(ax)
else:
# this is a very unfortunate situation
# we have a multi-index that is NOT lexsorted
# and we have a result which is duplicated
# we can't reindex, so we resort to this
# GH 14776
if isinstance(ax, MultiIndex) and not ax.is_unique:
indexer = algorithms.unique1d(
result.index.get_indexer_for(ax.values))
result = result.take(indexer, axis=self.axis)
else:
result = result.reindex(ax, axis=self.axis)
elif self.group_keys:
values = reset_identity(values)
if self.as_index:
# possible MI return case
group_keys = keys
group_levels = self.grouper.levels
group_names = self.grouper.names
result = concat(values, axis=self.axis, keys=group_keys,
levels=group_levels, names=group_names,
sort=False)
else:
# GH5610, returns a MI, with the first level being a
# range index
keys = list(range(len(values)))
result = concat(values, axis=self.axis, keys=keys)
else:
values = reset_identity(values)
result = concat(values, axis=self.axis)
if (isinstance(result, Series) and
getattr(self, '_selection_name', None) is not None):
result.name = self._selection_name
return result
def _apply_filter(self, indices, dropna):
if len(indices) == 0:
indices = np.array([], dtype='int64')
else:
indices = np.sort(np.concatenate(indices))
if dropna:
filtered = self._selected_obj.take(indices, axis=self.axis)
else:
mask = np.empty(len(self._selected_obj.index), dtype=bool)
mask.fill(False)
mask[indices.astype(int)] = True
# mask fails to broadcast when passed to where; broadcast manually.
mask = np.tile(mask, list(self._selected_obj.shape[1:]) + [1]).T
filtered = self._selected_obj.where(mask) # Fill with NaNs.
return filtered
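    # Note: with dropna=True the rows not selected by ``indices`` are dropped
    # entirely, while with dropna=False they are kept in place but replaced
    # with NaN via the broadcast ``where`` mask above.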
class GroupBy(_GroupBy):
"""
Class for grouping and aggregating relational data. See aggregate,
transform, and apply functions on this object.
It's easiest to use obj.groupby(...) to use GroupBy, but you can also do:
::
grouped = groupby(obj, ...)
Parameters
----------
obj : pandas object
axis : int, default 0
level : int, default None
Level of MultiIndex
groupings : list of Grouping objects
Most users should ignore this
exclusions : array-like, optional
List of columns to exclude
name : string
Most users should ignore this
Notes
-----
After grouping, see aggregate, apply, and transform functions. Here are
some other brief notes about usage. When grouping by multiple groups, the
result index will be a MultiIndex (hierarchical) by default.
Iteration produces (key, group) tuples, i.e. chunking the data by group. So
you can write code like:
::
grouped = obj.groupby(keys, axis=axis)
for key, group in grouped:
# do something with the data
Function calls on GroupBy, if not specially implemented, "dispatch" to the
grouped data. So if you group a DataFrame and wish to invoke the std()
method on each group, you can simply do:
::
df.groupby(mapper).std()
rather than
::
df.groupby(mapper).aggregate(np.std)
You can pass arguments to these "wrapped" functions, too.
See the online documentation for full exposition on these topics and much
more
Returns
-------
**Attributes**
groups : dict
{group name -> group labels}
len(grouped) : int
Number of groups
"""
_apply_whitelist = _common_apply_whitelist
def _bool_agg(self, val_test, skipna):
"""Shared func to call any / all Cython GroupBy implementations"""
def objs_to_bool(vals):
try:
vals = vals.astype(np.bool)
except ValueError: # for objects
vals = np.array([bool(x) for x in vals])
return vals.view(np.uint8)
def result_to_bool(result):
return result.astype(np.bool, copy=False)
return self._get_cythonized_result('group_any_all', self.grouper,
aggregate=True,
cython_dtype=np.uint8,
needs_values=True,
needs_mask=True,
pre_processing=objs_to_bool,
post_processing=result_to_bool,
val_test=val_test, skipna=skipna)
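    # Note: ``any`` and ``all`` below funnel through this helper so that
    # object columns are coerced to booleans before the shared Cython kernel
    # ('group_any_all') runs with the requested ``val_test``.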
@Substitution(name='groupby')
@Appender(_doc_template)
def any(self, skipna=True):
"""
        Returns True if any value in the group is truthy, else False
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing
"""
return self._bool_agg('any', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def all(self, skipna=True):
"""Returns True if all values in the group are truthful, else False
Parameters
----------
skipna : bool, default True
Flag to ignore nan values during truth testing
"""
return self._bool_agg('all', skipna)
@Substitution(name='groupby')
@Appender(_doc_template)
def count(self):
"""Compute count of group, excluding missing values"""
# defined here for API doc
raise NotImplementedError
@Substitution(name='groupby')
@Appender(_doc_template)
def mean(self, *args, **kwargs):
"""
Compute mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
nv.validate_groupby_func('mean', args, kwargs, ['numeric_only'])
try:
return self._cython_agg_general('mean', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
with _group_selection_context(self):
f = lambda x: x.mean(axis=self.axis, **kwargs)
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def median(self, **kwargs):
"""
Compute median of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
try:
return self._cython_agg_general('median', **kwargs)
except GroupByError:
raise
except Exception: # pragma: no cover
def f(x):
if isinstance(x, np.ndarray):
x = Series(x)
return x.median(axis=self.axis, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def std(self, ddof=1, *args, **kwargs):
"""
Compute standard deviation of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
# TODO: implement at Cython level?
nv.validate_groupby_func('std', args, kwargs)
return np.sqrt(self.var(ddof=ddof, **kwargs))
@Substitution(name='groupby')
@Appender(_doc_template)
def var(self, ddof=1, *args, **kwargs):
"""
Compute variance of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
nv.validate_groupby_func('var', args, kwargs)
if ddof == 1:
return self._cython_agg_general('var', **kwargs)
else:
f = lambda x: x.var(ddof=ddof, **kwargs)
with _group_selection_context(self):
return self._python_agg_general(f)
@Substitution(name='groupby')
@Appender(_doc_template)
def sem(self, ddof=1):
"""
Compute standard error of the mean of groups, excluding missing values
For multiple groupings, the result index will be a MultiIndex
Parameters
----------
ddof : integer, default 1
degrees of freedom
"""
return self.std(ddof=ddof) / np.sqrt(self.count())
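    # Note: the standard error of the mean is derived from existing
    # aggregations as std(ddof) / sqrt(count) per group rather than through a
    # dedicated Cython kernel.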
@Substitution(name='groupby')
@Appender(_doc_template)
def size(self):
"""Compute group sizes"""
result = self.grouper.size()
if isinstance(self.obj, Series):
result.name = getattr(self.obj, 'name', None)
return result
@classmethod
def _add_numeric_operations(cls):
""" add numeric operations to the GroupBy generically """
def groupby_function(name, alias, npfunc,
numeric_only=True, _convert=False,
min_count=-1):
_local_template = "Compute %(f)s of group values"
@Substitution(name='groupby', f=name)
@Appender(_doc_template)
@Appender(_local_template)
def f(self, **kwargs):
if 'numeric_only' not in kwargs:
kwargs['numeric_only'] = numeric_only
if 'min_count' not in kwargs:
kwargs['min_count'] = min_count
self._set_group_selection()
try:
return self._cython_agg_general(
alias, alt=npfunc, **kwargs)
except AssertionError as e:
raise SpecificationError(str(e))
except Exception:
result = self.aggregate(
lambda x: npfunc(x, axis=self.axis))
if _convert:
result = result._convert(datetime=True)
return result
set_function_name(f, name, cls)
return f
def first_compat(x, axis=0):
def first(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[0]
if isinstance(x, DataFrame):
return x.apply(first, axis=axis)
else:
return first(x)
def last_compat(x, axis=0):
def last(x):
x = np.asarray(x)
x = x[notna(x)]
if len(x) == 0:
return np.nan
return x[-1]
if isinstance(x, DataFrame):
return x.apply(last, axis=axis)
else:
return last(x)
cls.sum = groupby_function('sum', 'add', np.sum, min_count=0)
cls.prod = groupby_function('prod', 'prod', np.prod, min_count=0)
cls.min = groupby_function('min', 'min', np.min, numeric_only=False)
cls.max = groupby_function('max', 'max', np.max, numeric_only=False)
cls.first = groupby_function('first', 'first', first_compat,
numeric_only=False)
cls.last = groupby_function('last', 'last', last_compat,
numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def ohlc(self):
"""
        Compute open, high, low and close values within each group,
        excluding missing values
For multiple groupings, the result index will be a MultiIndex
"""
return self._apply_to_column_groupbys(
lambda x: x._cython_agg_general('ohlc'))
@Appender(DataFrame.describe.__doc__)
def describe(self, **kwargs):
with _group_selection_context(self):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
@Substitution(name='groupby')
@Appender(_doc_template)
def resample(self, rule, *args, **kwargs):
"""
Provide resampling when using a TimeGrouper
Return a new grouper with our resampler appended
"""
from pandas.core.resample import get_resampler_for_grouping
return get_resampler_for_grouping(self, rule, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def rolling(self, *args, **kwargs):
"""
Return a rolling grouper, providing rolling
functionality per group
"""
from pandas.core.window import RollingGroupby
return RollingGroupby(self, *args, **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def expanding(self, *args, **kwargs):
"""
Return an expanding grouper, providing expanding
functionality per group
"""
from pandas.core.window import ExpandingGroupby
return ExpandingGroupby(self, *args, **kwargs)
def _fill(self, direction, limit=None):
"""Shared function for `pad` and `backfill` to call Cython method
Parameters
----------
direction : {'ffill', 'bfill'}
Direction passed to underlying Cython function. `bfill` will cause
values to be filled backwards. `ffill` and any other values will
default to a forward fill
limit : int, default None
Maximum number of consecutive values to fill. If `None`, this
method will convert to -1 prior to passing to Cython
Returns
-------
`Series` or `DataFrame` with filled values
See Also
--------
pad
backfill
"""
# Need int value for Cython
if limit is None:
limit = -1
return self._get_cythonized_result('group_fillna_indexer',
self.grouper, needs_mask=True,
cython_dtype=np.int64,
result_is_index=True,
direction=direction, limit=limit)
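    # Illustrative usage (sketch, assuming a DataFrame ``df`` grouped by 'A'):
    # >>> df.groupby('A').ffill(limit=1)   # forward fill within each group
    # >>> df.groupby('A').bfill()          # backward fill within each group
    # Both ``pad``/``ffill`` and ``backfill``/``bfill`` below resolve to this
    # helper with direction='ffill' or 'bfill'.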
@Substitution(name='groupby')
def pad(self, limit=None):
"""
Forward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.pad
DataFrame.pad
Series.fillna
DataFrame.fillna
"""
return self._fill('ffill', limit=limit)
ffill = pad
@Substitution(name='groupby')
def backfill(self, limit=None):
"""
Backward fill the values
Parameters
----------
limit : integer, optional
limit of how many values to fill
See Also
--------
Series.backfill
DataFrame.backfill
Series.fillna
DataFrame.fillna
"""
return self._fill('bfill', limit=limit)
bfill = backfill
@Substitution(name='groupby')
@Appender(_doc_template)
def nth(self, n, dropna=None):
"""
Take the nth row from each group if n is an int, or a subset of rows
if n is a list of ints.
If dropna, will take the nth non-null row, dropna is either
Truthy (if a Series) or 'all', 'any' (if a DataFrame);
this is equivalent to calling dropna(how=dropna) before the
groupby.
Parameters
----------
n : int or list of ints
a single nth value for the row or a list of nth values
dropna : None or str, optional
apply the specified dropna operation before counting which row is
the nth row. Needs to be None, 'any' or 'all'
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 1, 2],
... 'B': [np.nan, 2, 3, 4, 5]}, columns=['A', 'B'])
>>> g = df.groupby('A')
>>> g.nth(0)
B
A
1 NaN
2 3.0
>>> g.nth(1)
B
A
1 2.0
2 5.0
>>> g.nth(-1)
B
A
1 4.0
2 5.0
>>> g.nth([0, 1])
B
A
1 NaN
1 2.0
2 3.0
2 5.0
        Specifying ``dropna`` allows NaN values to be ignored when counting
>>> g.nth(0, dropna='any')
B
A
1 2.0
2 3.0
        NaNs denote that the group was exhausted when using dropna
>>> g.nth(3, dropna='any')
B
A
1 NaN
2 NaN
Specifying ``as_index=False`` in ``groupby`` keeps the original index.
>>> df.groupby('A', as_index=False).nth(1)
A B
1 1 2.0
4 2 5.0
"""
if isinstance(n, int):
nth_values = [n]
elif isinstance(n, (set, list, tuple)):
nth_values = list(set(n))
if dropna is not None:
raise ValueError(
"dropna option with a list of nth values is not supported")
else:
raise TypeError("n needs to be an int or a list/set/tuple of ints")
nth_values = np.array(nth_values, dtype=np.intp)
self._set_group_selection()
if not dropna:
mask = np.in1d(self._cumcount_array(), nth_values) | \
np.in1d(self._cumcount_array(ascending=False) + 1, -nth_values)
out = self._selected_obj[mask]
if not self.as_index:
return out
ids, _, _ = self.grouper.group_info
out.index = self.grouper.result_index[ids[mask]]
return out.sort_index() if self.sort else out
if dropna not in ['any', 'all']:
if isinstance(self._selected_obj, Series) and dropna is True:
warnings.warn("the dropna={dropna} keyword is deprecated,"
"use dropna='all' instead. "
"For a Series groupby, dropna must be "
"either None, 'any' or 'all'.".format(
dropna=dropna),
FutureWarning,
stacklevel=2)
dropna = 'all'
else:
# Note: when agg-ing picker doesn't raise this,
# just returns NaN
raise ValueError("For a DataFrame groupby, dropna must be "
"either None, 'any' or 'all', "
"(was passed %s)." % (dropna),)
# old behaviour, but with all and any support for DataFrames.
# modified in GH 7559 to have better perf
max_len = n if n >= 0 else - 1 - n
dropped = self.obj.dropna(how=dropna, axis=self.axis)
# get a new grouper for our dropped obj
if self.keys is None and self.level is None:
# we don't have the grouper info available
# (e.g. we have selected out
# a column that is not in the current object)
axis = self.grouper.axis
grouper = axis[axis.isin(dropped.index)]
else:
# create a grouper with the original parameters, but on the dropped
# object
grouper, _, _ = _get_grouper(dropped, key=self.keys,
axis=self.axis, level=self.level,
sort=self.sort,
mutated=self.mutated)
grb = dropped.groupby(grouper, as_index=self.as_index, sort=self.sort)
sizes, result = grb.size(), grb.nth(n)
mask = (sizes < max_len).values
# set the results which don't meet the criteria
if len(result) and mask.any():
result.loc[mask] = np.nan
# reset/reindex to the original groups
if len(self.obj) == len(dropped) or \
len(result) == len(self.grouper.result_index):
result.index = self.grouper.result_index
else:
result = result.reindex(self.grouper.result_index)
return result
@Substitution(name='groupby')
def ngroup(self, ascending=True):
"""
Number each group from 0 to the number of groups - 1.
This is the enumerative complement of cumcount. Note that the
numbers given to the groups match the order in which the groups
would be seen when iterating over the groupby object, not the
order they are first observed.
.. versionadded:: 0.20.2
Parameters
----------
ascending : bool, default True
            If False, number in reverse, from number of groups - 1 to 0.
Examples
--------
>>> df = pd.DataFrame({"A": list("aaabba")})
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').ngroup()
0 0
1 0
2 0
3 1
4 1
5 0
dtype: int64
>>> df.groupby('A').ngroup(ascending=False)
0 1
1 1
2 1
3 0
4 0
5 1
dtype: int64
>>> df.groupby(["A", [1,1,2,3,2,1]]).ngroup()
0 0
1 0
2 1
3 3
4 2
5 0
dtype: int64
See also
--------
.cumcount : Number the rows in each group.
"""
with _group_selection_context(self):
index = self._selected_obj.index
result = Series(self.grouper.group_info[0], index)
if not ascending:
result = self.ngroups - 1 - result
return result
@Substitution(name='groupby')
def cumcount(self, ascending=True):
"""
Number each item in each group from 0 to the length of that group - 1.
Essentially this is equivalent to
>>> self.apply(lambda x: Series(np.arange(len(x)), x.index))
Parameters
----------
ascending : bool, default True
If False, number in reverse, from length of group - 1 to 0.
Examples
--------
>>> df = pd.DataFrame([['a'], ['a'], ['a'], ['b'], ['b'], ['a']],
... columns=['A'])
>>> df
A
0 a
1 a
2 a
3 b
4 b
5 a
>>> df.groupby('A').cumcount()
0 0
1 1
2 2
3 0
4 1
5 3
dtype: int64
>>> df.groupby('A').cumcount(ascending=False)
0 3
1 2
2 1
3 1
4 0
5 0
dtype: int64
See also
--------
.ngroup : Number the groups themselves.
"""
with _group_selection_context(self):
index = self._selected_obj.index
cumcounts = self._cumcount_array(ascending=ascending)
return Series(cumcounts, index)
@Substitution(name='groupby')
@Appender(_doc_template)
def rank(self, method='average', ascending=True, na_option='keep',
pct=False, axis=0):
"""
Provides the rank of values within each group.
Parameters
----------
method : {'average', 'min', 'max', 'first', 'dense'}, default 'average'
* average: average rank of group
* min: lowest rank in group
* max: highest rank in group
* first: ranks assigned in order they appear in the array
* dense: like 'min', but rank always increases by 1 between groups
ascending : boolean, default True
False for ranks by high (1) to low (N)
na_option : {'keep', 'top', 'bottom'}, default 'keep'
* keep: leave NA values where they are
* top: smallest rank if ascending
* bottom: smallest rank if descending
pct : boolean, default False
Compute percentage rank of data within each group
axis : int, default 0
The axis of the object over which to compute the rank.
        Returns
        -------
DataFrame with ranking of values within each group
"""
return self._cython_transform('rank', numeric_only=False,
ties_method=method, ascending=ascending,
na_option=na_option, pct=pct, axis=axis)
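    # Illustrative usage (sketch, assuming a DataFrame ``df`` with a numeric
    # column 'B'):
    # >>> df.groupby('A')['B'].rank(method='dense', ascending=False)
    # Ranking is delegated to the Cython 'rank' transform with the keyword
    # arguments forwarded above.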
@Substitution(name='groupby')
@Appender(_doc_template)
def cumprod(self, axis=0, *args, **kwargs):
"""Cumulative product for each group"""
nv.validate_groupby_func('cumprod', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumprod(axis=axis, **kwargs))
return self._cython_transform('cumprod', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cumsum(self, axis=0, *args, **kwargs):
"""Cumulative sum for each group"""
nv.validate_groupby_func('cumsum', args, kwargs,
['numeric_only', 'skipna'])
if axis != 0:
return self.apply(lambda x: x.cumsum(axis=axis, **kwargs))
return self._cython_transform('cumsum', **kwargs)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummin(self, axis=0, **kwargs):
"""Cumulative min for each group"""
if axis != 0:
return self.apply(lambda x: np.minimum.accumulate(x, axis))
return self._cython_transform('cummin', numeric_only=False)
@Substitution(name='groupby')
@Appender(_doc_template)
def cummax(self, axis=0, **kwargs):
"""Cumulative max for each group"""
if axis != 0:
return self.apply(lambda x: np.maximum.accumulate(x, axis))
return self._cython_transform('cummax', numeric_only=False)
def _get_cythonized_result(self, how, grouper, aggregate=False,
cython_dtype=None, needs_values=False,
needs_mask=False, needs_ngroups=False,
result_is_index=False,
pre_processing=None, post_processing=None,
**kwargs):
"""Get result for Cythonized functions
Parameters
----------
how : str, Cythonized function name to be called
grouper : Grouper object containing pertinent group info
aggregate : bool, default False
Whether the result should be aggregated to match the number of
groups
cython_dtype : default None
Type of the array that will be modified by the Cython call. If
`None`, the type will be inferred from the values of each slice
needs_values : bool, default False
Whether the values should be a part of the Cython call
signature
needs_mask : bool, default False
Whether boolean mask needs to be part of the Cython call
signature
needs_ngroups : bool, default False
Whether number of groups is part of the Cython call signature
result_is_index : bool, default False
Whether the result of the Cython operation is an index of
values to be retrieved, instead of the actual values themselves
pre_processing : function, default None
Function to be applied to `values` prior to passing to Cython
Raises if `needs_values` is False
post_processing : function, default None
Function to be applied to result of Cython function
**kwargs : dict
Extra arguments to be passed back to Cython funcs
Returns
-------
        `Series` or `DataFrame` with the computed values
"""
if result_is_index and aggregate:
raise ValueError("'result_is_index' and 'aggregate' cannot both "
"be True!")
if post_processing:
            if not callable(post_processing):
raise ValueError("'post_processing' must be a callable!")
if pre_processing:
if not callable(pre_processing):
raise ValueError("'pre_processing' must be a callable!")
if not needs_values:
raise ValueError("Cannot use 'pre_processing' without "
"specifying 'needs_values'!")
labels, _, ngroups = grouper.group_info
output = collections.OrderedDict()
base_func = getattr(libgroupby, how)
for name, obj in self._iterate_slices():
if aggregate:
result_sz = ngroups
else:
result_sz = len(obj.values)
if not cython_dtype:
cython_dtype = obj.values.dtype
result = np.zeros(result_sz, dtype=cython_dtype)
func = partial(base_func, result, labels)
if needs_values:
vals = obj.values
if pre_processing:
vals = pre_processing(vals)
func = partial(func, vals)
if needs_mask:
mask = isnull(obj.values).view(np.uint8)
func = partial(func, mask)
if needs_ngroups:
func = partial(func, ngroups)
func(**kwargs) # Call func to modify indexer values in place
if result_is_index:
result = algorithms.take_nd(obj.values, result)
if post_processing:
result = post_processing(result)
output[name] = result
if aggregate:
return self._wrap_aggregated_output(output)
else:
return self._wrap_transformed_output(output)
@Substitution(name='groupby')
@Appender(_doc_template)
def shift(self, periods=1, freq=None, axis=0):
"""
Shift each group by periods observations
Parameters
----------
periods : integer, default 1
number of periods to shift
freq : frequency string
axis : axis to shift, default 0
"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.shift(periods, freq, axis))
return self._get_cythonized_result('group_shift_indexer',
self.grouper, cython_dtype=np.int64,
needs_ngroups=True,
result_is_index=True,
periods=periods)
@Substitution(name='groupby')
@Appender(_doc_template)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
axis=0):
"""Calculate pct_change of each value to previous entry in group"""
if freq is not None or axis != 0:
return self.apply(lambda x: x.pct_change(periods=periods,
fill_method=fill_method,
limit=limit, freq=freq,
axis=axis))
filled = getattr(self, fill_method)(limit=limit).drop(
self.grouper.names, axis=1)
shifted = filled.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
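    # Note: the fast path above fills missing values per group using
    # ``fill_method`` (default 'pad'), shifts the filled values by ``periods``
    # and returns filled / shifted - 1; when ``freq`` or a non-zero axis is
    # given it falls back to a per-group ``apply``.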
@Substitution(name='groupby')
@Appender(_doc_template)
def head(self, n=5):
"""
Returns first n rows of each group.
Essentially equivalent to ``.apply(lambda x: x.head(n))``,
        except that it ignores the as_index flag.
Examples
--------
>>> df = DataFrame([[1, 2], [1, 4], [5, 6]],
columns=['A', 'B'])
>>> df.groupby('A', as_index=False).head(1)
A B
0 1 2
2 5 6
>>> df.groupby('A').head(1)
A B
0 1 2
2 5 6
"""
self._reset_group_selection()
mask = self._cumcount_array() < n
return self._selected_obj[mask]
@Substitution(name='groupby')
@Appender(_doc_template)
def tail(self, n=5):
"""
Returns last n rows of each group
Essentially equivalent to ``.apply(lambda x: x.tail(n))``,
        except that it ignores the as_index flag.
Examples
--------
>>> df = DataFrame([['a', 1], ['a', 2], ['b', 1], ['b', 2]],
columns=['A', 'B'])
>>> df.groupby('A').tail(1)
A B
1 a 2
3 b 2
>>> df.groupby('A').head(1)
A B
0 a 1
2 b 1
"""
self._reset_group_selection()
mask = self._cumcount_array(ascending=False) < n
return self._selected_obj[mask]
GroupBy._add_numeric_operations()
@Appender(GroupBy.__doc__)
def groupby(obj, by, **kwds):
if isinstance(obj, Series):
klass = SeriesGroupBy
elif isinstance(obj, DataFrame):
klass = DataFrameGroupBy
else: # pragma: no cover
raise TypeError('invalid type: %s' % type(obj))
return klass(obj, by, **kwds)
def _get_axes(group):
if isinstance(group, Series):
return [group.index]
else:
return group.axes
def _is_indexed_like(obj, axes):
if isinstance(obj, Series):
if len(axes) > 1:
return False
return obj.index.equals(axes[0])
elif isinstance(obj, DataFrame):
return obj.index.equals(axes[0])
return False
class BaseGrouper(object):
"""
This is an internal Grouper class, which actually holds
the generated groups
Parameters
----------
axis : int
the axis to group
groupings : array of grouping
all the grouping instances to handle in this grouper
for example for grouper list to groupby, need to pass the list
sort : boolean, default True
whether this grouper will give sorted result or not
group_keys : boolean, default True
mutated : boolean, default False
indexer : intp array, optional
the indexer created by Grouper
some groupers (TimeGrouper) will sort its axis and its
group_info is also sorted, so need the indexer to reorder
"""
def __init__(self, axis, groupings, sort=True, group_keys=True,
mutated=False, indexer=None):
self._filter_empty_groups = self.compressed = len(groupings) != 1
self.axis = axis
self.groupings = groupings
self.sort = sort
self.group_keys = group_keys
self.mutated = mutated
self.indexer = indexer
@property
def shape(self):
return tuple(ping.ngroups for ping in self.groupings)
def __iter__(self):
return iter(self.indices)
@property
def nkeys(self):
return len(self.groupings)
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
splitter = self._get_splitter(data, axis=axis)
keys = self._get_group_keys()
for key, (i, group) in zip(keys, splitter):
yield key, group
def _get_splitter(self, data, axis=0):
comp_ids, _, ngroups = self.group_info
return get_splitter(data, comp_ids, ngroups, axis=axis)
def _get_group_keys(self):
if len(self.groupings) == 1:
return self.levels[0]
else:
comp_ids, _, ngroups = self.group_info
# provide "flattened" iterator for multi-group setting
return get_flattened_iterator(comp_ids,
ngroups,
self.levels,
self.labels)
def apply(self, f, data, axis=0):
mutated = self.mutated
splitter = self._get_splitter(data, axis=axis)
group_keys = self._get_group_keys()
# oh boy
f_name = com._get_callable_name(f)
if (f_name not in _plotting_methods and
hasattr(splitter, 'fast_apply') and axis == 0):
try:
values, mutated = splitter.fast_apply(f, group_keys)
return group_keys, values, mutated
except reduction.InvalidApply:
# we detect a mutation of some kind
# so take slow path
pass
except Exception:
# raise this error to the caller
pass
result_values = []
for key, (i, group) in zip(group_keys, splitter):
object.__setattr__(group, 'name', key)
# group might be modified
group_axes = _get_axes(group)
res = f(group)
if not _is_indexed_like(res, group_axes):
mutated = True
result_values.append(res)
return group_keys, result_values, mutated
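    # Note: the method above first attempts the Cython ``fast_apply`` path
    # (axis 0 and non-plotting functions only) and falls back to the pure
    # Python loop over groups when that path raises or detects mutation.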
@cache_readonly
def indices(self):
""" dict {group name -> group indices} """
if len(self.groupings) == 1:
return self.groupings[0].indices
else:
label_list = [ping.labels for ping in self.groupings]
keys = [com._values_from_object(ping.group_index)
for ping in self.groupings]
return get_indexer_dict(label_list, keys)
@property
def labels(self):
return [ping.labels for ping in self.groupings]
@property
def levels(self):
return [ping.group_index for ping in self.groupings]
@property
def names(self):
return [ping.name for ping in self.groupings]
def size(self):
"""
Compute group sizes
"""
ids, _, ngroup = self.group_info
ids = _ensure_platform_int(ids)
if ngroup:
out = np.bincount(ids[ids != -1], minlength=ngroup)
else:
out = ids
return Series(out,
index=self.result_index,
dtype='int64')
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
if len(self.groupings) == 1:
return self.groupings[0].groups
else:
to_groupby = lzip(*(ping.grouper for ping in self.groupings))
to_groupby = Index(to_groupby)
return self.axis.groupby(to_groupby)
@cache_readonly
def is_monotonic(self):
# return if my group orderings are monotonic
return Index(self.group_info[0]).is_monotonic
@cache_readonly
def group_info(self):
comp_ids, obs_group_ids = self._get_compressed_labels()
ngroups = len(obs_group_ids)
comp_ids = _ensure_int64(comp_ids)
return comp_ids, obs_group_ids, ngroups
@cache_readonly
def label_info(self):
# return the labels of items in original grouped axis
labels, _, _ = self.group_info
if self.indexer is not None:
sorter = np.lexsort((labels, self.indexer))
labels = labels[sorter]
return labels
def _get_compressed_labels(self):
all_labels = [ping.labels for ping in self.groupings]
if len(all_labels) > 1:
group_index = get_group_index(all_labels, self.shape,
sort=True, xnull=True)
return compress_group_index(group_index, sort=self.sort)
ping = self.groupings[0]
return ping.labels, np.arange(len(ping.group_index))
@cache_readonly
def ngroups(self):
return len(self.result_index)
@property
def recons_labels(self):
comp_ids, obs_ids, _ = self.group_info
labels = (ping.labels for ping in self.groupings)
return decons_obs_group_ids(
comp_ids, obs_ids, self.shape, labels, xnull=True)
@cache_readonly
def result_index(self):
if not self.compressed and len(self.groupings) == 1:
return self.groupings[0].result_index.rename(self.names[0])
labels = self.recons_labels
levels = [ping.result_index for ping in self.groupings]
result = MultiIndex(levels=levels,
labels=labels,
verify_integrity=False,
names=self.names)
return result
def get_group_levels(self):
if not self.compressed and len(self.groupings) == 1:
return [self.groupings[0].result_index]
name_list = []
for ping, labels in zip(self.groupings, self.recons_labels):
labels = _ensure_platform_int(labels)
levels = ping.result_index.take(labels)
name_list.append(levels)
return name_list
# ------------------------------------------------------------
# Aggregation functions
_cython_functions = {
'aggregate': {
'add': 'group_add',
'prod': 'group_prod',
'min': 'group_min',
'max': 'group_max',
'mean': 'group_mean',
'median': {
'name': 'group_median'
},
'var': 'group_var',
'first': {
'name': 'group_nth',
'f': lambda func, a, b, c, d, e: func(a, b, c, d, 1, -1)
},
'last': 'group_last',
'ohlc': 'group_ohlc',
},
'transform': {
'cumprod': 'group_cumprod',
'cumsum': 'group_cumsum',
'cummin': 'group_cummin',
'cummax': 'group_cummax',
'rank': {
'name': 'group_rank',
'f': lambda func, a, b, c, d, **kwargs: func(
a, b, c, d,
kwargs.get('ties_method', 'average'),
kwargs.get('ascending', True),
kwargs.get('pct', False),
kwargs.get('na_option', 'keep')
)
}
}
}
_cython_arity = {
'ohlc': 4, # OHLC
}
_name_functions = {
'ohlc': lambda *args: ['open', 'high', 'low', 'close']
}
def _is_builtin_func(self, arg):
"""
        if we define a builtin function for this argument, return it,
otherwise return the arg
"""
return SelectionMixin._builtin_table.get(arg, arg)
def _get_cython_function(self, kind, how, values, is_numeric):
dtype_str = values.dtype.name
def get_func(fname):
# see if there is a fused-type version of function
# only valid for numeric
f = getattr(libgroupby, fname, None)
if f is not None and is_numeric:
return f
# otherwise find dtype-specific version, falling back to object
for dt in [dtype_str, 'object']:
f = getattr(libgroupby, "%s_%s" % (fname, dtype_str), None)
if f is not None:
return f
ftype = self._cython_functions[kind][how]
if isinstance(ftype, dict):
func = afunc = get_func(ftype['name'])
# a sub-function
f = ftype.get('f')
if f is not None:
def wrapper(*args, **kwargs):
return f(afunc, *args, **kwargs)
# need to curry our sub-function
func = wrapper
else:
func = get_func(ftype)
if func is None:
raise NotImplementedError("function is not implemented for this"
"dtype: [how->%s,dtype->%s]" %
(how, dtype_str))
return func
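    # Lookup note: ``get_func`` first tries a fused-type kernel named exactly
    # ``fname`` (numeric dtypes only) and then dtype-suffixed variants of the
    # form '<fname>_<dtype>', finishing with the '<fname>_object' fallback;
    # the exact set of compiled kernels depends on the libgroupby build.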
def _cython_operation(self, kind, values, how, axis, min_count=-1,
**kwargs):
assert kind in ['transform', 'aggregate']
# can we do this operation with our cython functions
# if not raise NotImplementedError
        # we raise NotImplementedError if this is an invalid operation
# entirely, e.g. adding datetimes
# categoricals are only 1d, so we
# are not setup for dim transforming
if is_categorical_dtype(values):
raise NotImplementedError(
"categoricals are not support in cython ops ATM")
elif is_datetime64_any_dtype(values):
if how in ['add', 'prod', 'cumsum', 'cumprod']:
raise NotImplementedError(
"datetime64 type does not support {} "
"operations".format(how))
elif is_timedelta64_dtype(values):
if how in ['prod', 'cumprod']:
raise NotImplementedError(
"timedelta64 type does not support {} "
"operations".format(how))
arity = self._cython_arity.get(how, 1)
vdim = values.ndim
swapped = False
if vdim == 1:
values = values[:, None]
out_shape = (self.ngroups, arity)
else:
if axis > 0:
swapped = True
values = values.swapaxes(0, axis)
if arity > 1:
raise NotImplementedError("arity of more than 1 is not "
"supported for the 'how' argument")
out_shape = (self.ngroups,) + values.shape[1:]
is_datetimelike = needs_i8_conversion(values.dtype)
is_numeric = is_numeric_dtype(values.dtype)
if is_datetimelike:
values = values.view('int64')
is_numeric = True
elif is_bool_dtype(values.dtype):
values = _ensure_float64(values)
elif is_integer_dtype(values):
# we use iNaT for the missing value on ints
# so pre-convert to guard this condition
if (values == iNaT).any():
values = _ensure_float64(values)
else:
values = values.astype('int64', copy=False)
elif is_numeric and not is_complex_dtype(values):
values = _ensure_float64(values)
else:
values = values.astype(object)
try:
func = self._get_cython_function(
kind, how, values, is_numeric)
except NotImplementedError:
if is_numeric:
values = _ensure_float64(values)
func = self._get_cython_function(
kind, how, values, is_numeric)
else:
raise
if how == 'rank':
out_dtype = 'float'
else:
if is_numeric:
out_dtype = '%s%d' % (values.dtype.kind, values.dtype.itemsize)
else:
out_dtype = 'object'
labels, _, _ = self.group_info
if kind == 'aggregate':
result = _maybe_fill(np.empty(out_shape, dtype=out_dtype),
fill_value=np.nan)
counts = np.zeros(self.ngroups, dtype=np.int64)
result = self._aggregate(
result, counts, values, labels, func, is_numeric,
is_datetimelike, min_count)
elif kind == 'transform':
result = _maybe_fill(np.empty_like(values, dtype=out_dtype),
fill_value=np.nan)
# TODO: min_count
result = self._transform(
result, values, labels, func, is_numeric, is_datetimelike,
**kwargs)
if is_integer_dtype(result) and not is_datetimelike:
mask = result == iNaT
if mask.any():
result = result.astype('float64')
result[mask] = np.nan
if kind == 'aggregate' and \
self._filter_empty_groups and not counts.all():
if result.ndim == 2:
try:
result = lib.row_bool_subset(
result, (counts > 0).view(np.uint8))
except ValueError:
result = lib.row_bool_subset_object(
_ensure_object(result),
(counts > 0).view(np.uint8))
else:
result = result[counts > 0]
if vdim == 1 and arity == 1:
result = result[:, 0]
if how in self._name_functions:
# TODO
names = self._name_functions[how]()
else:
names = None
if swapped:
result = result.swapaxes(0, axis)
return result, names
def aggregate(self, values, how, axis=0, min_count=-1):
return self._cython_operation('aggregate', values, how, axis,
min_count=min_count)
def transform(self, values, how, axis=0, **kwargs):
return self._cython_operation('transform', values, how, axis, **kwargs)
def _aggregate(self, result, counts, values, comp_ids, agg_func,
is_numeric, is_datetimelike, min_count=-1):
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
agg_func(result[:, :, i], counts, chunk, comp_ids,
min_count)
else:
agg_func(result, counts, values, comp_ids, min_count)
return result
def _transform(self, result, values, comp_ids, transform_func,
is_numeric, is_datetimelike, **kwargs):
comp_ids, _, ngroups = self.group_info
if values.ndim > 3:
# punting for now
raise NotImplementedError("number of dimensions is currently "
"limited to 3")
elif values.ndim > 2:
for i, chunk in enumerate(values.transpose(2, 0, 1)):
chunk = chunk.squeeze()
transform_func(result[:, :, i], values,
comp_ids, is_datetimelike, **kwargs)
else:
transform_func(result, values, comp_ids, is_datetimelike, **kwargs)
return result
def agg_series(self, obj, func):
try:
return self._aggregate_series_fast(obj, func)
except Exception:
return self._aggregate_series_pure_python(obj, func)
def _aggregate_series_fast(self, obj, func):
func = self._is_builtin_func(func)
if obj.index._has_complex_internals:
raise TypeError('Incompatible index for Cython grouper')
group_index, _, ngroups = self.group_info
# avoids object / Series creation overhead
dummy = obj._get_values(slice(None, 0)).to_dense()
indexer = get_group_index_sorter(group_index, ngroups)
obj = obj._take(indexer).to_dense()
group_index = algorithms.take_nd(
group_index, indexer, allow_fill=False)
grouper = reduction.SeriesGrouper(obj, func, group_index, ngroups,
dummy)
result, counts = grouper.get_result()
return result, counts
def _aggregate_series_pure_python(self, obj, func):
group_index, _, ngroups = self.group_info
counts = np.zeros(ngroups, dtype=int)
result = None
splitter = get_splitter(obj, group_index, ngroups, axis=self.axis)
for label, group in splitter:
res = func(group)
if result is None:
if (isinstance(res, (Series, Index, np.ndarray))):
raise ValueError('Function does not reduce')
result = np.empty(ngroups, dtype='O')
counts[label] = group.shape[0]
result[label] = res
result = lib.maybe_convert_objects(result, try_float=0)
return result, counts
def generate_bins_generic(values, binner, closed):
"""
Generate bin edge offsets and bin labels for one array using another array
which has bin edge values. Both arrays must be sorted.
Parameters
----------
values : array of values
binner : a comparable array of values representing bins into which to bin
the first array. Note, 'values' end-points must fall within 'binner'
end-points.
closed : which end of bin is closed; left (default), right
Returns
-------
bins : array of offsets (into 'values' argument) of bins.
Zero and last edge are excluded in result, so for instance the first
bin is values[0:bin[0]] and the last is values[bin[-1]:]
"""
lenidx = len(values)
lenbin = len(binner)
if lenidx <= 0 or lenbin <= 0:
raise ValueError("Invalid length for values or for binner")
# check binner fits data
if values[0] < binner[0]:
raise ValueError("Values falls before first bin")
if values[lenidx - 1] > binner[lenbin - 1]:
raise ValueError("Values falls after last bin")
bins = np.empty(lenbin - 1, dtype=np.int64)
j = 0 # index into values
bc = 0 # bin count
# linear scan, presume nothing about values/binner except that it fits ok
for i in range(0, lenbin - 1):
r_bin = binner[i + 1]
# count values in current bin, advance to next bin
while j < lenidx and (values[j] < r_bin or
(closed == 'right' and values[j] == r_bin)):
j += 1
bins[bc] = j
bc += 1
return bins
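# Worked example (illustrative, following the loop above):
# >>> generate_bins_generic(np.array([1, 2, 4, 7, 8]),
# ...                       np.array([0, 3, 10]), closed='left')
# array([2, 5])
# i.e. values[0:2] fall in the first bin and values[2:5] in the second.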
class BinGrouper(BaseGrouper):
"""
This is an internal Grouper class
Parameters
----------
bins : the split index of binlabels to group the item of axis
binlabels : the label list
filter_empty : boolean, default False
mutated : boolean, default False
indexer : a intp array
Examples
--------
bins: [2, 4, 6, 8, 10]
binlabels: DatetimeIndex(['2005-01-01', '2005-01-03',
'2005-01-05', '2005-01-07', '2005-01-09'],
dtype='datetime64[ns]', freq='2D')
the group_info, which contains the label of each item in grouped
axis, the index of label in label list, group number, is
(array([0, 0, 1, 1, 2, 2, 3, 3, 4, 4]), array([0, 1, 2, 3, 4]), 5)
    means that the grouped axis has 10 items which can be grouped into 5
    labels; the first and second items belong to the first label, the
    third and fourth items belong to the second label, and so on
"""
def __init__(self, bins, binlabels, filter_empty=False, mutated=False,
indexer=None):
self.bins = _ensure_int64(bins)
self.binlabels = _ensure_index(binlabels)
self._filter_empty_groups = filter_empty
self.mutated = mutated
self.indexer = indexer
@cache_readonly
def groups(self):
""" dict {group name -> group labels} """
# this is mainly for compat
# GH 3881
result = {}
for key, value in zip(self.binlabels, self.bins):
if key is not NaT:
result[key] = value
return result
@property
def nkeys(self):
return 1
def get_iterator(self, data, axis=0):
"""
Groupby iterator
Returns
-------
Generator yielding sequence of (name, subsetted object)
for each group
"""
if isinstance(data, NDFrame):
slicer = lambda start, edge: data._slice(
slice(start, edge), axis=axis)
length = len(data.axes[axis])
else:
slicer = lambda start, edge: data[slice(start, edge)]
length = len(data)
start = 0
for edge, label in zip(self.bins, self.binlabels):
if label is not NaT:
yield label, slicer(start, edge)
start = edge
if start < length:
yield self.binlabels[-1], slicer(start, None)
@cache_readonly
def indices(self):
indices = collections.defaultdict(list)
i = 0
for label, bin in zip(self.binlabels, self.bins):
if i < bin:
if label is not NaT:
indices[label] = list(range(i, bin))
i = bin
return indices
@cache_readonly
def group_info(self):
ngroups = self.ngroups
obs_group_ids = np.arange(ngroups)
rep = np.diff(np.r_[0, self.bins])
rep = _ensure_platform_int(rep)
if ngroups == len(self.bins):
comp_ids = np.repeat(np.arange(ngroups), rep)
else:
comp_ids = np.repeat(np.r_[-1, np.arange(ngroups)], rep)
return comp_ids.astype('int64', copy=False), \
obs_group_ids.astype('int64', copy=False), ngroups
@cache_readonly
def ngroups(self):
return len(self.result_index)
@cache_readonly
def result_index(self):
if len(self.binlabels) != 0 and isna(self.binlabels[0]):
return self.binlabels[1:]
return self.binlabels
@property
def levels(self):
return [self.binlabels]
@property
def names(self):
return [self.binlabels.name]
@property
def groupings(self):
return [Grouping(lvl, lvl, in_axis=False, level=None, name=name)
for lvl, name in zip(self.levels, self.names)]
def agg_series(self, obj, func):
dummy = obj[:0]
grouper = reduction.SeriesBinGrouper(obj, func, self.bins, dummy)
return grouper.get_result()
# ----------------------------------------------------------------------
# cython aggregation
_cython_functions = copy.deepcopy(BaseGrouper._cython_functions)
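# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): the
# BinGrouper docstring above describes grouping ten daily observations
# into five 2-day bins.  A hedged way to see the same grouping through
# the public API is ``pd.Grouper(freq='2D')``; the sample series and the
# claim that it mirrors ``bins == [2, 4, 6, 8, 10]`` are assumptions
# based only on that docstring.
def _example_bin_grouper_equivalent():
    import pandas as pd  # local import to keep the sketch self-contained
    s = pd.Series(range(10),
                  index=pd.date_range('2005-01-01', periods=10, freq='D'))
    # each 2-day bin holds two consecutive observations, mirroring the
    # docstring's bins [2, 4, 6, 8, 10] over a 10-item axis
    return s.groupby(pd.Grouper(freq='2D')).sum()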
class Grouping(object):
"""
Holds the grouping information for a single key
Parameters
----------
index : Index
grouper :
obj :
name :
level :
observed : boolean, default False
If we are a Categorical, use the observed values
in_axis : if the Grouping is a column in self.obj and hence among
Groupby.exclusions list
Returns
-------
**Attributes**:
* indices : dict of {group -> index_list}
* labels : ndarray, group labels
* ids : mapping of label -> group
* counts : array of group counts
* group_index : unique groups
* groups : dict of {group -> label_list}
"""
def __init__(self, index, grouper=None, obj=None, name=None, level=None,
sort=True, observed=False, in_axis=False):
self.name = name
self.level = level
self.grouper = _convert_grouper(index, grouper)
self.all_grouper = None
self.index = index
self.sort = sort
self.obj = obj
self.observed = observed
self.in_axis = in_axis
# right place for this?
if isinstance(grouper, (Series, Index)) and name is None:
self.name = grouper.name
if isinstance(grouper, MultiIndex):
self.grouper = grouper.values
        # we have a single grouper which may be a myriad of things,
        # some of which depend on the passed-in level
if level is not None:
if not isinstance(level, int):
if level not in index.names:
raise AssertionError('Level %s not in index' % str(level))
level = index.names.index(level)
if self.name is None:
self.name = index.names[level]
self.grouper, self._labels, self._group_index = \
index._get_grouper_for_level(self.grouper, level)
        # a passed Grouper-like: directly get the grouper in the same way
        # as a single-grouper groupby, and use the group_info to get labels
elif isinstance(self.grouper, Grouper):
# get the new grouper; we already have disambiguated
# what key/level refer to exactly, don't need to
# check again as we have by this point converted these
# to an actual value (rather than a pd.Grouper)
_, grouper, _ = self.grouper._get_grouper(self.obj, validate=False)
if self.name is None:
self.name = grouper.result_index.name
self.obj = self.grouper.obj
self.grouper = grouper
else:
if self.grouper is None and self.name is not None:
self.grouper = self.obj[self.name]
elif isinstance(self.grouper, (list, tuple)):
self.grouper = com._asarray_tuplesafe(self.grouper)
# a passed Categorical
elif is_categorical_dtype(self.grouper):
self.all_grouper = self.grouper
self.grouper = self.grouper._codes_for_groupby(
self.sort, observed)
categories = self.grouper.categories
# we make a CategoricalIndex out of the cat grouper
# preserving the categories / ordered attributes
self._labels = self.grouper.codes
if observed:
codes = algorithms.unique1d(self.grouper.codes)
else:
codes = np.arange(len(categories))
self._group_index = CategoricalIndex(
Categorical.from_codes(
codes=codes,
categories=categories,
ordered=self.grouper.ordered))
# we are done
if isinstance(self.grouper, Grouping):
self.grouper = self.grouper.grouper
# no level passed
elif not isinstance(self.grouper,
(Series, Index, ExtensionArray, np.ndarray)):
if getattr(self.grouper, 'ndim', 1) != 1:
t = self.name or str(type(self.grouper))
raise ValueError("Grouper for '%s' not 1-dimensional" % t)
self.grouper = self.index.map(self.grouper)
if not (hasattr(self.grouper, "__len__") and
len(self.grouper) == len(self.index)):
errmsg = ('Grouper result violates len(labels) == '
'len(data)\nresult: %s' %
pprint_thing(self.grouper))
self.grouper = None # Try for sanity
raise AssertionError(errmsg)
# if we have a date/time-like grouper, make sure that we have
        # Timestamp-like values
if getattr(self.grouper, 'dtype', None) is not None:
if is_datetime64_dtype(self.grouper):
from pandas import to_datetime
self.grouper = to_datetime(self.grouper)
elif is_timedelta64_dtype(self.grouper):
from pandas import to_timedelta
self.grouper = to_timedelta(self.grouper)
def __repr__(self):
return 'Grouping({0})'.format(self.name)
def __iter__(self):
return iter(self.indices)
_labels = None
_group_index = None
@property
def ngroups(self):
return len(self.group_index)
@cache_readonly
def indices(self):
# we have a list of groupers
if isinstance(self.grouper, BaseGrouper):
return self.grouper.indices
values = _ensure_categorical(self.grouper)
return values._reverse_indexer()
@property
def labels(self):
if self._labels is None:
self._make_labels()
return self._labels
@cache_readonly
def result_index(self):
if self.all_grouper is not None:
all_categories = self.all_grouper.categories
# we re-order to the original category orderings
if self.sort:
return self.group_index.set_categories(all_categories)
# we are not sorting, so add unobserved to the end
categories = self.group_index.categories
return self.group_index.add_categories(
all_categories[~all_categories.isin(categories)])
return self.group_index
@property
def group_index(self):
if self._group_index is None:
self._make_labels()
return self._group_index
def _make_labels(self):
if self._labels is None or self._group_index is None:
# we have a list of groupers
if isinstance(self.grouper, BaseGrouper):
labels = self.grouper.label_info
uniques = self.grouper.result_index
else:
labels, uniques = algorithms.factorize(
self.grouper, sort=self.sort)
uniques = Index(uniques, name=self.name)
self._labels = labels
self._group_index = uniques
@cache_readonly
def groups(self):
return self.index.groupby(Categorical.from_codes(self.labels,
self.group_index))
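# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): at its
# core a Grouping factorizes the grouping vector into integer labels and
# the unique group index (see ``_make_labels`` above).  The standalone
# example below uses the public ``pd.factorize`` to show the same
# labels/uniques split; treating it as a faithful mirror of the internal
# call is an assumption, offered only as a reading aid.
def _example_grouping_factorize():
    import pandas as pd  # local import to keep the sketch self-contained
    key = ['a', 'b', 'a', 'c', 'b']
    labels, uniques = pd.factorize(key, sort=True)
    # labels -> array([0, 1, 0, 2, 1]); uniques -> the keys ['a', 'b', 'c']
    return labels, uniques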
def _get_grouper(obj, key=None, axis=0, level=None, sort=True,
observed=False, mutated=False, validate=True):
"""
create and return a BaseGrouper, which is an internal
mapping of how to create the grouper indexers.
This may be composed of multiple Grouping objects, indicating
multiple groupers
Groupers are ultimately index mappings. They can originate as:
index mappings, keys to columns, functions, or Groupers
    Groupers enable local references to axis, level, and sort, while
    the passed-in axis, level, and sort are 'global'.
    This routine tries to figure out what the passed-in references
    are and then creates a Grouping for each one, combined into
a BaseGrouper.
If observed & we have a categorical grouper, only show the observed
values
If validate, then check for key/level overlaps
"""
group_axis = obj._get_axis(axis)
# validate that the passed single level is compatible with the passed
# axis of the object
if level is not None:
        # TODO: This if-block and the else-block are almost the same.
        # The MultiIndex instance check is removable, but it seems that there are
# some processes only for non-MultiIndex in else-block,
# eg. `obj.index.name != level`. We have to consider carefully whether
# these are applicable for MultiIndex. Even if these are applicable,
# we need to check if it makes no side effect to subsequent processes
# on the outside of this condition.
# (GH 17621)
if isinstance(group_axis, MultiIndex):
if is_list_like(level) and len(level) == 1:
level = level[0]
if key is None and is_scalar(level):
# Get the level values from group_axis
key = group_axis.get_level_values(level)
level = None
else:
# allow level to be a length-one list-like object
# (e.g., level=[0])
# GH 13901
if is_list_like(level):
nlevels = len(level)
if nlevels == 1:
level = level[0]
elif nlevels == 0:
raise ValueError('No group keys passed!')
else:
raise ValueError('multiple levels only valid with '
'MultiIndex')
if isinstance(level, compat.string_types):
if obj.index.name != level:
raise ValueError('level name %s is not the name of the '
'index' % level)
elif level > 0 or level < -1:
raise ValueError('level > 0 or level < -1 only valid with '
' MultiIndex')
# NOTE: `group_axis` and `group_axis.get_level_values(level)`
            # are the same in this section.
level = None
key = group_axis
# a passed-in Grouper, directly convert
if isinstance(key, Grouper):
binner, grouper, obj = key._get_grouper(obj, validate=False)
if key.key is None:
return grouper, [], obj
else:
return grouper, set([key.key]), obj
# already have a BaseGrouper, just return it
elif isinstance(key, BaseGrouper):
return key, [], obj
# In the future, a tuple key will always mean an actual key,
# not an iterable of keys. In the meantime, we attempt to provide
# a warning. We can assume that the user wanted a list of keys when
# the key is not in the index. We just have to be careful with
    # unhashable elements of `key`. Any unhashable elements imply that
# they wanted a list of keys.
# https://github.com/pandas-dev/pandas/issues/18314
is_tuple = isinstance(key, tuple)
all_hashable = is_tuple and is_hashable(key)
if is_tuple:
if ((all_hashable and key not in obj and set(key).issubset(obj))
or not all_hashable):
# column names ('a', 'b') -> ['a', 'b']
# arrays like (a, b) -> [a, b]
msg = ("Interpreting tuple 'by' as a list of keys, rather than "
"a single key. Use 'by=[...]' instead of 'by=(...)'. In "
"the future, a tuple will always mean a single key.")
warnings.warn(msg, FutureWarning, stacklevel=5)
key = list(key)
if not isinstance(key, list):
keys = [key]
match_axis_length = False
else:
keys = key
match_axis_length = len(keys) == len(group_axis)
# what are we after, exactly?
any_callable = any(callable(g) or isinstance(g, dict) for g in keys)
any_groupers = any(isinstance(g, Grouper) for g in keys)
any_arraylike = any(isinstance(g, (list, tuple, Series, Index, np.ndarray))
for g in keys)
try:
if isinstance(obj, DataFrame):
all_in_columns_index = all(g in obj.columns or g in obj.index.names
for g in keys)
else:
all_in_columns_index = False
except Exception:
all_in_columns_index = False
if not any_callable and not all_in_columns_index and \
not any_arraylike and not any_groupers and \
match_axis_length and level is None:
keys = [com._asarray_tuplesafe(keys)]
if isinstance(level, (tuple, list)):
if key is None:
keys = [None] * len(level)
levels = level
else:
levels = [level] * len(keys)
groupings = []
exclusions = []
# if the actual grouper should be obj[key]
def is_in_axis(key):
if not _is_label_like(key):
try:
obj._data.items.get_loc(key)
except Exception:
return False
return True
# if the grouper is obj[name]
def is_in_obj(gpr):
try:
return id(gpr) == id(obj[gpr.name])
except Exception:
return False
for i, (gpr, level) in enumerate(zip(keys, levels)):
if is_in_obj(gpr): # df.groupby(df['name'])
in_axis, name = True, gpr.name
exclusions.append(name)
elif is_in_axis(gpr): # df.groupby('name')
if gpr in obj:
if validate:
stacklevel = 5 # Number of stack levels from df.groupby
obj._check_label_or_level_ambiguity(
gpr, stacklevel=stacklevel)
in_axis, name, gpr = True, gpr, obj[gpr]
exclusions.append(name)
elif obj._is_level_reference(gpr):
in_axis, name, level, gpr = False, None, gpr, None
else:
raise KeyError(gpr)
elif isinstance(gpr, Grouper) and gpr.key is not None:
# Add key to exclusions
exclusions.append(gpr.key)
in_axis, name = False, None
else:
in_axis, name = False, None
if is_categorical_dtype(gpr) and len(gpr) != obj.shape[axis]:
raise ValueError(
("Length of grouper ({len_gpr}) and axis ({len_axis})"
" must be same length"
.format(len_gpr=len(gpr), len_axis=obj.shape[axis])))
# create the Grouping
    # allow passing the actual Grouping as the gpr
ping = Grouping(group_axis,
gpr,
obj=obj,
name=name,
level=level,
sort=sort,
observed=observed,
in_axis=in_axis) \
if not isinstance(gpr, Grouping) else gpr
groupings.append(ping)
if len(groupings) == 0:
raise ValueError('No group keys passed!')
# create the internals grouper
grouper = BaseGrouper(group_axis, groupings, sort=sort, mutated=mutated)
return grouper, exclusions, obj
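# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): the many
# ``by`` forms that ``_get_grouper`` accepts all surface through the
# public ``DataFrame.groupby``.  The frame and keys below are assumptions
# chosen only to exercise a column name, a same-length array and an index
# level; all three are expected to produce the same group sums.
def _example_get_grouper_inputs():
    import pandas as pd  # local import to keep the sketch self-contained
    df = pd.DataFrame({'key': ['a', 'b', 'a', 'b'], 'val': [1, 2, 3, 4]})
    by_column = df.groupby('key')['val'].sum()            # key is a column
    by_array = df.groupby(df['key'].values)['val'].sum()  # key is an array
    by_level = (df.set_index('key')
                  .groupby(level='key')['val'].sum())     # key is a level
    return by_column, by_array, by_level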
def _is_label_like(val):
return (isinstance(val, (compat.string_types, tuple)) or
(val is not None and is_scalar(val)))
def _convert_grouper(axis, grouper):
if isinstance(grouper, dict):
return grouper.get
elif isinstance(grouper, Series):
if grouper.index.equals(axis):
return grouper._values
else:
return grouper.reindex(axis)._values
elif isinstance(grouper, (list, Series, Index, np.ndarray)):
if len(grouper) != len(axis):
raise ValueError('Grouper and axis must be same length')
return grouper
else:
return grouper
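# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): a hedged
# view of ``_convert_grouper`` above -- a dict grouper is turned into its
# ``.get`` lookup and a Series grouper is realigned to the axis.  The
# inputs are assumptions picked purely for demonstration.
def _example_convert_grouper():
    import pandas as pd  # local import to keep the sketch self-contained
    axis = pd.Index(['x', 'y', 'z'])
    mapping = {'x': 'odd', 'y': 'even', 'z': 'odd'}
    as_callable = _convert_grouper(axis, mapping)   # -> mapping.get
    series = pd.Series(['odd', 'even', 'odd'], index=['z', 'x', 'y'])
    realigned = _convert_grouper(axis, series)      # values realigned to axis
    return as_callable('x'), realigned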
def _whitelist_method_generator(klass, whitelist):
"""
Yields all GroupBy member defs for DataFrame/Series names in _whitelist.
Parameters
----------
klass - class where members are defined. Should be Series or DataFrame
whitelist - list of names of klass methods to be constructed
Returns
-------
The generator yields a sequence of strings, each suitable for exec'ing,
that define implementations of the named methods for DataFrameGroupBy
or SeriesGroupBy.
Since we don't want to override methods explicitly defined in the
base class, any such name is skipped.
"""
method_wrapper_template = \
"""def %(name)s(%(sig)s) :
\"""
%(doc)s
\"""
f = %(self)s.__getattr__('%(name)s')
return f(%(args)s)"""
property_wrapper_template = \
"""@property
def %(name)s(self) :
\"""
%(doc)s
\"""
return self.__getattr__('%(name)s')"""
for name in whitelist:
# don't override anything that was explicitly defined
# in the base class
if hasattr(GroupBy, name):
continue
# ugly, but we need the name string itself in the method.
f = getattr(klass, name)
doc = f.__doc__
doc = doc if type(doc) == str else ''
if isinstance(f, types.MethodType):
wrapper_template = method_wrapper_template
decl, args = make_signature(f)
# pass args by name to f because otherwise
# GroupBy._make_wrapper won't know whether
# we passed in an axis parameter.
args_by_name = ['{0}={0}'.format(arg) for arg in args[1:]]
params = {'name': name,
'doc': doc,
'sig': ','.join(decl),
'self': args[0],
'args': ','.join(args_by_name)}
else:
wrapper_template = property_wrapper_template
params = {'name': name, 'doc': doc}
yield wrapper_template % params
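# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): the
# generator above emits source strings that are exec'd into the groupby
# classes below.  The toy template and class here are assumptions, shown
# only to make the "build source text, then exec it" pattern concrete.
def _example_exec_generated_method():
    template = ("def {name}(self):\n"
                "    return self.value * 2")
    namespace = {}
    exec(template.format(name='doubled'), namespace)
    class Toy(object):
        def __init__(self, value):
            self.value = value
        doubled = namespace['doubled']
    return Toy(21).doubled()  # -> 42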
class SeriesGroupBy(GroupBy):
#
# Make class defs of attributes on SeriesGroupBy whitelist
_apply_whitelist = _series_apply_whitelist
for _def_str in _whitelist_method_generator(Series,
_series_apply_whitelist):
exec(_def_str)
@property
def _selection_name(self):
"""
        since we are a Series, we by definition only have
        a single name, but it may be the result of a selection or
        the name of our object
"""
if self._selection is None:
return self.obj.name
else:
return self._selection
_agg_doc = dedent("""
Examples
--------
>>> s = Series([1, 2, 3, 4])
>>> s
0 1
1 2
2 3
3 4
dtype: int64
>>> s.groupby([1, 1, 2, 2]).min()
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg('min')
1 1
2 3
dtype: int64
>>> s.groupby([1, 1, 2, 2]).agg(['min', 'max'])
min max
1 1 2
2 3 4
See also
--------
pandas.Series.groupby.apply
pandas.Series.groupby.transform
pandas.Series.aggregate
""")
@Appender(_apply_docs['template']
.format(input='series',
examples=_apply_docs['series_examples']))
def apply(self, func, *args, **kwargs):
return super(SeriesGroupBy, self).apply(func, *args, **kwargs)
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='Series',
versionadded='',
axis=''))
def aggregate(self, func_or_funcs, *args, **kwargs):
_level = kwargs.pop('_level', None)
if isinstance(func_or_funcs, compat.string_types):
return getattr(self, func_or_funcs)(*args, **kwargs)
if isinstance(func_or_funcs, collections.Iterable):
# Catch instances of lists / tuples
# but not the class list / tuple itself.
ret = self._aggregate_multiple_funcs(func_or_funcs,
(_level or 0) + 1)
else:
cyfunc = self._is_cython_func(func_or_funcs)
if cyfunc and not args and not kwargs:
return getattr(self, cyfunc)()
if self.grouper.nkeys > 1:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
try:
return self._python_agg_general(func_or_funcs, *args, **kwargs)
except Exception:
result = self._aggregate_named(func_or_funcs, *args, **kwargs)
index = Index(sorted(result), name=self.grouper.names[0])
ret = Series(result, index=index)
if not self.as_index: # pragma: no cover
print('Warning, ignoring as_index=True')
# _level handled at higher
if not _level and isinstance(ret, dict):
from pandas import concat
ret = concat(ret, axis=1)
return ret
agg = aggregate
def _aggregate_multiple_funcs(self, arg, _level):
if isinstance(arg, dict):
# show the deprecation, but only if we
# have not shown a higher level one
# GH 15931
if isinstance(self._selected_obj, Series) and _level <= 1:
warnings.warn(
("using a dict on a Series for aggregation\n"
"is deprecated and will be removed in a future "
"version"),
FutureWarning, stacklevel=3)
columns = list(arg.keys())
arg = list(arg.items())
elif any(isinstance(x, (tuple, list)) for x in arg):
arg = [(x, x) if not isinstance(x, (tuple, list)) else x
for x in arg]
# indicated column order
columns = lzip(*arg)[0]
else:
# list of functions / function names
columns = []
for f in arg:
if isinstance(f, compat.string_types):
columns.append(f)
else:
# protect against callables without names
columns.append(com._get_callable_name(f))
arg = lzip(columns, arg)
results = {}
for name, func in arg:
obj = self
if name in results:
raise SpecificationError('Function names must be unique, '
'found multiple named %s' % name)
# reset the cache so that we
# only include the named selection
if name in self._selected_obj:
obj = copy.copy(obj)
obj._reset_cache()
obj._selection = name
results[name] = obj.aggregate(func)
if isinstance(list(compat.itervalues(results))[0],
DataFrame):
# let higher level handle
if _level:
return results
return list(compat.itervalues(results))[0]
return DataFrame(results, columns=columns)
def _wrap_output(self, output, index, names=None):
""" common agg/transform wrapping logic """
output = output[self._selection_name]
if names is not None:
return DataFrame(output, index=index, columns=names)
else:
name = self._selection_name
if name is None:
name = self._selected_obj.name
return Series(output, index=index, name=name)
def _wrap_aggregated_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.grouper.result_index,
names=names)
def _wrap_transformed_output(self, output, names=None):
return self._wrap_output(output=output,
index=self.obj.index,
names=names)
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
if len(keys) == 0:
# GH #6265
return Series([], name=self._selection_name, index=keys)
def _get_index():
if self.grouper.nkeys > 1:
index = MultiIndex.from_tuples(keys, names=self.grouper.names)
else:
index = Index(keys, name=self.grouper.names[0])
return index
if isinstance(values[0], dict):
# GH #823
index = _get_index()
result = DataFrame(values, index=index).stack()
result.name = self._selection_name
return result
if isinstance(values[0], (Series, dict)):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif isinstance(values[0], DataFrame):
# possible that Series -> DataFrame by applied function
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
else:
# GH #6265
return Series(values, index=_get_index(),
name=self._selection_name)
def _aggregate_named(self, func, *args, **kwargs):
result = {}
for name, group in self:
group.name = name
output = func(group, *args, **kwargs)
if isinstance(output, (Series, Index, np.ndarray)):
raise Exception('Must produce aggregated value')
result[name] = self._try_cast(output, group)
return result
@Substitution(klass='Series', selected='A.')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
func = self._is_cython_func(func) or func
# if string function
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
return self._transform_fast(
lambda: getattr(self, func)(*args, **kwargs), func)
# reg transform
klass = self._selected_obj.__class__
results = []
wrapper = lambda x: func(x, *args, **kwargs)
for name, group in self:
object.__setattr__(group, 'name', name)
res = wrapper(group)
if hasattr(res, 'values'):
res = res.values
indexer = self._get_index(name)
s = klass(res, indexer)
results.append(s)
from pandas.core.reshape.concat import concat
result = concat(results).sort_index()
# we will only try to coerce the result type if
# we have a numeric dtype, as these are *always* udfs
        # the cython functions take a different path (and casting)
dtype = self._selected_obj.dtype
if is_numeric_dtype(dtype):
result = maybe_downcast_to_dtype(result, dtype)
result.name = self._selected_obj.name
result.index = self._selected_obj.index
return result
def _transform_fast(self, func, func_nm):
"""
fast version of transform, only applicable to
builtin/cythonizable functions
"""
if isinstance(func, compat.string_types):
func = getattr(self, func)
ids, _, ngroup = self.grouper.group_info
cast = self._transform_should_cast(func_nm)
out = algorithms.take_1d(func().values, ids)
if cast:
out = self._try_cast(out, self.obj)
return Series(out, index=self.obj.index, name=self.obj.name)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a Series excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
func : function
To apply to each group. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> df.groupby('A').B.filter(lambda x: x.mean() > 3.)
1 2
3 4
5 6
Name: B, dtype: int64
Returns
-------
filtered : Series
"""
if isinstance(func, compat.string_types):
wrapper = lambda x: getattr(x, func)(*args, **kwargs)
else:
wrapper = lambda x: func(x, *args, **kwargs)
# Interpret np.nan as False.
def true_and_notna(x, *args, **kwargs):
b = wrapper(x, *args, **kwargs)
return b and notna(b)
try:
indices = [self._get_index(name) for name, group in self
if true_and_notna(group)]
except ValueError:
raise TypeError("the filter must return a boolean result")
except TypeError:
raise TypeError("the filter must return a boolean result")
filtered = self._apply_filter(indices, dropna)
return filtered
def nunique(self, dropna=True):
""" Returns number of unique elements in the group """
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
try:
sorter = np.lexsort((val, ids))
except TypeError: # catches object dtypes
assert val.dtype == object, \
'val.dtype must be object, got %s' % val.dtype
val, _ = algorithms.factorize(val, sort=False)
sorter = np.lexsort((val, ids))
_isna = lambda a: a == -1
else:
_isna = isna
ids, val = ids[sorter], val[sorter]
# group boundaries are where group ids change
# unique observations are where sorted values change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
inc = np.r_[1, val[1:] != val[:-1]]
# 1st item of each group is a new unique observation
mask = _isna(val)
if dropna:
inc[idx] = 1
inc[mask] = 0
else:
inc[mask & np.r_[False, mask[:-1]]] = 0
inc[idx] = 1
out = np.add.reduceat(inc, idx).astype('int64', copy=False)
if len(ids):
# NaN/NaT group exists if the head of ids is -1,
# so remove it from res and exclude its index from idx
if ids[0] == -1:
res = out[1:]
idx = idx[np.flatnonzero(idx)]
else:
res = out
else:
res = out[1:]
ri = self.grouper.result_index
# we might have duplications among the bins
if len(res) != len(ri):
res, out = np.zeros(len(ri), dtype=out.dtype), res
res[ids[idx]] = out
return Series(res,
index=ri,
name=self._selection_name)
@Appender(Series.describe.__doc__)
def describe(self, **kwargs):
result = self.apply(lambda x: x.describe(**kwargs))
if self.axis == 1:
return result.T
return result.unstack()
def value_counts(self, normalize=False, sort=True, ascending=False,
bins=None, dropna=True):
from pandas.core.reshape.tile import cut
from pandas.core.reshape.merge import _get_join_indexers
if bins is not None and not np.iterable(bins):
# scalar bins cannot be done at top level
# in a backward compatible way
return self.apply(Series.value_counts,
normalize=normalize,
sort=sort,
ascending=ascending,
bins=bins)
ids, _, _ = self.grouper.group_info
val = self.obj.get_values()
# groupby removes null keys from groupings
mask = ids != -1
ids, val = ids[mask], val[mask]
if bins is None:
lab, lev = algorithms.factorize(val, sort=True)
llab = lambda lab, inc: lab[inc]
else:
# lab is a Categorical with categories an IntervalIndex
lab = cut(Series(val), bins, include_lowest=True)
lev = lab.cat.categories
lab = lev.take(lab.cat.codes)
llab = lambda lab, inc: lab[inc]._multiindex.labels[-1]
if is_interval_dtype(lab):
# TODO: should we do this inside II?
sorter = np.lexsort((lab.left, lab.right, ids))
else:
sorter = np.lexsort((lab, ids))
ids, lab = ids[sorter], lab[sorter]
# group boundaries are where group ids change
idx = np.r_[0, 1 + np.nonzero(ids[1:] != ids[:-1])[0]]
# new values are where sorted labels change
lchanges = llab(lab, slice(1, None)) != llab(lab, slice(None, -1))
inc = np.r_[True, lchanges]
inc[idx] = True # group boundaries are also new values
out = np.diff(np.nonzero(np.r_[inc, True])[0]) # value counts
# num. of times each group should be repeated
rep = partial(np.repeat, repeats=np.add.reduceat(inc, idx))
# multi-index components
labels = list(map(rep, self.grouper.recons_labels)) + [llab(lab, inc)]
levels = [ping.group_index for ping in self.grouper.groupings] + [lev]
names = self.grouper.names + [self._selection_name]
if dropna:
mask = labels[-1] != -1
if mask.all():
dropna = False
else:
out, labels = out[mask], [label[mask] for label in labels]
if normalize:
out = out.astype('float')
d = np.diff(np.r_[idx, len(ids)])
if dropna:
m = ids[lab == -1]
np.add.at(d, m, -1)
acc = rep(d)[mask]
else:
acc = rep(d)
out /= acc
if sort and bins is None:
cat = ids[inc][mask] if dropna else ids[inc]
sorter = np.lexsort((out if ascending else -out, cat))
out, labels[-1] = out[sorter], labels[-1][sorter]
if bins is None:
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
# for compat. with libgroupby.value_counts need to ensure every
# bin is present at every index level, null filled with zeros
diff = np.zeros(len(out), dtype='bool')
for lab in labels[:-1]:
diff |= np.r_[True, lab[1:] != lab[:-1]]
ncat, nbin = diff.sum(), len(levels[-1])
left = [np.repeat(np.arange(ncat), nbin),
np.tile(np.arange(nbin), ncat)]
right = [diff.cumsum() - 1, labels[-1]]
_, idx = _get_join_indexers(left, right, sort=False, how='left')
out = np.where(idx != -1, out[idx], 0)
if sort:
sorter = np.lexsort((out if ascending else -out, left[0]))
out, left[-1] = out[sorter], left[-1][sorter]
# build the multi-index w/ full levels
labels = list(map(lambda lab: np.repeat(lab[diff], nbin), labels[:-1]))
labels.append(left[-1])
mi = MultiIndex(levels=levels, labels=labels, names=names,
verify_integrity=False)
if is_integer_dtype(out):
out = _ensure_int64(out)
return Series(out, index=mi, name=self._selection_name)
def count(self):
""" Compute count of group, excluding missing values """
ids, _, ngroups = self.grouper.group_info
val = self.obj.get_values()
mask = (ids != -1) & ~isna(val)
ids = _ensure_platform_int(ids)
out = np.bincount(ids[mask], minlength=ngroups or 0)
return Series(out,
index=self.grouper.result_index,
name=self._selection_name,
dtype='int64')
def _apply_to_column_groupbys(self, func):
""" return a pass thru """
return func(self)
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None):
"""Calculate percent change of each value to previous entry in group"""
filled = getattr(self, fill_method)(limit=limit)
shifted = filled.shift(periods=periods, freq=freq)
return (filled / shifted) - 1
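# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): a hedged
# tour of the SeriesGroupBy surface defined above through the public API.
# The toy series and the behaviour noted in the comments are assumptions
# for illustration, not excerpts from pandas' tests.
def _example_series_groupby_surface():
    import pandas as pd  # local import to keep the sketch self-contained
    s = pd.Series([1, 2, 3, 4], index=['a', 'a', 'b', 'b'])
    grouped = s.groupby(level=0)
    aggregated = grouped.agg(['min', 'max'])          # one row per group
    transformed = grouped.transform('mean')           # same shape as ``s``
    filtered = grouped.filter(lambda x: x.sum() > 3)  # drops group 'a'
    distinct = grouped.nunique()                      # unique values per group
    return aggregated, transformed, filtered, distinct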
class NDFrameGroupBy(GroupBy):
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self.obj.columns
else:
slice_axis = self._selection_list
slicer = lambda x: self.obj[x]
else:
slice_axis = self.obj.index
slicer = self.obj.xs
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def _cython_agg_general(self, how, alt=None, numeric_only=True,
min_count=-1):
new_items, new_blocks = self._cython_agg_blocks(
how, alt=alt, numeric_only=numeric_only, min_count=min_count)
return self._wrap_agged_blocks(new_items, new_blocks)
def _wrap_agged_blocks(self, items, blocks):
obj = self._obj_with_exclusions
new_axes = list(obj._data.axes)
# more kludge
if self.axis == 0:
new_axes[0], new_axes[1] = new_axes[1], self.grouper.result_index
else:
new_axes[self.axis] = self.grouper.result_index
# Make sure block manager integrity check passes.
assert new_axes[0].equals(items)
new_axes[0] = items
mgr = BlockManager(blocks, new_axes)
new_obj = type(obj)(mgr)
return self._post_process_cython_aggregate(new_obj)
_block_agg_axis = 0
def _cython_agg_blocks(self, how, alt=None, numeric_only=True,
min_count=-1):
# TODO: the actual managing of mgr_locs is a PITA
# here, it should happen via BlockManager.combine
data, agg_axis = self._get_data_to_aggregate()
if numeric_only:
data = data.get_numeric_data(copy=False)
new_blocks = []
new_items = []
deleted_items = []
for block in data.blocks:
locs = block.mgr_locs.as_array
try:
result, _ = self.grouper.aggregate(
block.values, how, axis=agg_axis, min_count=min_count)
except NotImplementedError:
# generally if we have numeric_only=False
# and non-applicable functions
# try to python agg
if alt is None:
# we cannot perform the operation
# in an alternate way, exclude the block
deleted_items.append(locs)
continue
# call our grouper again with only this block
obj = self.obj[data.items[locs]]
s = groupby(obj, self.grouper)
result = s.aggregate(lambda x: alt(x, axis=self.axis))
newb = result._data.blocks[0]
finally:
# see if we can cast the block back to the original dtype
result = block._try_coerce_and_cast_result(result)
newb = block.make_block(result)
new_items.append(locs)
new_blocks.append(newb)
if len(new_blocks) == 0:
raise DataError('No numeric types to aggregate')
# reset the locs in the blocks to correspond to our
# current ordering
indexer = np.concatenate(new_items)
new_items = data.items.take(np.sort(indexer))
if len(deleted_items):
# we need to adjust the indexer to account for the
# items we have removed
# really should be done in internals :<
deleted = np.concatenate(deleted_items)
ai = np.arange(len(data))
mask = np.zeros(len(data))
mask[deleted] = 1
indexer = (ai - mask.cumsum())[indexer]
offset = 0
for b in new_blocks:
loc = len(b.mgr_locs)
b.mgr_locs = indexer[offset:(offset + loc)]
offset += loc
return new_items, new_blocks
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 0:
return obj.swapaxes(0, 1)._data, 1
else:
return obj._data, self.axis
def _post_process_cython_aggregate(self, obj):
# undoing kludge from below
if self.axis == 0:
obj = obj.swapaxes(0, 1)
return obj
def aggregate(self, arg, *args, **kwargs):
_level = kwargs.pop('_level', None)
result, how = self._aggregate(arg, _level=_level, *args, **kwargs)
if how is None:
return result
if result is None:
# grouper specific aggregations
if self.grouper.nkeys > 1:
return self._python_agg_general(arg, *args, **kwargs)
else:
# try to treat as if we are passing a list
try:
assert not args and not kwargs
result = self._aggregate_multiple_funcs(
[arg], _level=_level, _axis=self.axis)
result.columns = Index(
result.columns.levels[0],
name=self._selected_obj.columns.name)
except Exception:
result = self._aggregate_generic(arg, *args, **kwargs)
if not self.as_index:
self._insert_inaxis_grouper_inplace(result)
result.index = np.arange(len(result))
return result._convert(datetime=True)
agg = aggregate
def _aggregate_generic(self, func, *args, **kwargs):
if self.grouper.nkeys != 1:
raise AssertionError('Number of keys must be 1')
axis = self.axis
obj = self._obj_with_exclusions
result = {}
if axis != obj._info_axis_number:
try:
for name, data in self:
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
return self._aggregate_item_by_item(func, *args, **kwargs)
else:
for name in self.indices:
try:
data = self.get_group(name, obj=obj)
result[name] = self._try_cast(func(data, *args, **kwargs),
data)
except Exception:
wrapper = lambda x: func(x, *args, **kwargs)
result[name] = data.apply(wrapper, axis=axis)
return self._wrap_generic_output(result, obj)
def _wrap_aggregated_output(self, output, names=None):
raise com.AbstractMethodError(self)
def _aggregate_item_by_item(self, func, *args, **kwargs):
# only for axis==0
obj = self._obj_with_exclusions
result = {}
cannot_agg = []
errors = None
for item in obj:
try:
data = obj[item]
colg = SeriesGroupBy(data, selection=item,
grouper=self.grouper)
result[item] = self._try_cast(
colg.aggregate(func, *args, **kwargs), data)
except ValueError:
cannot_agg.append(item)
continue
except TypeError as e:
cannot_agg.append(item)
errors = e
continue
result_columns = obj.columns
if cannot_agg:
result_columns = result_columns.drop(cannot_agg)
# GH6337
if not len(result_columns) and errors is not None:
raise errors
return DataFrame(result, columns=result_columns)
def _decide_output_index(self, output, labels):
if len(output) == len(labels):
output_keys = labels
else:
output_keys = sorted(output)
try:
output_keys.sort()
except Exception: # pragma: no cover
pass
if isinstance(labels, MultiIndex):
output_keys = MultiIndex.from_tuples(output_keys,
names=labels.names)
return output_keys
def _wrap_applied_output(self, keys, values, not_indexed_same=False):
from pandas.core.index import _all_indexes_same
from pandas.core.tools.numeric import to_numeric
if len(keys) == 0:
return DataFrame(index=keys)
key_names = self.grouper.names
# GH12824.
def first_not_none(values):
try:
return next(com._not_none(*values))
except StopIteration:
return None
v = first_not_none(values)
if v is None:
# GH9684. If all values are None, then this will throw an error.
# We'd prefer it return an empty dataframe.
return DataFrame()
elif isinstance(v, DataFrame):
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
elif self.grouper.groupings is not None:
if len(self.grouper.groupings) > 1:
key_index = self.grouper.result_index
else:
ping = self.grouper.groupings[0]
if len(keys) == ping.ngroups:
key_index = ping.group_index
key_index.name = key_names[0]
key_lookup = Index(keys)
indexer = key_lookup.get_indexer(key_index)
# reorder the values
values = [values[i] for i in indexer]
else:
key_index = Index(keys, name=key_names[0])
# don't use the key indexer
if not self.as_index:
key_index = None
# make Nones an empty object
v = first_not_none(values)
if v is None:
return DataFrame()
elif isinstance(v, NDFrame):
values = [
x if x is not None else
v._constructor(**v._construct_axes_dict())
for x in values
]
v = values[0]
if isinstance(v, (np.ndarray, Index, Series)):
if isinstance(v, Series):
applied_index = self._selected_obj._get_axis(self.axis)
all_indexed_same = _all_indexes_same([
x.index for x in values
])
singular_series = (len(values) == 1 and
applied_index.nlevels == 1)
# GH3596
# provide a reduction (Frame -> Series) if groups are
# unique
if self.squeeze:
# assign the name to this series
if singular_series:
values[0].name = keys[0]
# GH2893
# we have series in the values array, we want to
# produce a series:
# if any of the sub-series are not indexed the same
# OR we don't have a multi-index and we have only a
# single values
return self._concat_objects(
keys, values, not_indexed_same=not_indexed_same
)
# still a series
# path added as of GH 5545
elif all_indexed_same:
from pandas.core.reshape.concat import concat
return concat(values)
if not all_indexed_same:
# GH 8467
return self._concat_objects(
keys, values, not_indexed_same=True,
)
try:
if self.axis == 0:
# GH6124 if the list of Series have a consistent name,
# then propagate that name to the result.
index = v.index.copy()
if index.name is None:
# Only propagate the series name to the result
# if all series have a consistent name. If the
# series do not have a consistent name, do
# nothing.
names = {v.name for v in values}
if len(names) == 1:
index.name = list(names)[0]
# normally use vstack as its faster than concat
# and if we have mi-columns
if (isinstance(v.index, MultiIndex) or
key_index is None or
isinstance(key_index, MultiIndex)):
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values, index=key_index,
columns=index)
else:
# GH5788 instead of stacking; concat gets the
# dtypes correct
from pandas.core.reshape.concat import concat
result = concat(values, keys=key_index,
names=key_index.names,
axis=self.axis).unstack()
result.columns = index
else:
stacked_values = np.vstack(map(np.asarray, values))
result = DataFrame(stacked_values.T, index=v.index,
columns=key_index)
except (ValueError, AttributeError):
            # GH1738: values is a list of arrays of unequal lengths; fall
            # through to the outer else clause
return Series(values, index=key_index,
name=self._selection_name)
# if we have date/time like in the original, then coerce dates
# as we are stacking can easily have object dtypes here
so = self._selected_obj
if (so.ndim == 2 and so.dtypes.apply(is_datetimelike).any()):
result = result.apply(
lambda x: to_numeric(x, errors='ignore'))
date_cols = self._selected_obj.select_dtypes(
include=['datetime', 'timedelta']).columns
date_cols = date_cols.intersection(result.columns)
result[date_cols] = (result[date_cols]
._convert(datetime=True,
coerce=True))
else:
result = result._convert(datetime=True)
return self._reindex_output(result)
# values are not series or array-like but scalars
else:
# only coerce dates if we find at least 1 datetime
coerce = any(isinstance(x, Timestamp) for x in values)
# self._selection_name not passed through to Series as the
# result should not take the name of original selection
# of columns
return (Series(values, index=key_index)
._convert(datetime=True,
coerce=coerce))
else:
# Handle cases like BinGrouper
return self._concat_objects(keys, values,
not_indexed_same=not_indexed_same)
def _transform_general(self, func, *args, **kwargs):
from pandas.core.reshape.concat import concat
applied = []
obj = self._obj_with_exclusions
gen = self.grouper.get_iterator(obj, axis=self.axis)
fast_path, slow_path = self._define_paths(func, *args, **kwargs)
path = None
for name, group in gen:
object.__setattr__(group, 'name', name)
if path is None:
# Try slow path and fast path.
try:
path, res = self._choose_path(fast_path, slow_path, group)
except TypeError:
return self._transform_item_by_item(obj, fast_path)
except ValueError:
msg = 'transform must return a scalar value for each group'
raise ValueError(msg)
else:
res = path(group)
if isinstance(res, Series):
# we need to broadcast across the
# other dimension; this will preserve dtypes
# GH14457
if not np.prod(group.shape):
continue
elif res.index.is_(obj.index):
r = concat([res] * len(group.columns), axis=1)
r.columns = group.columns
r.index = group.index
else:
r = DataFrame(
np.concatenate([res.values] * len(group.index)
).reshape(group.shape),
columns=group.columns, index=group.index)
applied.append(r)
else:
applied.append(res)
concat_index = obj.columns if self.axis == 0 else obj.index
concatenated = concat(applied, join_axes=[concat_index],
axis=self.axis, verify_integrity=False)
return self._set_result_index_ordered(concatenated)
@Substitution(klass='DataFrame', selected='')
@Appender(_transform_template)
def transform(self, func, *args, **kwargs):
# optimized transforms
func = self._is_cython_func(func) or func
if isinstance(func, compat.string_types):
if func in _cython_transforms:
# cythonized transform
return getattr(self, func)(*args, **kwargs)
else:
# cythonized aggregation and merge
result = getattr(self, func)(*args, **kwargs)
else:
return self._transform_general(func, *args, **kwargs)
# a reduction transform
if not isinstance(result, DataFrame):
return self._transform_general(func, *args, **kwargs)
obj = self._obj_with_exclusions
        # nuisance columns
if not result.columns.equals(obj.columns):
return self._transform_general(func, *args, **kwargs)
return self._transform_fast(result, obj, func)
def _transform_fast(self, result, obj, func_nm):
"""
Fast transform path for aggregations
"""
# if there were groups with no observations (Categorical only?)
# try casting data to original dtype
cast = self._transform_should_cast(func_nm)
        # for each col, reshape to the size of the original frame
# by take operation
ids, _, ngroup = self.grouper.group_info
output = []
for i, _ in enumerate(result.columns):
res = algorithms.take_1d(result.iloc[:, i].values, ids)
if cast:
res = self._try_cast(res, obj.iloc[:, i])
output.append(res)
return DataFrame._from_arrays(output, columns=result.columns,
index=obj.index)
def _define_paths(self, func, *args, **kwargs):
if isinstance(func, compat.string_types):
fast_path = lambda group: getattr(group, func)(*args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: getattr(x, func)(*args, **kwargs), axis=self.axis)
else:
fast_path = lambda group: func(group, *args, **kwargs)
slow_path = lambda group: group.apply(
lambda x: func(x, *args, **kwargs), axis=self.axis)
return fast_path, slow_path
def _choose_path(self, fast_path, slow_path, group):
path = slow_path
res = slow_path(group)
# if we make it here, test if we can use the fast path
try:
res_fast = fast_path(group)
# compare that we get the same results
if res.shape == res_fast.shape:
res_r = res.values.ravel()
res_fast_r = res_fast.values.ravel()
mask = notna(res_r)
if (res_r[mask] == res_fast_r[mask]).all():
path = fast_path
except Exception:
pass
return path, res
def _transform_item_by_item(self, obj, wrapper):
# iterate through columns
output = {}
inds = []
for i, col in enumerate(obj):
try:
output[col] = self[col].transform(wrapper)
inds.append(i)
except Exception:
pass
if len(output) == 0: # pragma: no cover
raise TypeError('Transform function invalid for data types')
columns = obj.columns
if len(output) < len(obj.columns):
columns = columns.take(inds)
return DataFrame(output, index=obj.index, columns=columns)
def filter(self, func, dropna=True, *args, **kwargs): # noqa
"""
Return a copy of a DataFrame excluding elements from groups that
do not satisfy the boolean criterion specified by func.
Parameters
----------
f : function
Function to apply to each subframe. Should return True or False.
dropna : Drop groups that do not pass the filter. True by default;
if False, groups that evaluate False are filled with NaNs.
Notes
-----
Each subframe is endowed the attribute 'name' in case you need to know
which group you are working on.
Examples
--------
>>> import pandas as pd
>>> df = pd.DataFrame({'A' : ['foo', 'bar', 'foo', 'bar',
... 'foo', 'bar'],
... 'B' : [1, 2, 3, 4, 5, 6],
... 'C' : [2.0, 5., 8., 1., 2., 9.]})
>>> grouped = df.groupby('A')
>>> grouped.filter(lambda x: x['B'].mean() > 3.)
A B C
1 bar 2 5.0
3 bar 4 1.0
5 bar 6 9.0
Returns
-------
filtered : DataFrame
"""
indices = []
obj = self._selected_obj
gen = self.grouper.get_iterator(obj, axis=self.axis)
for name, group in gen:
object.__setattr__(group, 'name', name)
res = func(group, *args, **kwargs)
try:
res = res.squeeze()
except AttributeError: # allow e.g., scalars and frames to pass
pass
# interpret the result of the filter
if is_bool(res) or (is_scalar(res) and isna(res)):
if res and notna(res):
indices.append(self._get_index(name))
else:
# non scalars aren't allowed
raise TypeError("filter function returned a %s, "
"but expected a scalar bool" %
type(res).__name__)
return self._apply_filter(indices, dropna)
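# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): the
# transform machinery above broadcasts a per-group reduction back to the
# original shape (the "fast path"), while an arbitrary callable goes
# through the general path group by group.  The frame below is an
# assumption; both calls are expected to return a result aligned to the
# original index.
def _example_frame_transform_paths():
    import pandas as pd  # local import to keep the sketch self-contained
    df = pd.DataFrame({'key': ['a', 'a', 'b', 'b'],
                       'val': [1., 2., 3., 4.]})
    grouped = df.groupby('key')
    # reduction name: each row receives its group mean
    by_name = grouped['val'].transform('mean')
    # arbitrary callable: centred within each group, same alignment
    by_callable = grouped['val'].transform(lambda x: x - x.mean())
    return by_name, by_callable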
class DataFrameGroupBy(NDFrameGroupBy):
_apply_whitelist = _dataframe_apply_whitelist
#
# Make class defs of attributes on DataFrameGroupBy whitelist.
for _def_str in _whitelist_method_generator(DataFrame, _apply_whitelist):
exec(_def_str)
_block_agg_axis = 1
_agg_doc = dedent("""
Examples
--------
>>> df = pd.DataFrame({'A': [1, 1, 2, 2],
... 'B': [1, 2, 3, 4],
... 'C': np.random.randn(4)})
>>> df
A B C
0 1 1 0.362838
1 1 2 0.227877
2 2 3 1.267767
3 2 4 -0.562860
The aggregation is for each column.
>>> df.groupby('A').agg('min')
B C
A
1 1 0.227877
2 3 -0.562860
Multiple aggregations
>>> df.groupby('A').agg(['min', 'max'])
B C
min max min max
A
1 1 2 0.227877 0.362838
2 3 4 -0.562860 1.267767
Select a column for aggregation
>>> df.groupby('A').B.agg(['min', 'max'])
min max
A
1 1 2
2 3 4
Different aggregations per column
>>> df.groupby('A').agg({'B': ['min', 'max'], 'C': 'sum'})
B C
min max sum
A
1 1 2 0.590716
2 3 4 0.704907
See also
--------
pandas.DataFrame.groupby.apply
pandas.DataFrame.groupby.transform
pandas.DataFrame.aggregate
""")
@Appender(_agg_doc)
@Appender(_shared_docs['aggregate'] % dict(
klass='DataFrame',
versionadded='',
axis=''))
def aggregate(self, arg, *args, **kwargs):
return super(DataFrameGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _gotitem(self, key, ndim, subset=None):
"""
sub-classes to define
return a sliced object
Parameters
----------
key : string / list of selections
ndim : 1,2
requested ndim of result
subset : object, default None
subset to act on
"""
if ndim == 2:
if subset is None:
subset = self.obj
return DataFrameGroupBy(subset, self.grouper, selection=key,
grouper=self.grouper,
exclusions=self.exclusions,
as_index=self.as_index)
elif ndim == 1:
if subset is None:
subset = self.obj[key]
return SeriesGroupBy(subset, selection=key,
grouper=self.grouper)
raise AssertionError("invalid ndim for _gotitem")
def _wrap_generic_output(self, result, obj):
result_index = self.grouper.levels[0]
if self.axis == 0:
return DataFrame(result, index=obj.columns,
columns=result_index).T
else:
return DataFrame(result, index=obj.index,
columns=result_index)
def _get_data_to_aggregate(self):
obj = self._obj_with_exclusions
if self.axis == 1:
return obj.T._data, 1
else:
return obj._data, 1
def _insert_inaxis_grouper_inplace(self, result):
# zip in reverse so we can always insert at loc 0
izip = zip(* map(reversed, (
self.grouper.names,
self.grouper.get_group_levels(),
[grp.in_axis for grp in self.grouper.groupings])))
for name, lev, in_axis in izip:
if in_axis:
result.insert(0, name, lev)
def _wrap_aggregated_output(self, output, names=None):
agg_axis = 0 if self.axis == 1 else 1
agg_labels = self._obj_with_exclusions._get_axis(agg_axis)
output_keys = self._decide_output_index(output, agg_labels)
if not self.as_index:
result = DataFrame(output, columns=output_keys)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
result = DataFrame(output, index=index, columns=output_keys)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _wrap_transformed_output(self, output, names=None):
return DataFrame(output, index=self.obj.index)
def _wrap_agged_blocks(self, items, blocks):
if not self.as_index:
index = np.arange(blocks[0].values.shape[1])
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
self._insert_inaxis_grouper_inplace(result)
result = result._consolidate()
else:
index = self.grouper.result_index
mgr = BlockManager(blocks, [items, index])
result = DataFrame(mgr)
if self.axis == 1:
result = result.T
return self._reindex_output(result)._convert(datetime=True)
def _reindex_output(self, result):
"""
If we have categorical groupers, then we want to make sure that
        we have a fully reindexed output over the levels. These may not have
        participated in the groupings (e.g. may have all been
nan groups);
This can re-expand the output space
"""
        # we need to re-expand the output space to accommodate all values
        # whether observed or not in the cartesian product of our groupers
groupings = self.grouper.groupings
if groupings is None:
return result
elif len(groupings) == 1:
return result
# if we only care about the observed values
# we are done
elif self.observed:
return result
# reindexing only applies to a Categorical grouper
elif not any(isinstance(ping.grouper, (Categorical, CategoricalIndex))
for ping in groupings):
return result
levels_list = [ping.group_index for ping in groupings]
index, _ = MultiIndex.from_product(
levels_list, names=self.grouper.names).sortlevel()
if self.as_index:
d = {self.obj._get_axis_name(self.axis): index, 'copy': False}
return result.reindex(**d)
# GH 13204
# Here, the categorical in-axis groupers, which need to be fully
# expanded, are columns in `result`. An idea is to do:
# result = result.set_index(self.grouper.names)
# .reindex(index).reset_index()
# but special care has to be taken because of possible not-in-axis
# groupers.
# So, we manually select and drop the in-axis grouper columns,
# reindex `result`, and then reset the in-axis grouper columns.
# Select in-axis groupers
in_axis_grps = [(i, ping.name) for (i, ping)
in enumerate(groupings) if ping.in_axis]
g_nums, g_names = zip(*in_axis_grps)
result = result.drop(labels=list(g_names), axis=1)
# Set a temp index and reindex (possibly expanding)
result = result.set_index(self.grouper.result_index
).reindex(index, copy=False)
# Reset in-axis grouper columns
# (using level numbers `g_nums` because level names may not be unique)
result = result.reset_index(level=g_nums)
return result.reset_index(drop=True)
def _iterate_column_groupbys(self):
for i, colname in enumerate(self._selected_obj.columns):
yield colname, SeriesGroupBy(self._selected_obj.iloc[:, i],
selection=colname,
grouper=self.grouper,
exclusions=self.exclusions)
def _apply_to_column_groupbys(self, func):
from pandas.core.reshape.concat import concat
return concat(
(func(col_groupby) for _, col_groupby
in self._iterate_column_groupbys()),
keys=self._selected_obj.columns, axis=1)
def _fill(self, direction, limit=None):
"""Overridden method to join grouped columns in output"""
res = super(DataFrameGroupBy, self)._fill(direction, limit=limit)
output = collections.OrderedDict(
(grp.name, grp.grouper) for grp in self.grouper.groupings)
from pandas import concat
return concat((self._wrap_transformed_output(output), res), axis=1)
def count(self):
""" Compute count of group, excluding missing values """
from pandas.core.dtypes.missing import _isna_ndarraylike as isna
data, _ = self._get_data_to_aggregate()
ids, _, ngroups = self.grouper.group_info
mask = ids != -1
val = ((mask & ~isna(np.atleast_2d(blk.get_values())))
for blk in data.blocks)
loc = (blk.mgr_locs for blk in data.blocks)
counter = partial(count_level_2d, labels=ids, max_bin=ngroups, axis=1)
blk = map(make_block, map(counter, val), loc)
return self._wrap_agged_blocks(data.items, list(blk))
def nunique(self, dropna=True):
"""
Return DataFrame with number of distinct observations per group for
each column.
.. versionadded:: 0.20.0
Parameters
----------
dropna : boolean, default True
Don't include NaN in the counts.
Returns
-------
nunique: DataFrame
Examples
--------
>>> df = pd.DataFrame({'id': ['spam', 'egg', 'egg', 'spam',
... 'ham', 'ham'],
... 'value1': [1, 5, 5, 2, 5, 5],
... 'value2': list('abbaxy')})
>>> df
id value1 value2
0 spam 1 a
1 egg 5 b
2 egg 5 b
3 spam 2 a
4 ham 5 x
5 ham 5 y
>>> df.groupby('id').nunique()
id value1 value2
id
egg 1 1 1
ham 1 1 2
spam 1 2 1
# check for rows with the same id but conflicting values
>>> df.groupby('id').filter(lambda g: (g.nunique() > 1).any())
id value1 value2
0 spam 1 a
3 spam 2 a
4 ham 5 x
5 ham 5 y
"""
obj = self._selected_obj
def groupby_series(obj, col=None):
return SeriesGroupBy(obj,
selection=col,
grouper=self.grouper).nunique(dropna=dropna)
if isinstance(obj, Series):
results = groupby_series(obj)
else:
from pandas.core.reshape.concat import concat
results = [groupby_series(obj[col], col) for col in obj.columns]
results = concat(results, axis=1)
if not self.as_index:
results.index = com._default_index(len(results))
return results
boxplot = boxplot_frame_groupby
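# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): the
# ``_reindex_output`` helper above is what surfaces unobserved category
# combinations in the result.  The categorical frame and the effect of
# ``observed`` described in the comments are assumptions, intended only
# to show the behaviour from the public side.
def _example_categorical_reindex_output():
    import pandas as pd  # local import to keep the sketch self-contained
    df = pd.DataFrame({
        'cat': pd.Categorical(['x', 'x', 'y'], categories=['x', 'y', 'z']),
        'key': ['a', 'b', 'a'],
        'val': [1, 2, 3],
    })
    # default (observed=False): the result is expanded to the full
    # cartesian product of categories and keys, filling missing combos
    full = df.groupby(['cat', 'key']).sum()
    # observed=True keeps only the combinations actually seen in the data
    seen = df.groupby(['cat', 'key'], observed=True).sum()
    return full, seen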
class PanelGroupBy(NDFrameGroupBy):
def aggregate(self, arg, *args, **kwargs):
return super(PanelGroupBy, self).aggregate(arg, *args, **kwargs)
agg = aggregate
def _iterate_slices(self):
if self.axis == 0:
# kludge
if self._selection is None:
slice_axis = self._selected_obj.items
else:
slice_axis = self._selection_list
slicer = lambda x: self._selected_obj[x]
else:
raise NotImplementedError("axis other than 0 is not supported")
for val in slice_axis:
if val in self.exclusions:
continue
yield val, slicer(val)
def aggregate(self, arg, *args, **kwargs):
"""
Aggregate using input function or dict of {column -> function}
Parameters
----------
arg : function or dict
Function to use for aggregating groups. If a function, must either
work when passed a Panel or when passed to Panel.apply. If
            a dict is passed, the keys must be DataFrame column names
Returns
-------
aggregated : Panel
"""
if isinstance(arg, compat.string_types):
return getattr(self, arg)(*args, **kwargs)
return self._aggregate_generic(arg, *args, **kwargs)
def _wrap_generic_output(self, result, obj):
if self.axis == 0:
new_axes = list(obj.axes)
new_axes[0] = self.grouper.result_index
elif self.axis == 1:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, z, x]
else:
x, y, z = obj.axes
new_axes = [self.grouper.result_index, y, x]
result = Panel._from_axes(result, new_axes)
if self.axis == 1:
result = result.swapaxes(0, 1).swapaxes(0, 2)
elif self.axis == 2:
result = result.swapaxes(0, 2)
return result
def _aggregate_item_by_item(self, func, *args, **kwargs):
obj = self._obj_with_exclusions
result = {}
if self.axis > 0:
for item in obj:
try:
itemg = DataFrameGroupBy(obj[item],
axis=self.axis - 1,
grouper=self.grouper)
result[item] = itemg.aggregate(func, *args, **kwargs)
except (ValueError, TypeError):
raise
new_axes = list(obj.axes)
new_axes[self.axis] = self.grouper.result_index
return Panel._from_axes(result, new_axes)
else:
raise ValueError("axis value must be greater than 0")
def _wrap_aggregated_output(self, output, names=None):
raise com.AbstractMethodError(self)
# ----------------------------------------------------------------------
# Splitting / application
class DataSplitter(object):
def __init__(self, data, labels, ngroups, axis=0):
self.data = data
self.labels = _ensure_int64(labels)
self.ngroups = ngroups
self.axis = axis
@cache_readonly
def slabels(self):
# Sorted labels
return algorithms.take_nd(self.labels, self.sort_idx, allow_fill=False)
@cache_readonly
def sort_idx(self):
# Counting sort indexer
return get_group_index_sorter(self.labels, self.ngroups)
def __iter__(self):
sdata = self._get_sorted_data()
if self.ngroups == 0:
# we are inside a generator, rather than raise StopIteration
            # we merely return to signal the end
return
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
for i, (start, end) in enumerate(zip(starts, ends)):
# Since I'm now compressing the group ids, it's now not "possible"
# to produce empty slices because such groups would not be observed
# in the data
# if start >= end:
# raise AssertionError('Start %s must be less than end %s'
# % (str(start), str(end)))
yield i, self._chop(sdata, slice(start, end))
def _get_sorted_data(self):
return self.data._take(self.sort_idx, axis=self.axis)
def _chop(self, sdata, slice_obj):
return sdata.iloc[slice_obj]
def apply(self, f):
raise com.AbstractMethodError(self)
class SeriesSplitter(DataSplitter):
def _chop(self, sdata, slice_obj):
return sdata._get_values(slice_obj).to_dense()
class FrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(FrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
def fast_apply(self, f, names):
# must return keys::list, values::list, mutated::bool
try:
starts, ends = lib.generate_slices(self.slabels, self.ngroups)
except Exception:
# fails when all -1
return [], True
sdata = self._get_sorted_data()
results, mutated = reduction.apply_frame_axis0(sdata, f, names,
starts, ends)
return results, mutated
def _chop(self, sdata, slice_obj):
if self.axis == 0:
return sdata.iloc[slice_obj]
else:
return sdata._slice(slice_obj, axis=1) # .loc[:, slice_obj]
class NDFrameSplitter(DataSplitter):
def __init__(self, data, labels, ngroups, axis=0):
super(NDFrameSplitter, self).__init__(data, labels, ngroups, axis=axis)
self.factory = data._constructor
def _get_sorted_data(self):
# this is the BlockManager
data = self.data._data
# this is sort of wasteful but...
sorted_axis = data.axes[self.axis].take(self.sort_idx)
sorted_data = data.reindex_axis(sorted_axis, axis=self.axis)
return sorted_data
def _chop(self, sdata, slice_obj):
return self.factory(sdata.get_slice(slice_obj, axis=self.axis))
def get_splitter(data, *args, **kwargs):
if isinstance(data, Series):
klass = SeriesSplitter
elif isinstance(data, DataFrame):
klass = FrameSplitter
else:
klass = NDFrameSplitter
return klass(data, *args, **kwargs)
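# ----------------------------------------------------------------------
# Illustrative sketch (not part of the original pandas source): the
# splitters above rely on sorting the group labels so every group becomes
# one contiguous slice of the sorted data.  The pure-numpy sketch below
# reproduces that idea with ``np.argsort``; treating it as a close stand-in
# for ``get_group_index_sorter`` is an assumption, offered as a mental
# model only.
def _example_split_by_sorted_labels():
    import numpy as np  # local import to keep the sketch self-contained
    labels = np.array([1, 0, 1, 2, 0], dtype=np.int64)
    data = np.array([10, 20, 30, 40, 50])
    sorter = np.argsort(labels, kind='mergesort')   # stable, like pandas
    slabels, sdata = labels[sorter], data[sorter]
    # group boundaries are where the sorted labels change
    boundaries = np.flatnonzero(np.r_[True, slabels[1:] != slabels[:-1]])
    starts, ends = boundaries, np.r_[boundaries[1:], len(slabels)]
    return [(slabels[s], sdata[s:e]) for s, e in zip(starts, ends)]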
| 33.665757 | 87 | 0.550032 |