| max_stars_repo_path | max_stars_repo_name | max_stars_count | id | content | score | int_score |
|---|---|---|---|---|---|---|
| src/prefect/environments/execution/__init__.py | vnsn/prefect | 8,633 | 12784051 |
"""
Execution environments encapsulate the logic for where your Flow should execute in Prefect Cloud.
DEPRECATED: Environment based configuration is deprecated, please transition to
configuring `flow.run_config` instead of `flow.environment`. See
https://docs.prefect.io/orchestration/flow_config/overview.html for more info.
"""
from prefect.environments.execution.base import Environment, load_and_run_flow
from prefect.environments.execution.dask import DaskKubernetesEnvironment
from prefect.environments.execution.dask import DaskCloudProviderEnvironment
from prefect.environments.execution.fargate import FargateTaskEnvironment
from prefect.environments.execution.k8s import KubernetesJobEnvironment
from prefect.environments.execution.local import LocalEnvironment
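# Migration sketch (not part of this module): code that used to assign one of the
# environments above to `flow.environment` now assigns a run config instead, roughly:
#
#   from prefect.run_configs import KubernetesRun
#   flow.run_config = KubernetesRun()  # rather than flow.environment = KubernetesJobEnvironment(...)
#
# See the flow_config documentation linked in the docstring above for the available
# run configs.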
| 1.234375 | 1 |
| src/load_files.py | marckolak/radio-map-calibration-robot | 0 | 12784052 |
"""
The pypositioning.system.load_files.py module contains functions for loading measurement results from various
types of files. The currently available functions load **.psd** files collected with the TI Packet Sniffer and
results obtained using the IONIS localization system.
Copyright (C) 2020 <NAME>
"""
import numpy as np
import pandas as pd
def load_ionis_file(filepath, normalize_ts=False):
""" Load measurement file from IONIS system
Parameters
----------
filepath: str
path to measurement file
normalize_ts: bool
if True set ts base to 0 (rounds ts to full seconds)
Returns
-------
ble_df: DataFrame
data frame with ble rssi results (contains whole ble packets received by the anchors)
    uwb_df: DataFrame
data frame with uwb-based toa results
ts_0: float
timestamp of the first received packet
"""
    # open the IONIS measurement file
f = open(filepath, 'r')
ble_res = []
uwb_res = []
# split and decode each line
for line in f:
s = line.split('\t')
# if int(s[3]) == 19:
# ... # the packet is empty
if int(s[5]) * 12 + int(s[6]) * 8 + 19 > int(s[3]):
print("faulty packet, ts: " + s[0])
else:
# get ble and uwb packets number
ble_n = int(s[5])
uwb_n = int(s[6])
# for each ble packet
for k in range(ble_n):
bps = 6 + k * 8
# append array [ts, an_id, an_sqn, an_pressure, BLE packet contents]
ble_res.append(s[:3] + [s[4]] + s[bps + 1:bps + 9])
for j in range(uwb_n):
ups = 6 + ble_n * 8 + j * 4
                # append array [ts, an_id, an_sqn, UWB packet contents]
uwb_res.append(s[:3] + s[ups + 1:ups + 5])
# reshape the arrays
ble_res = np.array(ble_res)
uwb_res = np.array(uwb_res)
if ble_res.size > 0:
ble_df = pd.DataFrame(data=ble_res,
columns=['ts', 'an_id', 'an_sqn', 'an_p', 'rx_id', 'tag_id', 'ble_ts', 'rssi',
'pres', 'volt', 'steps', 'alert'])
ble_df = ble_df.astype(dtype={'ts': 'float', 'an_id': 'int32', 'an_sqn': 'int32', 'an_p': 'int32',
'rx_id': 'int32', 'tag_id': 'int32', 'ble_ts': 'int32',
'rssi': 'float', 'pres': 'int32', 'volt': 'int32',
'steps': 'int32', 'alert': 'int32'})
ble_df.loc[ble_df['rssi'] == 0, 'rssi'] = np.nan
else:
ble_df = None
if uwb_res.size > 0:
uwb_df = pd.DataFrame(data=uwb_res, columns=['ts', 'an_id', 'an_sqn', 'rx_id', 'tag_id', 'uwb_sqn', 'toa'])
uwb_df = uwb_df.astype({'ts': 'float', 'an_id': 'int32', 'an_sqn': 'int32',
'rx_id': 'int32', 'tag_id': 'int32', 'uwb_sqn': 'int32', 'toa': 'float'})
uwb_df['toa'] = uwb_df['toa'].values * 15.65e-12
else:
uwb_df = None
if normalize_ts:
ts_min = 0
if (uwb_res.size > 0 and ble_res.size > 0):
ts_min = np.minimum(ble_df.ts.min(), uwb_df.ts.min())
ble_df.ts = np.rint((ble_df.ts - ts_min).values / 1000)
uwb_df.ts = np.rint((uwb_df.ts - ts_min).values / 1000)
elif uwb_res.size > 0:
ts_min = uwb_df.ts.min()
uwb_df.ts = np.rint((uwb_df.ts - ts_min).values / 1000)
print('no ble results in a file - normalizing uwb ts only')
elif ble_res.size > 0:
ts_min = ble_df.ts.min()
ble_df.ts = np.rint((ble_df.ts - ts_min).values / 1000)
print('no uwb results in a file - normalizing ble ts only')
return ble_df, uwb_df, ts_min / 1000
return ble_df, uwb_df, 0
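# Illustrative usage of load_ionis_file (the file path is hypothetical; assumes the
# IONIS export format parsed above):
#
#   ble_df, uwb_df, ts_0 = load_ionis_file('data/ionis_run.txt', normalize_ts=True)
#   if ble_df is not None:
#       print(ble_df[['ts', 'an_id', 'rssi']].head())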
def synchronize_toa_ionis(m_uwb, an, an_r):
""" Synchronize toa values according to IONIS synchronization scheme
Parameters
----------
m_uwb: DataFrame
data frame with uwb measurement results
an: ndarray
anchor nodes coordinates [id,x,y,z]
an_r: ndarray
reference anchor node coordinates [x,y,z]
Returns
-------
m_uwb: DataFrame
m_uwb data frame with toa values synchronized
"""
# initialize array with empty rows for missing anchors
an_f = np.empty((int(an[:, 0].max()), 3))
an_f[:] = np.NaN
for a in an:
an_f[int(a[0]) - 1, :] = a[1:]
m_uwb["toa"] = m_uwb.toa + np.linalg.norm(an_f[m_uwb.an_id - 1] - an_r, axis=1) / 3e8
return m_uwb
def distribute_packets_ionis(df):
""" Distribute packets that could be delayed and came to the system controller at the same time
Parameters
----------
df: DataFrame
dataframe containing measurement results with timestamps and sqns. It must include columns:
[ts, an_sqn, an_id]. Timestamp must be rounded to full seconds (might be float)
Returns
-------
df_d: DataFrame
dataframe, where the packet ts were corrected to the reception times, which would occur
without the delay
"""
# copy the dataframe
df_d = df.copy()
# get unique anchor ids
anchor_ids = df.an_id.unique()
# for each anchor search for delayed packets and distribute them
for an_id in anchor_ids:
mask_an_id = df.an_id == an_id
uts = df[mask_an_id].ts.unique()
for i in range(uts.size):
ts = df[mask_an_id & (df.ts == uts[i])]
an_sqns = ts.an_sqn.unique()
            # if more than one an_sqn shares this timestamp, the packets were delayed
if an_sqns.size > 1:
# find last properly received packet
pi = 1
while df[mask_an_id & (df.ts == uts[i - pi])].an_sqn.unique().size > 1:
pi = pi + 1
prev_ts = uts[i - pi]
prev_an_sqn = df_d[(df_d.an_id == an_id) & (df_d.ts == uts[i - pi])].an_sqn.values[0]
# correct timestamps
tse = distribute_packet_batch(ts.ts, ts.an_sqn, prev_ts, prev_an_sqn)
df_d.ts[(df_d.an_id == an_id) & (df_d.ts == uts[i])] = tse
return df_d
def distribute_packet_batch(ts, an_sqn, ts_p, an_sqn_p):
"""Correct timestamps of the packets, which were received in a batch due to delay introduced in the WiFi interface.
Parameters
----------
ts: array_like
timestamps of packets received in a batch [in seconds]
an_sqn: array_like
anchor sqns of packets received in a batch [0-255]
ts_p: float
the timestamp of the last properly received packet
an_sqn_p: int
the anchor sqn of the last properly received packet
Returns
-------
tse: ndarray
timestamps corrected to the reception times, which would occur without the delay
"""
# empty list for collected packets
tse = []
for t, ans in zip(ts, an_sqn):
# check if anchor sqn is higher than the previous one or the counter has turned
if ans >= an_sqn_p:
te = ts_p + ans - an_sqn_p
else:
te = ts_p + (256 + ans - an_sqn_p)
tse.append(te)
return np.array(tse)
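# Worked example for the correction above (values are illustrative): with
# ts_p = 100.0 s and an_sqn_p = 254, a delayed batch with an_sqn = [255, 0, 1]
# is re-timestamped to [100 + (255 - 254), 100 + (256 + 0 - 254), 100 + (256 + 1 - 254)]
# = [101, 102, 103] s, i.e. one packet per sqn step with the 8-bit counter wrapping at 256.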
def rearrange_timestamps_ble(m_ble, tag_id, packet_rate, distribute_delayed=False):
"""Change timestamps values so that the consecutive packets sent at different times do not have the same ts.
The timestamps are changed as follows: \
new_ts = ts + 1/packet_rate * N \
where N is the sequential number of BLE packet inside WiFi frame.
Parameters
----------
m_ble: DataFrame
dataframe with measurement results
tag_id: int
tag identifier
packet_rate: float
packet rate set in the systems [packets per second]
distribute_delayed: bool, optional
if True call distribute_packets_ionis
Returns
-------
m_b: DataFrame
Input m_ble DataFrame with rearranged timestamps.
"""
# filter tag id
m_b = m_ble[m_ble.tag_id == tag_id]
if distribute_delayed: # distribute delayed packets
m_b = distribute_packets_ionis(m_b)
# group and bin by BLE ts
grouped = m_b.groupby(by=['ts', 'an_id', 'an_sqn', 'tag_id'])
bins = []
for n, g in grouped:
bins.append(pd.cut(g.ble_ts, packet_rate, labels=False))
m_b['bin'] = pd.concat(bins)
# group and average power per BLE receiver
grouped = m_b.groupby(by=['ts', 'an_id', 'tag_id', 'bin'])
m_b = grouped.agg({'rssi': log_mean})
m_b = m_b.reset_index()
# get ts with 1/rate
m_b['ts'] = m_b['ts'] + m_b['bin'] / packet_rate
return m_b
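# Example of the re-timestamping formula above (illustrative numbers): with
# packet_rate = 2 packets per second, two BLE packets binned inside one WiFi frame
# with ts = 10.0 s receive new timestamps 10.0 + 0/2 = 10.0 s and 10.0 + 1/2 = 10.5 s.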
def rearrange_timestamps_uwb(m_uwb, tag_id, packet_rate, distribute_delayed=False):
"""Change timestamps values so that the consecutive packets sent at different times do not have the same ts.
The timestamps are changed as follows: \
new_ts = ts + 1/packet_rate * N \
where N is the sequential number of UWB packet inside WiFi frame.
Parameters
----------
m_uwb: DataFrame
dataframe with measurement results
tag_id: int
tag identifier
packet_rate: float
packet rate set in the systems [packets per second]
distribute_delayed: bool, optional
if True call distribute_packets_ionis
Returns
-------
m_u: DataFrame
Input m_uwb DataFrame with rearranged timestamps.
"""
# filter tag id
m_u = m_uwb[m_uwb.tag_id == tag_id].copy()
if distribute_delayed: # distribute delayed packets
m_u = distribute_packets_ionis(m_u)
# group and bin by reception ts (in this case toa value)
grouped = m_u.groupby(by=['ts', 'an_sqn'])
bins = []
for n, g in grouped:
bins.append(pd.cut(g.toa, packet_rate, labels=False))
m_u['bin'] = pd.concat(bins)
# get ts with 1/rate
m_u['ts'] = m_u['ts'] + m_u['bin'] / packet_rate
return m_u
def measurement_array(m_df, mtype, data_frame=False):
"""Create measurement array [ts, meas values...]
Parameters
----------
m_df: DataFrame
measurement dataframe
mtype: str
measurement type: 'ble', 'uwb'
data_frame: bool
        if True, additionally return the measurement DataFrame
Returns
-------
array: ndarray
measurement array in format [ts, mx, my, mz ...]
an_ids: ndarray
        anchor ids of the measurement columns; 0 is prepended for the ts column
df: DataFrame, optional
measurement array stored as dataframe (returned when data_frame==True)
"""
if mtype == 'uwb':
m = m_df[['ts', 'uwb_sqn', 'toa', 'an_id']].copy()
elif mtype == 'ble':
m = m_df[['ts', 'rssi', 'an_id']].copy()
else:
print("Unknown type")
return None, None
df = None
# get unique anchor ids
anchor_ids = np.sort(m.an_id.unique())
# create array
if mtype == 'uwb':
for i in anchor_ids:
mp = m[m.an_id == i].rename(columns={'toa': 'toa_' + str(i)}).drop(columns='an_id')
if df is None:
df = mp
else:
df = df.merge(mp, how='outer', on=['ts', 'uwb_sqn'])
df = df.sort_values(['ts', 'uwb_sqn'], ascending=[True, True]).reset_index(drop=True)
df = df.drop(columns='uwb_sqn')
elif mtype == 'ble':
for i in anchor_ids:
mp = m[m.an_id == i].rename(columns={'rssi': 'rssi_' + str(i)}).drop(columns='an_id')
if df is None:
df = mp
else:
df = df.merge(mp, how='outer', on=['ts'])
df = df.sort_values(['ts'], ascending=[True]).reset_index(drop=True)
array = df.values
anchor_ids = np.r_[0, anchor_ids] # add 0 for ts column
if data_frame:
return array, anchor_ids, df
return array, anchor_ids
def hybrid_array(dfs, on='ts', how='outer'):
""" Create a hybrid measurement array
Parameters
----------
dfs: iterable
DataFrames which would be merged into a hybrid frame
on: str, default: 'ts'
on which column the frames will be merged
how: str, default: 'outer'
how the frames will be merged
Returns
-------
m: ndarray
measurement array in format [ts, results...]
m_type: ndarray
        type of data in each of the columns e.g. ['ts', 'rssi', 'toa']
m_id: ndarray
        anchor ids of the df columns. The default id is 0, used for 'ts' and other
        parameters not associated with any particular anchor.
df: DataFrame
hybrid DataFrame
"""
df = dfs[0]
for d in dfs[1:]:
df = df.merge(d, on=on, how=how)
m_type= np.array([x.split('_')[0] for x in df.columns[:]])
m_id= np.array([x.split('_')[1] if '_' in x else 0 for x in df.columns[:] ]).astype('int')
return df.values, m_type, m_id, df
def log_mean(v_db, axis=None):
""" Calculate average for values in log scale by converting to linear and back to log.
Parameters
----------
v_db: ndarray
values in log scale
axis: {int, None}, optional
axis along which the mean will be calculated
Returns
-------
avg: {ndarray, double}
mean value
"""
v_lin = 10 ** (v_db / 10) # Power in mW
l_mean = np.nanmean(v_lin, axis=axis)
db_mean = 10 * np.log10(l_mean)
return db_mean
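# Minimal sketch of how log_mean behaves (runs only when this module is executed
# directly; the RSSI values are made up):
if __name__ == '__main__':
    rssi = np.array([-60.0, -70.0])
    # the arithmetic mean would be -65 dBm; averaging in linear scale gives ~ -62.6 dBm
    print(log_mean(rssi))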
| 2.515625 | 3 |
| Chess/main.py | jlefkoff/GridBoard | 1 | 12784053 |
import ChessFuntions
import pprint
game = ChessFuntions.Chessgame()
game.setup()
game.wereToMove(2, 1)
| 1.617188 | 2 |
| Q084.py | Linchin/python_leetcode_git | 0 | 12784054 |
"""
84
hard
max rectangle in histogram
Given an array of integers heights representing the histogram's bar
height where the width of each bar is 1, return the area of the largest rectangle in the histogram.
"""
from typing import List
class Solution:
def largestRectangleArea(self, heights: List[int]) -> int:
# wrong answer
stack = []
area = 0
for i in range(len(heights)):
if not stack or heights[stack[-1]] < heights[i]:
stack.append(i)
else:
while stack and heights[stack[-1]] > heights[i]:
area = max(area, (i - stack[-1]) * heights[stack[-1]])
stack.pop()
stack.append(i)
while stack:
area = max(area, (len(heights) - stack[-1]) * heights[stack[-1]])
stack.pop()
return area
heights = [2, 1, 2]
heights2 = [2,1,5,6,2,3]
sol = Solution()
print(sol.largestRectangleArea(heights))
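# The method above is marked "wrong answer": when a bar is popped, the rectangle
# width is measured against the popped bar's own index instead of the index now on
# top of the stack, so e.g. [2, 1, 2] yields 2 instead of 3. A standard
# monotonic-stack sketch (not part of the original file) that handles the width
# correctly:
def largest_rectangle_area(heights: List[int]) -> int:
    extended = heights + [0]  # sentinel bar of height 0 flushes the stack at the end
    stack = []                # indices of bars with non-decreasing heights
    area = 0
    for i, h in enumerate(extended):
        while stack and extended[stack[-1]] > h:
            height = extended[stack.pop()]
            width = i - stack[-1] - 1 if stack else i
            area = max(area, height * width)
        stack.append(i)
    return area

# print(largest_rectangle_area(heights))   # expected 3
# print(largest_rectangle_area(heights2))  # expected 10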
| 3.578125 | 4 |
| packages/w3af/w3af/core/controllers/threads/threadpool.py | ZooAtmosphereGroup/HelloPackages | 0 | 12784055 |
"""
threadpool.py
Copyright 2006 <NAME>
This file is part of w3af, http://w3af.org/ .
w3af is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation version 2 of the License.
w3af is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with w3af; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
"""
import sys
import time
import Queue
import threading
import traceback
from functools import partial
from multiprocessing.dummy import Process, current_process
from multiprocessing.util import Finalize, debug
from multiprocessing import cpu_count
from .pool276 import ThreadPool, RUN, create_detailed_pickling_error, mapstar
from w3af.core.data.fuzzer.utils import rand_alnum
from w3af.core.controllers.threads.decorators import apply_with_return_error
__all__ = ['Pool', 'return_args', 'one_to_many']
class one_to_many(object):
"""
This is a simple wrapper that translates one argument to many in a function
call. Useful for passing to the threadpool map function.
"""
def __init__(self, func):
self.func_orig = func
# Similar to functools wraps
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, args):
return self.func_orig(*args)
class return_args(object):
"""
    Utility wrapper that returns the args together with the result, useful when
    calling functions like imap_unordered().
"""
def __init__(self, func, *args, **kwds):
self.func = partial(func, *args, **kwds)
# Similar to functools wraps
self.func_orig = func
self.__name__ = func.__name__
self.__doc__ = func.__doc__
def __call__(self, *args, **kwds):
return args, self.func(*args, **kwds)
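# Illustrative usage sketch (the `pool`, `http_get` and `urls` names are hypothetical,
# not part of this module):
#
#   # call http_get(url, timeout) for every (url, timeout) tuple
#   pool.map(one_to_many(http_get), [(url, 1.0) for url in urls])
#
#   # keep the arguments next to each result when the order is not preserved
#   for (args, response) in pool.imap_unordered(return_args(http_get), urls):
#       ...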
class DaemonProcess(Process):
def __init__(self, group=None, target=None, name=None, args=(), kwargs={}):
super(DaemonProcess, self).__init__(group, target, name, args, kwargs)
self.daemon = True
self.worker = target
self.name = name
def get_state(self):
state = self.worker.get_state()
state['name'] = self.name
return state
def is_idle(self):
return self.worker.is_idle()
def start(self):
"""
        There is a race condition in DaemonProcess.start() which was found
        during some of the test scans I ran. The race condition exists
because we're using Threads for a Pool that was designed to be
used with real processes: thus there is no worker.exitcode,
thus it has to be simulated in a race condition-prone way.
I'm overriding this method in order to move this line:
self._start_called = True
Closer to the call to .start(), which should reduce the chances
of triggering the race conditions by 1% ;-)
"""
assert self._parent is current_process()
if hasattr(self._parent, '_children'):
self._parent._children[self] = None
self._start_called = True
threading.Thread.start(self)
def add_traceback_string(_exception):
"""
Add the traceback string as a new attribute to the exception raised
by the target function defined by the developer.
Adding this original traceback allows us to better understand the
root cause for exceptions that happen in functions which are run inside
the Pool (most).
For example, this is an exception stored in a /tmp/w3af-crash file before
this patch:
A "TypeError" exception was found while running crawl.phpinfo on "Method: GET | http://domain/".
The exception was: "unsupported operand type(s) for -: 'float' and 'NoneType'" at pool276.py:get():643.
The full traceback is:
File "/home/user/tools/w3af/w3af/core/controllers/core_helpers/consumers/crawl_infrastructure.py", line 533, in _discover_worker
result = plugin.discover_wrapper(fuzzable_request)
File "/home/user/tools/w3af/w3af/core/controllers/plugins/crawl_plugin.py", line 53, in crawl_wrapper
return self.crawl(fuzzable_request_copy)
File "/home/user/tools/w3af/w3af/plugins/crawl/phpinfo.py", line 148, in crawl
self.worker_pool.map_multi_args(self._check_and_analyze, args)
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 430, in map_multi_args
return self.map_async(one_to_many(func), iterable, chunksize).get()
File "/home/user/tools/w3af/w3af/core/controllers/threads/pool276.py", line 643, in get
raise self._value
And after adding the original traceback and using it in exception_handler.py:
A "TypeError" exception was found while running crawl.phpinfo on "Method: GET | http://domain/".
The exception was: "unsupported operand type(s) for -: 'float' and 'NoneType'" at pool276.py:get():643.
The full traceback is:
Traceback (most recent call last):
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 238, in __call__
result = (True, func(*args, **kwds))
File "/home/user/tools/w3af/w3af/core/controllers/threads/pool276.py", line 67, in mapstar
return map(*args)
File "/home/user/tools/w3af/w3af/core/controllers/threads/threadpool.py", line 55, in __call__
return self.func_orig(*args)
File "/home/user/tools/w3af/w3af/plugins/crawl/phpinfo.py", line 180, in _check_and_analyze
1.0 - None
TypeError: unsupported operand type(s) for -: 'float' and 'NoneType'
The exact line where the exception is raised is shown!
Adding new attributes to instances is not something I like, but in
this case I had no choice...
Creating a new Exception type and wrapping all exceptions generated
by the pool with that one wouldn't work: we lose the exception type
and can't do:
try:
...
except TypeError:
...
The code for the whole framework would need to be changed to something
like:
try:
...
except PoolException, pe:
if isinstance(pe.original_exception, TypeError):
...
:param _exception: The exception instance where to add the new attribute
:return: None
"""
except_type, except_class, tb = sys.exc_info()
tb = traceback.format_exception(type(_exception), _exception, tb)
_exception.original_traceback_string = ''.join(tb)
class Worker(object):
__slots__ = ('func', 'args', 'kwargs', 'start_time', 'job', 'id')
def __init__(self):
self.func = None
self.args = None
self.kwargs = None
self.start_time = None
self.job = None
self.id = rand_alnum(8)
def is_idle(self):
return self.func is None
def get_real_func_name_args(self):
"""
Because of various levels of abstraction the function name is not always in
self.func.__name__, this method "unwraps" the abstractions and shows us
something easier to digest.
:return: The function name
"""
# self.func/self.args could change over the execution of this method, so take
# a copy here.
current_func = self.func
current_args = self.args
if current_func is mapstar:
current_func = current_args[0][0]
current_args = current_args[0][1:]
if current_func is apply_with_return_error:
current_func = current_args[0][0]
current_args = current_args[0][1:]
if isinstance(current_func, return_args):
return current_func.func_orig.__name__, current_args
if isinstance(current_func, one_to_many):
return current_func.func_orig.__name__, current_args
if current_func is None:
return None, None
return current_func.__name__, current_args
def get_state(self):
func_name, func_args = self.get_real_func_name_args()
return {'func_name': func_name,
'args': func_args,
'kwargs': self.kwargs,
'start_time': self.start_time,
'idle': self.is_idle(),
'job': self.job,
'worker_id': self.id}
def __call__(self, inqueue, outqueue, initializer=None, initargs=(), maxtasks=None):
assert maxtasks is None or (type(maxtasks) in (int, long) and maxtasks > 0)
put = outqueue.put
get = inqueue.get
if hasattr(inqueue, '_writer'):
inqueue._writer.close()
outqueue._reader.close()
if initializer is not None:
initializer(*initargs)
completed = 0
while maxtasks is None or (maxtasks and completed < maxtasks):
try:
task = get()
except (EOFError, IOError):
debug('worker got EOFError or IOError -- exiting')
break
if task is None:
debug('worker got sentinel -- exiting')
break
job, i, func, args, kwds = task
# Tracking
self.func = func
self.args = args
self.kwargs = kwds
self.start_time = time.time()
self.job = job
try:
result = (True, func(*args, **kwds))
            except Exception as e:
add_traceback_string(e)
result = (False, e)
# Tracking
self.func = None
self.args = None
self.kwargs = None
self.start_time = None
self.job = None
try:
put((job, i, result))
except Exception as e:
wrapped = create_detailed_pickling_error(e, result[1])
put((job, i, (False, wrapped)))
finally:
# https://bugs.python.org/issue29861
task = None
job = None
result = None
func = None
args = None
kwds = None
completed += 1
debug('worker exiting after %d tasks' % completed)
class Pool(ThreadPool):
def __init__(self, processes=None, initializer=None, initargs=(),
worker_names=None, maxtasksperchild=None,
max_queued_tasks=0):
"""
Overriding this method in order to:
* Name the pool worker threads
* Name the threads used for managing the Pool internals
"""
self.Process = partial(DaemonProcess, name=worker_names)
self.worker_names = worker_names
# Setting the max number of queued tasks for the ThreadPool is not
# as simple as it looks.
#
# First I tried to limit the max size of self._inqueue (defined
# in _setup_queues), that didn't work.
#
# Then I tried to limit the size for self._taskqueue, that didn't
# work either.
#
# I had to set the maxsize of self._taskqueue to 1 and use the
# max_queued_tasks parameter to limit the size of self._inqueue
# This is required due to the ThreadPool internals, see the
# definition of the _handle_tasks method in pool276.py where
# the function is reading from self._taskqueue and writing to
# self._inqueue.
#
# Not setting the limit in self._taskqueue allows the main thread
# to enqueue an infinite number of tasks.
#
# Only setting the limit in self._taskqueue will not work, since
# the _handle_tasks method is always reading from that queue
# (which decreases its size) and writing to self._inqueue. Because
# of those reads to self._taskqueue, the queue never reaches the
# limit.
#
if max_queued_tasks != 0:
assert max_queued_tasks - 1 > 0, 'max_queued_tasks needs to be at least 2'
self._setup_queues(max_queued_tasks - 1)
self._taskqueue = Queue.Queue(maxsize=1)
self._cache = {}
self._state = RUN
self._maxtasksperchild = maxtasksperchild
self._initializer = initializer
self._initargs = initargs
if processes is None:
try:
processes = cpu_count()
except NotImplementedError:
processes = 1
if processes < 1:
raise ValueError("Number of processes must be at least 1")
if initializer is not None and not hasattr(initializer, '__call__'):
raise TypeError('initializer must be a callable')
self._processes = processes
self._pool = []
self._repopulate_pool()
self._worker_handler = threading.Thread(
target=Pool._handle_workers,
args=(self, ),
name='PoolWorkerHandler')
self._worker_handler.daemon = True
self._worker_handler._state = RUN
self._worker_handler.start()
self._task_handler = threading.Thread(
target=Pool._handle_tasks,
args=(self._taskqueue, self._quick_put, self._outqueue,
self._pool, self._cache),
name='PoolTaskHandler')
self._task_handler.daemon = True
self._task_handler._state = RUN
self._task_handler.start()
self._result_handler = threading.Thread(
target=Pool._handle_results,
args=(self._outqueue, self._quick_get, self._cache),
name='PoolResultHandler')
self._result_handler.daemon = True
self._result_handler._state = RUN
self._result_handler.start()
self._terminate = Finalize(
self, self._terminate_pool,
args=(self._taskqueue, self._inqueue, self._outqueue, self._pool,
self._worker_handler, self._task_handler,
self._result_handler, self._cache),
exitpriority=15)
def get_inqueue(self):
return self._inqueue
def get_outqueue(self):
return self._outqueue
def get_running_task_count(self):
# Cheating here a little bit because the task queued in _inqueue will
# eventually be run by the pool, but is not yet in the pool
running_tasks = self._inqueue.qsize()
for process in self._pool[:]:
if not process.is_idle():
running_tasks += 1
return running_tasks
def _repopulate_pool(self):
"""
Bring the number of pool processes up to the specified number,
for use after reaping workers which have exited.
I overwrite this in order to change the Process target to a Worker
object (instead of a function) in order to keep better stats of
what it is doing.
"""
for i in range(self._processes - len(self._pool)):
w = self.Process(target=Worker(),
args=(self._inqueue,
self._outqueue,
self._initializer,
self._initargs,
self._maxtasksperchild))
self._pool.append(w)
w.name = w.name.replace('Process', 'PoolWorker')
w.daemon = True
w.start()
debug('added worker')
def get_worker_count(self):
return len(self._pool)
def set_worker_count(self, count):
"""
Set the number of workers.
        Keep in mind that this is not immediate when decreasing
the pool process count!
* When increasing the size, the threadpool will call
repopulate_pool() and the new threads will be created
* When decreasing the size, a thread will finish because
of maxtasksperchild, then repopulate_pool() will be
called async and the thread will *not* be created,
thus decreasing the pool size
The change is made effective depending on the work load and
the time required to finish each task.
:param count: The new process count
:return: None
"""
assert self._maxtasksperchild, 'Can only adjust size if maxtasksperchild is set'
assert count >= 1, 'Number of processes must be at least 1'
self._processes = count
self._repopulate_pool()
def _setup_queues(self, max_queued_tasks):
self._inqueue = Queue.Queue(maxsize=max_queued_tasks)
self._outqueue = Queue.Queue()
self._quick_put = self._inqueue.put
self._quick_get = self._outqueue.get
def map_multi_args(self, func, iterable, chunksize=None):
"""
Blocks until all results are done (please note the .get())
"""
assert self._state == RUN
return self.map_async(one_to_many(func), iterable, chunksize).get()
def in_qsize(self):
return self._taskqueue.qsize()
def is_running(self):
return self._state == RUN
def terminate_join(self):
self.terminate()
self.join()
def _join_exited_workers(self):
"""Cleanup after any worker processes which have exited due to reaching
their specified lifetime. Returns True if any workers were cleaned up.
"""
cleaned = False
for i in reversed(range(len(self._pool))):
worker = self._pool[i]
if worker.exitcode is not None:
# worker exited
try:
worker.join()
except RuntimeError:
#
# RuntimeError: cannot join thread before it is started
#
# This is a race condition in DaemonProcess.start() which was found
# during some of the test scans I run. The race condition exists
# because we're using Threads for a Pool that was designed to be
# used with real processes: thus there is no worker.exitcode,
# thus it has to be simulated in a race condition-prone way.
#
continue
else:
debug('cleaning up worker %d' % i)
cleaned = True
del self._pool[i]
return cleaned
def finish(self, timeout=120):
"""
Wait until all tasks in the self._inqueue have been processed (the queue
has size == 0) and then call terminate on the Pool.
I know this is not the best way of doing it, but had some dead-lock
issues with:
self.close()
self.join()
:param timeout: Wait up to timeout seconds for the queues to be empty
"""
delay = 0.1
for _ in xrange(int(timeout / delay)):
if (self._inqueue.qsize() == 0 and
self._outqueue.qsize() == 0 and
self._taskqueue.qsize() == 0):
break
time.sleep(delay)
self.terminate()
self.join()
def inspect_threads(self):
"""
This method inspects the attributes exposed by the Worker object defined
above and lets us debug the thread pool.
This is useful for answering the question: "What functions are running in
the pool right now?"
:return: Data as a list of dicts, which is usually sent to inspect_data_to_log()
"""
inspect_data = []
for process in self._pool[:]:
worker_state = process.get_state()
inspect_data.append(worker_state)
return inspect_data
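# Illustrative usage sketch (the `check` callable and `urls` iterable are hypothetical):
#
#   pool = Pool(processes=10,
#               worker_names='WorkerThread',
#               maxtasksperchild=20,
#               max_queued_tasks=100)
#   pool.map_multi_args(check, [(url, 1.0) for url in urls])
#   pool.finish(timeout=60)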
| 2.59375 | 3 |
| lib/service.py | psobot/biographer | 1 | 12784056 |
import logging
import inspect
import mechanize
log = logging.getLogger(__name__)
class Service(object):
"""
The superclass of all services.
When creating a service, inherit from this class
and implement the following methods as necessary:
__init__
authenticate
check_<attribute>
modify_<attribute>
"""
_NO_DOCSTRING = "No documentation provided."
_authenticated = False
_cache = {}
def __init__(self, *args, **kwargs):
raise NotImplementedError()
def authenticate(self, *args, **kwargs):
raise NotImplementedError()
@classmethod
def documentation(cls):
doc = inspect.getdoc(cls)
base = [doc or cls._NO_DOCSTRING, "Attributes:"]
name = lambda: "=== %s ===" % cls.__name__
return name() + "\n" + "\n\t".join(base
+ ["\t%s %s" % (name, doc or "")
for name, doc in cls.attributes_with_docstrings()]
)
@classmethod
def attributes(cls):
methods = dir(cls)
return sorted([x.replace('check_', '')
for x in methods if x.startswith('check_')
and (x.replace('check_', 'modify_') in methods)])
@classmethod
def attributes_with_docstrings(cls):
return [(x, inspect.getdoc(getattr(cls, "check_" + x))
or inspect.getdoc(getattr(cls, "modify_" + x)))
for x in cls.attributes()]
def __match(self, check, value, takes_arguments):
if takes_arguments:
r = check(value)
if r in [True, False]:
return r
else:
raise RuntimeError(
"Value returned from %s.%s not True or False" %
(check.im_class.__name__, check.__name__)
)
else:
return check() == value
def ensure(self, key, value):
name = self.__class__.__name__
log = logging.getLogger("service.%s" % name)
check = getattr(self, "check_" + key.lower(), None)
modify = getattr(self, "modify_" + key.lower(), None)
if check and modify:
log.info("Checking %s on %s...", key, name)
takes_arguments = len(inspect.getargspec(check).args[1:]) > 0
match = lambda: self.__match(check, value, takes_arguments)
if not match():
log.info("Did not find expected value '%s'.", value)
log.info("Updating %s on %s...", key, name)
modify(value)
if not match():
                    raise RuntimeError("Value of %s on %s has not changed "
                                       "after modification. Please verify." %
                                       (key, name))
else:
log.info("Success! Updated %s on %s.", key, name)
return True
elif check and not modify:
log.warning("Missing modifier for %s on %s.", key, name)
elif modify and not check:
log.warning("Missing checker for %s on %s.", key, name)
else: # this property does not exist on this service
return None
def cached_property(fn):
"""
Decorator that turns the given method into a cached property.
To clear the cache, delete self._cache[fn].
The preferred way of clearing the cache is by using an
"@invalidates_cache" decorator on another method.
"""
def wrapped(self, *args, **kwargs):
if fn not in self._cache:
self._cache[fn] = fn(self, *args, **kwargs)
return self._cache[fn]
return property(wrapped)
def invalidates_cache(fn):
"""
Clears all cached properties after the decorated function is called.
Useful when changing external (third-party) state that requires
reverification. (e.g.: decorate a "modify_something" method with this.)
"""
def wrapped(self, *args, **kwargs):
r = fn(self, *args, **kwargs)
self._cache = {}
return r
return wrapped
def with_new_browser(fn):
"""
Forces a new browser object to be created before running the wrapped
function.
"""
def wrapped(self, *args, **kwargs):
self.browser = mechanize.Browser()
return fn(self, *args, **kwargs)
return wrapped
def requires_authentication(fn):
"""
Decorator that forces the "authenticate" method of a service
to have been called before the given method.
"""
def wrapped(self, *args, **kwargs):
if not self._authenticated:
self.browser = self.authenticate(mechanize.Browser())
self._authenticated = True
return fn(self, *args, **kwargs)
return wrapped
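# Illustrative sketch of a concrete service (the ExampleService class is hypothetical,
# not part of this module): implement authenticate() and matching check_/modify_ pairs,
# and reuse the decorators defined above.
#
#   class ExampleService(Service):
#       def __init__(self, username, password):
#           self.username, self.password = username, password
#
#       def authenticate(self, browser):
#           ...  # log in and return the authenticated browser
#           return browser
#
#       @requires_authentication
#       def check_email(self):
#           "Contact e-mail address shown on the account page."
#           return ...  # read the current value from the site
#
#       @requires_authentication
#       @invalidates_cache
#       def modify_email(self, value):
#           ...  # submit the new value
#
#   ExampleService('user', 'secret').ensure('email', 'me@example.com')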
| 2.96875 | 3 |
| ostap/fitting/fitresult.py | TatianaOvsiannikova/ostap | 14 | 12784057 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# =============================================================================
## @file ostap/fitting/fitresult.py
# Tiny decoration for ROOT.FitResult object
# @author <NAME> <EMAIL>
# @date 2011-06-07
# =============================================================================
"""Tiny decoration for ROOT.FitResult object
"""
# =============================================================================
__version__ = "$Revision$"
__author__ = "<NAME> <EMAIL>"
__date__ = "2011-06-07"
__all__ = () ## nothing to import
# =============================================================================
import ROOT
# =============================================================================
# logging
# =============================================================================
from ostap.logger.logger import getLogger
if '__main__' == __name__ : logger = getLogger( 'ostap.fitting.fitresult' )
else : logger = getLogger( __name__ )
# =============================================================================
logger.debug ( 'Tiny decoration for ROOT.FitResult object')
# =============================================================================
from ostap.math.ve import VE
from ostap.core.ostap_types import integer_types
# =============================================================================
## representation of TFitResult object
# @code
# fit_result = histo.Fit( func , 'S' , ... )
# print fit_result
# @endcode
# @author <NAME> <EMAIL>
# @date 2011-06-07
def _fit_repr_ ( self ) :
"""Representaion of TFitResult object
>>> fit_result = histo.Fit( func , 'S' , ... )
>>> print fit_result
"""
_r = ''
_r += "\n Status = %s " % self.Status ()
_r += "\n Chi2/nDoF = %s/%s " % ( self.Chi2 () , self.Ndf() )
_r += "\n Probability = %s " % self.Prob ()
_p = self.Parameters ()
_e = self.Errors ()
for i in range( 0 , len(_p) ) :
v = _p[i]
e = _e[i]
a = VE ( v ,e*e )
_r += " \n %s " % a
return _r
# ==============================================================================
## print <code>TFitResult</code> as a table
def _fit_table_ ( rfit , title = '' , prefix = '' ) :
"""Print <code>TFitResult</code> as a table
"""
from ostap.fitting.utils import fit_status, cov_qual
from ostap.logger.colorized import attention, allright
from ostap.logger.utils import pretty_float, pretty_ve, pretty_2ve, fmt_pretty_ve
header = ( '', 'Unit' , 'Value' )
rows = []
    ## 0. minimizer type
row = "Minimizer Type" , '' , rfit.MinimizerType()
rows.append ( row )
    ## fit validity
v = rfit.IsValid()
if v : row = "Valid" , '' , 'True'
else : row = "Valid" , '' , attention ( 'False')
rows.append ( row )
## 1. fit status
status = rfit.Status()
if status :
row = attention ( 'Status' ) , '' , attention ( fit_status ( status ) )
rows.append ( row )
else :
row = 'Status' , '' , allright ( fit_status ( status ) )
rows.append ( row )
## 4. covariance status
cq = rfit.CovMatrixStatus()
cn = ''
if -1 == cq : cn = cov_qual ( cq )
elif 3 == cq : cn = allright ( cov_qual ( cq ) )
elif cq in ( 0 , 1 , 2 ) : cn = attention ( cov_qual ( cq ) )
else : cn = cov_qual ( cq )
rows.append ( ( 'Covariance matrix quality' , '' , ' ' + cn ) )
## 3-6. chi2,nDoF,chi2/nDoF,minFCN
chi2 = rfit.Chi2 ()
s , n = pretty_float ( chi2 )
if n : n = '[10^%+d]' % n
else : n = ''
rows.append ( ( "Chi2" , n , ' ' + s ) )
##
ndf = rfit.Ndf()
rows.append ( ( "nDoF" , '' , ' ' + '%d' % ndf ) )
##
c2ndf = rfit.Chi2 () / ndf
s , n = pretty_float ( c2ndf )
if n : n = '[10^%+d]' % n
else : n = ''
rows.append ( ( "Chi2/nDoF" , n , ' ' + s ) )
##
minfcn = rfit.MinFcnValue()
s , n = pretty_float ( minfcn )
if n : n = '[10^%+d]' % n
else : n = ''
rows.append ( ( "Minimal FCN" , n , ' ' + s ) )
    ## 7. probability [%]
    prob = rfit.Prob() * 100
rows.append ( ( "Probability" , '[%]' , ' %5.3e' % prob ) )
    ## 8. estimated distance to minimum
edm = rfit.Edm()
s , n = pretty_float ( edm )
if n : n = '[10^%+d]' % n
else : n = ''
rows.append ( ( "Estimated distance to minimum" , n , ' ' + s ) )
ncalls = rfit.NCalls()
rows.append ( ( "FCN calls" , '' , ' ' + '%d' % ncalls ) )
##
has_minos = False
for i in rfit :
if not rfit.HasMinosError( i ) : continue
has_minos = True
break
if has_minos :
rows = [ row + ('','','') for row in rows ]
header = header + ( 'neg-minos' , 'pos-minos' , 'Global corr.' )
else :
rows = [ row + ('',) for row in rows ]
header = header + ( 'Global corr.' , )
for i in rfit :
pname = rfit.GetParameterName ( i )
value = rfit.Value ( i )
fixed = rfit.IsParameterFixed ( i )
fmte = ''
if fixed :
v = value
s , n = pretty_float ( v )
s = s + '(fixed)'
nv = n
else :
error = rfit.Error ( i )
v = VE ( value , error * error )
##
fmt , fmtv , fmte , n = fmt_pretty_ve ( v )
s = fmt % ( value / 10**n , error / 10**n )
nv = n
if n : n = '[10^%+d]' % n
else : n = ''
pname = "%-2d: %s"% ( i , pname )
row = pname , n , ' ' + s
if not fixed and rfit.HasMinosError( i ) :
if fmte :
error_low = fmte % ( rfit.LowerError ( i ) / 10**nv )
error_up = fmte % ( rfit.UpperError ( i ) / 10**nv )
else :
error_low = "%+8g" % ( rfit.LowerError ( i ) / 10**nv )
error_up = "%+8g" % ( rfit.UpperError ( i ) / 10**nv )
else :
error_low = ''
error_up = ''
if has_minos :
row = row + ( error_low , error_up )
gcc = rfit.GlobalCC ( i ) * 100
gcc = '%+5.1f%%' % gcc
row = row + ( gcc, )
rows.append ( row )
if not title : title = rfit.GetTitle()
import ostap.logger.table as T
rows = [ header ] + rows
return T.table ( rows , title = title , prefix = prefix )
# =============================================================================
## get number of parameters
# @code
# fit_result = histo.Fit( func , 'S' , ... )
# print len(fit_result)
# @endcode
def _fit_len_ ( r ) :
"""Get number of parameters
    >>> fit_result = histo.Fit( func , 'S' , ... )
>>> print len(fit_result)
"""
return len ( r.Parameters() )
# =============================================================================
## iterator over fit-result object
# @code
# fit_result = histo.Fit( func , 'S' , ... )
# for i in fit_result : print i,fit_result[i]
# @endcode
# @author <NAME> <EMAIL>
# @date 2011-06-07
def _fit_iter_ ( r ) :
"""Iterator over fit-result object
    >>> fit_result = histo.Fit( func , 'S' , ... )
    >>> for i in fit_result : print i,fit_result[i]
"""
l = len( r )
i = 0
while i < l :
yield i
i += 1
# =============================================================================
## get parameter number
# @code
# r = h1.Fit( ... )
# name = r.GetParNumber ( 'mass' )
# @endcode
def _fit_parnum_ ( self , par ) :
"""Get parameter number:
>>> r = h1.Fit( ... )
>>> name = r.GetParNumber ( 'mass' )
"""
if isinstance ( par , integer_types ) :
if 0<= par< len ( self ) : return int( par ) ## RETURN
else : return -1 ## RETURN
#
if isinstance ( par , str ) :
ll = len ( self )
for i in range ( 0 , ll ) :
if self.ParName(i) == par : return i ## RETURN
## nothing is found
return -1 ## RETURN
# =============================================================================
## check parameter
# @code
# r = h1.Fit(....) ##
# if i in r : ... ## check parameter by index
# if 'm' in r : ... ## check parameter by name
# @endcode
# @author <NAME> <EMAIL>
# @date 2015-07-12
def _fit_contains_ ( self , par ) :
"""Check parameter
>>> r = h1.Fit(....) ##
>>> if i in r : ... ## check parameter by index
>>> if 'm' in r : ... ## check parameter by name
"""
return 0 <= _fit_parnum_ ( self , par )
# =============================================================================
## getitem for fit-result-object
# @code
# r = h1.Fit(....) ##
# print r[0] ## print 0th parameter
# @endcode
# @author <NAME> <EMAIL>
# @date 2011-06-07
def _fit_getitem_ ( self , par ) :
"""Getitem for fit-result-object
>>> r = h1.Fit(....) ##
>>> print r[0] ## print 0th parameter
"""
## convert parameter into integer
ipar = _fit_parnum_ ( self , par )
if not 0<= ipar : raise IndexError("TFitResult:illegal index %s" % par)
#
_p = self.Parameter ( ipar )
_e = self.Error ( ipar )
#
return VE( _p , _e * _e )
# =============================================================================
## Get correlation coefficient for parameters 'i' and 'j'
# @code
# r = ...
# print r.cor(1,2)
# @endcode
def _fit_cor_ ( self , i , j ) :
"""Get correlation coefficient for parameters 'i' and 'j'
>>> r = ...
>>> print r.cor(1,2)
"""
ipar = _fit_parnum_ ( self , i )
jpar = _fit_parnum_ ( self , j )
#
if 0 > ipar : raise IndexError( "TFitResult:invalid index %s" % i )
if 0 > jpar : raise IndexError( "TFitResult:invalid index %s" % j )
#
_cij = self.CovMatrix ( ipar , jpar )
_ei = self.Errors ( ipar )
_ej = self.Errors ( jpar )
##
if 0 == _ei or 0 == _ej : return 0 ## RETURN
#
return _cij / ( _ei * _ej )
# =============================================================================
## Get correlation matrix
# @code
# r = ...
# print r.corMtrx()
# @endcode
def _fit_corm_ ( self , root = False ) :
"""Get correlation matrix
>>> r = ...
>>> print r.corMtrx ()
"""
_l = len (self)
matrix = None
import ostap.math.linalg
try :
matrix = Ostap.Math.SymMatrix(_l)
except :
pass
## fill matrix
for i in range (0,_l):
for j in range (i, _l):
_cij = self.CovMatrix( i , j )
_eij = self.Error( i ) * self.Error( j )
if 0 != _eij : _vij = _cij / _eij
else : _vij = 0
matrix [ i , j ] = _vij
matrix [ j , i ] = _vij
return matrix
for klass in ( ROOT.TFitResult ,
ROOT.TFitResultPtr ) :
klass.__contains__ = _fit_contains_
klass.__repr__ = _fit_repr_
klass.__str__ = _fit_repr_
klass.__iter__ = _fit_iter_
klass.__getitem__ = _fit_getitem_
klass.__call__ = _fit_getitem_
klass.__len__ = _fit_len_
klass.cor = _fit_cor_
klass.corMtrx = _fit_corm_
klass.GetParNumber = _fit_parnum_
klass.parnum = _fit_parnum_
klass.table = _fit_table_
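# =============================================================================
## Illustrative usage of the decorated classes (hypothetical `histo` and `func`)
#  @code
#  r = histo.Fit ( func , 'S' )          ## 'S' option returns a TFitResultPtr
#  print r.table ( title = 'My fit' )    ## formatted table
#  for i in r : print i , r [ i ]        ## parameters as VE objects
#  print r.cor ( 0 , 1 )                 ## correlation coefficient
#  @endcode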
# =============================================================================
_decorated_classes_ = (
ROOT.TFitResult ,
ROOT.TFitResultPtr ,
)
_new_methods_ = (
##
ROOT.TFitResult .__contains__ ,
ROOT.TFitResult .__repr__ ,
ROOT.TFitResult .__str__ ,
ROOT.TFitResult .__iter__ ,
ROOT.TFitResult .__getitem__ ,
ROOT.TFitResult .__call__ ,
ROOT.TFitResult .__len__ ,
ROOT.TFitResult .cor ,
ROOT.TFitResult .corMtrx ,
ROOT.TFitResult .GetParNumber ,
ROOT.TFitResult .parnum ,
ROOT.TFitResult .table ,
##
ROOT.TFitResultPtr.__contains__ ,
ROOT.TFitResultPtr.__repr__ ,
ROOT.TFitResultPtr.__str__ ,
ROOT.TFitResultPtr.__iter__ ,
ROOT.TFitResultPtr.__getitem__ ,
ROOT.TFitResultPtr.__call__ ,
ROOT.TFitResultPtr.__len__ ,
ROOT.TFitResultPtr.cor ,
ROOT.TFitResultPtr.corMtrx ,
ROOT.TFitResultPtr.GetParNumber ,
ROOT.TFitResultPtr.parnum ,
ROOT.TFitResultPtr.table ,
)
# =============================================================================
if '__main__' == __name__ :
from ostap.utils.docme import docme
docme ( __name__ , logger = logger )
# =============================================================================
## The END
# =============================================================================
| 1.703125 | 2 |
| positionsize.py | fearlessspider/IKH-Quant-Connect | 3 | 12784058 |
from decimal import Decimal
class PositionSize:
symbol = "USDJPY"
algorithm = None
bonus = 0
def __init__(self, algorithm, symbol, tenkan, kijun, senkou, period=15):
self.symbol = symbol
self.algorithm = algorithm
# Get POINT value for current asset
def get_point_value(self):
# lotSize = 1000
conversionRate = self.algorithm.Portfolio.Securities[self.symbol].QuoteCurrency.ConversionRate
# conversionRate = 1.31819(GBPUSD)
pointValue = self.get_lot_size() * conversionRate
# pointValue = 1318.19USD
return pointValue
# Get LOT size
def get_lot_size(self):
# E.g.: symbol = "EURGBP";
lotSize = self.algorithm.Portfolio.Securities[self.symbol].SymbolProperties.LotSize
return lotSize
def checkSLPosition(self, sl, type, data):
self.algorithm.Log("Check {0}".format(sl))
_bonus = self.bonus
_volume = Decimal(0.01) * self.get_wins_lose_multiplier()
position = self.get_point_value() * Decimal(sl)
lot = (self.algorithm.Portfolio.Cash * (_volume + Decimal(_bonus))) * self.get_lot_size() / position
orderQua = abs(self.algorithm.CalculateOrderQuantity(self.symbol, 38 * type))
#buiyng = self.algorithm.Portfolio.GetBuyingPower(self.symbol, OrderDirectiondirection = OrderDirection.Up)
if lot > orderQua:
return int(orderQua)
return int(lot)
# Get wins and loses multiplier from closed positions
def get_wins_lose_multiplier(self):
multiplier = 1
positionClosed = []
for position in self.positions:
if position.leave > 0:
positionClosed.append(position)
if len(positionClosed) > 0:
if positionClosed[-1].Result >= 0:
multiplier += 1
if len(positionClosed) > 1:
if positionClosed[-2].Result >= 0:
multiplier += 1
if len(positionClosed) > 2:
if positionClosed[-3].Result >= 0:
multiplier += 1
if positionClosed[-1].Result < 0:
multiplier -= 0.1
if len(positionClosed) > 1:
if positionClosed[-2].Result < 0:
multiplier -= 0.1
if len(positionClosed) > 2:
if positionClosed[-3].Result < 0:
multiplier -= 0.1
return multiplier
| 2.5 | 2 |
| alpha.py | ssadjina/FatTailedTools | 0 | 12784059 |
# A collection of various tools to help estimate and analyze the tail exponent.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from FatTailedTools.plotting import plot_survival_function
from FatTailedTools.survival import get_survival_function
def fit_alpha_linear(series, tail_start_mad=2.5, plot=True, return_loc=False):
'''
Estimates the tail parameter by fitting a linear function to the log-log tail of the survival function.
'tail_start_mad' defines where the tail starts in terms of the mean absolute deviation (typically between 2-4 MADs).
    The estimated location of the Pareto (with the estimated tail exponent) will also be returned if 'return_loc' is True.
'''
# Get survival function values
if plot:
survival, ax = plot_survival_function(series, tail_zoom=False)
else:
survival = get_survival_function(series)
# Estimate tail start (= everything beyond 'tail_start_mad' mean absolute deviations)
tail_start = get_tail_start(series, tail_start_mad)
# Get tail
survival_tail = np.log10(survival.loc[survival['Values'] >= tail_start].iloc[:-1])
# Fit the tail
tail_fit = np.polyfit(survival_tail['Values'], survival_tail['P'], 1)
lin_func = np.poly1d(tail_fit)
# Get tail parameter and location/scale
tail = -tail_fit[0]
location = (1 - tail_fit[1]) / tail_fit[0]
# Get MSE (mean squared error)
    mse_error = np.mean(np.square(np.subtract(lin_func(survival_tail['Values']), survival_tail['P'])))
# Plot the fit
if plot:
ax.plot(10**survival_tail['Values'], 10**lin_func(survival_tail['Values']), 'r');
ax.legend(['Fit (MSE = {:.2f})'.format(mse_error), 'Data']);
plt.title('Tail exponent fitted to tail (alpha = {:.2f}, loc = {:.2f})'.format(tail, location));
# Construct result
    result = (tail, location) if return_loc else tail
return result
def get_tail_start(series, tail_start_mad):
'''
Returns the start of the tail of 'series' based on 'tail_start_mad'.
'tail_start_mad' defines where the tail starts in terms of the mean absolute deviation (typically between 2-4 MADs).
'''
return tail_start_mad * series.abs().mad()
from scipy.stats import t
def fit_alpha(series, plot=True):
'''
    Estimates the tail parameter by fitting a Student-t distribution to the data.
If the passed data is from a one-sided distribution, it will first be mirrored at 0 to make it symmetrical.
'''
# Is the data only one-sided?
if (series.dropna() < 0).sum() * (series.dropna() > 0).sum() == 0:
# ... then construct a two-sided distribution
series = pd.concat([-series.dropna().abs(), series.dropna().abs()])
# Fit the distribution
params = t.fit(series.dropna())
if plot:
_, ax = plot_survival_function(series, distribution=(t, params));
plt.title('Tail exponent estimated from fitting (alpha = {:.2f})'.format(params[0]));
return params[0]
import seaborn as sns
def fit_alpha_subsampling(series, frac=0.7, n_subsets=100, n_tail_start_samples=1, plot=True, return_loc=False):
'''
Estimates the tail parameter by fitting a linear function to the log-log tail of the survival function.
Uses 'n_subsets' subsamples to average results over subsets with a fraction 'frac' of samples kept.
If return_loc is True, also returns where the tail of the distribution is assumed to start (using random subsampling with 'n_tail_start_samples' samples per subset).
'''
# Set up lists
_results_both = []
_results_left = []
_results_right = []
# Subsample and fit
for subsample in [series.sample(frac=frac) for i in range(n_subsets)]:
for tail_start_mad in np.random.normal(2.5, 0.5, n_tail_start_samples):
_results_both.append(subsample.abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
_results_left.append(subsample.where(subsample < 0).abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
_results_right.append(subsample.where(subsample >= 0).abs().agg(fit_alpha_linear, tail_start_mad=tail_start_mad, plot=False, return_loc=True))
# Assemble into DataFrame
alphas = pd.DataFrame.from_records(np.hstack([_results_both, _results_left, _results_right]), columns=pd.MultiIndex.from_product([['Both', 'Left', 'Right'], ['Tail Exponent', 'Location']]))
# Plot
if plot:
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle('Tail exponents for {} with random subsamples'.format(series.name))
for idx, name in enumerate(['Both', 'Left', 'Right']):
sns.histplot(data=alphas[(name, 'Tail Exponent')], color=['C7', 'C3', 'C0'][idx], stat='probability', bins=10, ax=ax[idx]);
ax[idx].set_title('Median = {:.1f} | Mean = {:.1f} ({})'.format(alphas[(name, 'Tail Exponent')].median(), alphas[(name, 'Tail Exponent')].mean(), ['both', 'left', 'right'][idx]));
ax[idx].set_xlabel('Tail exponent ({})'.format(['both', 'left', 'right'][idx]));
plt.show();
# Also plot locations if return_loc
if return_loc:
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
fig.suptitle('Locations for {} with random subsamples'.format(series.name))
for idx, name in enumerate(['Both', 'Left', 'Right']):
sns.histplot(data=alphas[(name, 'Location')], color=['C7', 'C3', 'C0'][idx], stat='probability', bins=10, ax=ax[idx]);
ax[idx].set_title('Median = {:.1f} | Mean = {:.1f} ({})'.format(alphas[(name, 'Location')].median(), alphas[(name, 'Location')].mean(), ['both', 'left', 'right'][idx]));
ax[idx].set_xlabel('Location ({})'.format(['both', 'left', 'right'][idx]));
plt.show();
# Construct result
result = alphas if return_loc else alphas.loc[:, (slice(None), 'Tail Exponent')]
return result
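# Minimal sketch (runs only when this module is executed directly; the data are
# synthetic, drawn from a Student-t with 3 degrees of freedom, so the fitted tail
# exponent should come out near 3):
if __name__ == '__main__':
    np.random.seed(0)
    sample = pd.Series(t.rvs(df=3, size=20000), name='synthetic')
    print('fitted alpha ~ {:.2f}'.format(fit_alpha(sample, plot=False)))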
| 3.25 | 3 |
| foreman_compute_resource.py | Nosmoht/ansible-module-foreman | 53 | 12784060 |
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Ansible module to manage Foreman compute resource resources.
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: foreman_compute_resource
short_description: Manage Foreman Compute resources using Foreman API v2
description:
- Create and delete Foreman Compute Resources using Foreman API v2
options:
name:
description: Compute Resource name
required: true
datacenter:
description: Name of Datacenter (only for Vmware)
required: false
default: None
description:
description: Description of compute resource
required: false
default: None
locations:
description: List of locations the compute resource should be assigned to
required: false
default: None
organizations:
description: List of organizations the compute resource should be assigned to
required: false
default: None
password:
description: Password for Ovirt, EC2, Vmware, Openstack. Secret key for EC2
required: false
default: None
provider:
description: Providers name (e.g. Ovirt, EC2, Vmware, Openstack, EC2, Google)
required: false
default: None
server:
description: Hostname of Vmware vSphere system
required: false
default: None
state:
description: Compute Resource state
required: false
default: present
choices: ["present", "absent"]
tenant:
description: Tenant name for Openstack
required: false
default: None
domain:
description: Domain name for Openstack (required for Keystone v3 API)
required: false
default: Default
url:
description: URL for Libvirt, Ovirt, and Openstack
required: false
default: None
user:
description: Username for Ovirt, EC2, Vmware, Openstack. Access Key for EC2.
required: false
default: None
foreman_host:
description: Hostname or IP address of Foreman system
required: false
default: 127.0.0.1
foreman_port:
description: Port of Foreman API
required: false
default: 443
foreman_user:
description: Username to be used to authenticate on Foreman
required: true
foreman_pass:
description: Password to be used to authenticate user on Foreman
required: true
foreman_ssl:
description: Enable SSL when connecting to Foreman API
required: false
default: true
notes:
- Requires the python-foreman package to be installed. See https://github.com/Nosmoht/python-foreman.
version_added: "2.0"
author: "<NAME> (@nosmoht)"
'''
EXAMPLES = '''
- name: Ensure Vmware compute resource
foreman_compute_resource:
name: VMware
datacenter: dc01
locations:
- Nevada
organizations:
- DevOps
provider: VMware
server: vsphere.example.com
url: vsphere.example.com
user: domain\admin
password: <PASSWORD>
state: present
foreman_user: admin
foreman_pass: secret
- name: Ensure Openstack compute resource
foreman_compute_resource:
name: Openstack
provider: OpenStack
tenant: ExampleTenant
url: https://compute01.example.com:5000/v2.0/tokens
user: admin
'''
try:
from foreman.foreman import *
except ImportError:
foremanclient_found = False
else:
foremanclient_found = True
def get_provider_params(provider):
provider_name = provider.lower()
if provider_name == 'docker':
return ['password', 'url', 'user']
elif provider_name == 'ec2':
return ['access_key', 'password', 'region', 'url', 'user']
elif provider_name == 'google':
return ['email', 'key_path', 'project', 'url', 'zone']
elif provider_name == 'libvirt':
return ['display_type', 'url']
elif provider_name == 'ovirt':
return ['url', 'user', 'password']
elif provider_name == 'openstack':
return ['url', 'user', 'password', 'tenant', 'domain']
elif provider_name == 'vmware':
return ['datacenter', 'user', 'password', 'server']
else:
return []
def get_organization_ids(module, theforeman, organizations):
result = []
for i in range(0, len(organizations)):
try:
organization = theforeman.search_organization(data={'name': organizations[i]})
if not organization:
module.fail_json('Could not find Organization {0}'.format(organizations[i]))
result.append(organization.get('id'))
except ForemanError as e:
module.fail_json('Could not get Organizations: {0}'.format(e.message))
return result
def get_location_ids(module, theforeman, locations):
result = []
for i in range(0, len(locations)):
try:
location = theforeman.search_location(data={'name':locations[i]})
if not location:
module.fail_json('Could not find Location {0}'.format(locations[i]))
result.append(location.get('id'))
except ForemanError as e:
module.fail_json('Could not get Locations: {0}'.format(e.message))
return result
def ensure(module):
name = module.params['name']
state = module.params['state']
provider = module.params['provider']
description = module.params['description']
locations = module.params['locations']
organizations = module.params['organizations']
foreman_host = module.params['foreman_host']
foreman_port = module.params['foreman_port']
foreman_user = module.params['foreman_user']
foreman_pass = module.params['foreman_pass']
foreman_ssl = module.params['foreman_ssl']
theforeman = Foreman(hostname=foreman_host,
port=foreman_port,
username=foreman_user,
password=<PASSWORD>,
ssl=foreman_ssl)
data = dict(name=name)
try:
compute_resource = theforeman.search_compute_resource(data=data)
except ForemanError as e:
module.fail_json(msg='Could not get compute resource: {0}'.format(e.message))
if organizations:
data['organization_ids'] = get_organization_ids(module, theforeman, organizations)
if locations:
data['location_ids'] = get_location_ids(module, theforeman, locations)
if state == 'absent':
if compute_resource:
try:
compute_resource = theforeman.delete_compute_resource(id=compute_resource.get('id'))
except ForemanError as e:
module.fail_json(msg='Could not delete compute resource: {0}'.format(e.message))
return True, compute_resource
data['provider'] = provider
params = get_provider_params(provider=provider) + ['description']
for key in params:
data[key] = module.params[key]
if state == 'present':
if not compute_resource:
try:
compute_resource = theforeman.create_compute_resource(data=data)
except ForemanError as e:
module.fail_json(msg='Could not create compute resource: {0}'.format(e.message))
return True, compute_resource
        # Foreman's API doesn't return the password, so we can't tell whether it
        # needs to be changed
data['password'] = None
if not all(data.get(key, None) == compute_resource.get(key, None) for key in params):
try:
compute_resource = theforeman.update_compute_resource(id=compute_resource.get('id'), data=data)
except ForemanError as e:
module.fail_json(msg='Could not update compute resource: {0}'.format(e.message))
return True, compute_resource
return False, compute_resource
def main():
module = AnsibleModule(
argument_spec=dict(
name=dict(type='str', required=True),
            access_key=dict(type='str', required=False),
datacenter=dict(type='str', required=False),
description=dict(type='str', required=False),
display_type=dict(type='str', required=False),
email=dict(type='str', required=False),
key_path=dict(type='str', required=False),
password=dict(type='str', required=False, no_log=True),
provider=dict(type='str', required=False),
region=dict(type='str', required=False),
server=dict(type='str', required=False),
url=dict(type='str', required=False),
user=dict(type='str', required=False),
state=dict(type='str', default='present', choices=['present', 'absent']),
            tenant=dict(type='str', required=False),
domain=dict(type='str', required=False),
locations=dict(type='list', required=False),
organizations=dict(type='list', required=False),
foreman_host=dict(type='str', default='127.0.0.1'),
foreman_port=dict(type='str', default='443'),
foreman_user=dict(type='str', required=True),
foreman_pass=dict(type='str', required=True, no_log=True),
foreman_ssl=dict(type='bool', default=True)
),
)
if not foremanclient_found:
module.fail_json(msg='python-foreman module is required. See https://github.com/Nosmoht/python-foreman.')
changed, compute_resource = ensure(module)
module.exit_json(changed=changed, compute_resource=compute_resource)
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
| 1.53125
| 2
|
tests/isct/test_container.py
|
UvaCsl/desist
| 0
|
12784061
|
<filename>tests/isct/test_container.py
import pathlib
import pytest
from desist.isct.container import create_container
from desist.isct.docker import Docker
from desist.isct.singularity import Singularity
from .test_runner import DummyRunner
from desist.isct.utilities import OS
@pytest.mark.parametrize("platform", [OS.MACOS, OS.LINUX])
@pytest.mark.parametrize("path, container_type",
[(None, Docker), ("./containers", Singularity)])
def test_new_container(mocker, platform, path, container_type):
mocker.patch('desist.isct.utilities.OS.from_platform',
return_value=platform)
container = create_container(".", path)
assert isinstance(container, container_type)
@pytest.mark.parametrize("platform, permission", [(OS.MACOS, ''),
(OS.LINUX, 'sudo')])
def test_new_container_permission(mocker, platform, permission):
mocker.patch('desist.isct.utilities.OS.from_platform',
return_value=platform)
# Docker
container = create_container(".")
assert container.sudo == permission
# Singularity
container = create_container(".", container_path='./container')
assert container.sudo == 'sudo -E'
@pytest.mark.parametrize("host, local", [
("/home/folder", "/patient"),
])
@pytest.mark.parametrize("container", [Docker, Singularity])
def test_bind_volume(container, host, local):
runner = DummyRunner()
if container == Docker:
c = container(".", runner=runner)
flag = '-v'
else:
c = container(".", "./container", runner=runner)
flag = '-B'
assert c.volumes == ''
c.bind(host, local)
# add host/local pair
host = pathlib.Path(host).absolute()
local = pathlib.Path(local)
assert c.bind_volumes[0] == (host, local)
# ensure `:` formatting
assert f'{host}:{local}' in c.volumes
    # run the command, which is traced in the `DummyRunner`'s output
c.run()
# ensure correct `bind_flag` is present
assert flag in runner
| 1.96875
| 2
|
pysprint/core/bases/_apply.py
|
kingablgh/PySprint
| 13
|
12784062
|
import types
import warnings
from collections.abc import Iterable
from inspect import getfullargspec
import numpy as np
class _DatasetApply:
"""
Helper class to apply function to
`pysprint.core.bases.dataset.Dataset` objects.
"""
def __init__(
self,
obj,
func,
axis=None,
args=None,
kwargs=None
):
self.obj = obj
self.args = args or ()
self.kwargs = kwargs or {}
self.f = func
self.axis = axis
if self.axis == "x" or self.axis == 0:
self.target = "x"
elif self.axis == "y" or self.axis == 1:
self.target = "y"
else:
            raise ValueError("Axis must be 'x', 'y', 0 or 1.")
self.shape = len(getattr(self.obj, self.target))
def perform(self):
"""
Apply the specified function.
"""
if isinstance(self.f, str):
func = getattr(self.obj, self.f)
sig = getfullargspec(func)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
# Let's assume we don't mess up the shape internally
func(*self.args, **self.kwargs)
return self.obj # we need to return this because of `inplacify` deco.
elif isinstance(self.f, np.ufunc):
target = getattr(self.obj, self.target)
retval = self.f(target, *self.args, **self.kwargs)
value = self._validate(retval)
setattr(self.obj, self.target, value)
if self.target == "y":
setattr(self.obj, "y_norm", value)
return value
elif isinstance(self.f, types.FunctionType):
sig = getfullargspec(self.f)
if "axis" in sig.args:
self.kwargs["axis"] = self.axis
# we can safely vectorize it here
self.f = np.vectorize(self.f)
target = getattr(self.obj, self.target)
retval = self.f(target, *self.args, **self.kwargs)
value = self._validate(retval)
setattr(self.obj, self.target, value)
if self.target == "y":
setattr(self.obj, "y_norm", value)
return value
def _validate(self, val):
if isinstance(val, (Iterable, np.ndarray)):
val = np.asarray(val, dtype=np.float64)
if val.ndim != 1:
val = np.concatenate(val).ravel()
warnings.warn("Function return value was flattened.")
if len(val) != len(np.unique(val)):
if len(np.unique(val)) == self.shape:
return val
else:
if self.target == "x":
raise ValueError(
f"Function returned duplicated values which is not allowed when"
" modifying the x axis. After filtering to unique values "
f"a {len(np.unique(val))}-length array was produced, "
f"but {self.shape} was expected."
)
return val
if len(val) != self.shape:
retval = self._broadcast(val)
return retval
return val
else:
raise TypeError("Function should return a number or Iterable type.")
def _broadcast(self, val):
if len(val) > self.shape:
return val[:self.shape]
elif len(val) < self.shape:
if not self.shape % len(val) == 0:
raise ValueError("Cannot broadcast safely to the desired shape.")
else:
                return np.repeat(val, self.shape // len(val))
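

if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): `_DatasetApply` only
    # needs an object exposing the targeted axis attribute (plus `y_norm` when the
    # y axis is modified), so a tiny stand-in is used here instead of a real
    # pysprint Dataset.
    class _FakeDataset:
        def __init__(self):
            self.x = np.array([1.0, 2.0, 3.0])
            self.y = np.array([1.0, 4.0, 9.0])
            self.y_norm = self.y

    ds = _FakeDataset()
    print(_DatasetApply(ds, np.sqrt, axis="y").perform())  # [1. 2. 3.], written back to ds.y and ds.y_norm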
| 2.53125
| 3
|
calculations/statraw/stats2csv.py
|
paqs2020/paqs2020
| 1
|
12784063
|
<gh_stars>1-10
import csv
import pickle
finalstat = pickle.load(open("finalstats.pkl","rb"))
with open('finalstat.csv','a') as f:
w = csv.writer(f)
w.writerow(['statistics']+list(finalstat['accurate'].keys()))
for quality in finalstat:
w.writerow([quality]+list(finalstat[quality].values()))
stats = pickle.load(open("stats.pkl","rb"))
for quality in stats:
with open(quality+'.csv','a') as s:
w = csv.writer(s)
w.writerow(['subject','fid','response'])
for subject in stats[quality]:
del stats[quality][subject]['neutral']
for fid in stats[quality][subject]:
for resp in stats[quality][subject][fid]:
w.writerow([subject,fid,resp])
rq2 = pickle.load(open("RQ2mean.pkl","rb"))
with open('RQ2mean.csv','a') as r:
w = csv.writer(r)
w.writerow(['questions']+list(rq2['accurate'].keys()))
for quality in rq2:
w.writerow([quality]+list(rq2[quality].values()))
rq2raw = pickle.load(open("RQ2stats.pkl","rb"))
for quality in rq2raw:
with open(quality+'RQ2.csv','a') as rw:
w = csv.writer(rw)
w.writerow(['subject','fid','Qtype','response'])
for subject in rq2raw[quality]:
for fid in rq2raw[quality][subject]:
for query in rq2raw[quality][subject][fid]:
if len(rq2raw[quality][subject][fid][query])>0:
for response in rq2raw[quality][subject][fid][query]:
w.writerow([subject,fid,query.replace('Query',''),response])
| 2.765625
| 3
|
chill/examples/chill/testcases/jacobi_box_1_32.py
|
CompOpt4Apps/Artifact-DataDepSimplify
| 5
|
12784064
|
from chill import *
source('jacobi_box_1_32.c')
destination('jacobi_box_1_32modified.c')
procedure('smooth_box_1_32')
loop(0)
original()
print_code()
stencil_temp(3)
print_code()
print_space()
| 1.679688
| 2
|
Exercicios/PythonExercicios/ex041 - 050/ex047.py
|
sggrilo/Curso-em-Video-Python
| 0
|
12784065
|
# EVEN-NUMBER COUNT: Create a program that shows on screen all the even numbers between 1 and 50.
from time import sleep
print('EVEN NUMBERS BETWEEN 2 AND 50\n')
sleep(1)
for i in range(2, 51, 2):
print(i)
| 3.5
| 4
|
tests/conftest.py
|
workforce-data-initiative/etp-warehouse
| 0
|
12784066
|
import os
import pytest
from sqlalchemy.inspection import inspect
from sqlalchemy_utils.functions import drop_database
from alembic import config
from dbutils import conn_uri_factory, DbConnection
def pytest_addoption(parser):
"""
Custom command line options required for test runs
"""
parser.addoption('--name', action='store', default=None,
help='Schema [transactional | warehouse] to operate on')
parser.addoption('--adapter', action='store', default='sqlite',
help='SQLAlchemy database connection adapter')
parser.addoption('--conf', action='store',
default=os.path.join(os.path.dirname(__file__),
'../alembic.ini'),
help="Alembic config INI path")
parser.addoption('--dbconf', action='store',
default=os.path.join(os.path.dirname(__file__),
'../conf/db.yml'),
help='Database connection parameters YAML path')
@pytest.fixture(scope='session')
def schema_name(request):
return request.config.getoption('--name')
@pytest.fixture(scope='session')
def alembic_cfg(request):
return request.config.getoption('--conf')
@pytest.fixture(scope='session')
def db_conf(request):
return request.config.getoption('--dbconf')
@pytest.fixture(scope='session')
def db_adapter(request):
return request.config.getoption('--adapter')
@pytest.fixture(scope='session')
def alchemy_url(db_conf, db_adapter, schema_name, request):
return conn_uri_factory(db_conf, db_adapter, schema_name)
@pytest.fixture(scope='session', autouse=True)
def db_setup(alembic_cfg, schema_name, db_conf, db_adapter, alchemy_url, request):
# run all db migrations
config.main(['-c', alembic_cfg, '-n', schema_name,
'-x', 'dbconf={0}'.format(db_conf),
'-x', 'adapter={0}'.format(db_adapter),
'upgrade', 'head'])
def db_drop():
# db teardown - drop all
config.main(['-c', alembic_cfg, '-n', schema_name,
'-x', 'dbconf={0}'.format(db_conf),
'-x', 'adapter={0}'.format(db_adapter),
'downgrade', 'base'])
# drop db incl. alembic tables
drop_database(alchemy_url)
request.addfinalizer(db_drop)
@pytest.fixture(scope='session')
def db_conn(alchemy_url):
return DbConnection(alchemy_url)
@pytest.fixture(scope='session')
def db_inspector(db_conn):
return inspect(db_conn.engine)
@pytest.fixture(scope='session')
def alembic_tables():
"""
Tables created by Alembic to track migrations.
    Fixture is maintained to differentiate Alembic's own tables
    from application tables when `inspector.get_table_names()` is used
    in tests.
"""
tbl_list = ['alembic_version']
return tbl_list
@pytest.fixture(scope='session')
def db_tables(schema_name):
"""
Manifest of all application tables expected
in database when all migrations are run
"""
if schema_name.lower() == 'transactional':
tbl_list = ['participant', 'program',
'participant_program', 'program_provider',
'provider', 'outcome',
'exit_type', 'wage', 'entity_type']
else:
tbl_list = []
# sort table list - easier test comparison
tbl_list.sort()
return tbl_list
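# Example invocation (illustrative; adjust the schema name and paths to your
# environment) using the custom options registered in pytest_addoption above:
#
#   pytest --name transactional --adapter sqlite --conf ../alembic.ini --dbconf ../conf/db.yml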
| 2.203125
| 2
|
uploader.py
|
fuzzycode/DawnDriver
| 1
|
12784067
|
<gh_stars>1-10
# Copyright (c) 2014, Dawn Robotics Ltd
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# 3. Neither the name of the Dawn Robotics Ltd nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import shutil
import filecmp
import subprocess
import logging
from distutils.spawn import find_executable
#---------------------------------------------------------------------------------------------------
class BoardInfo:
#-----------------------------------------------------------------------------------------------
def __init__( self, name, processor, uploadSpeed ):
self.name = name
self.processor = processor
self.uploadSpeed = uploadSpeed
#---------------------------------------------------------------------------------------------------
DEFAULT_SERIAL_PORT_NAME = "/dev/ttyUSB0"
DEFAULT_BOARD_MODEL = "uno"
BUILD_OUTPUT_FILENAME = "/tmp/ino_build_output.txt"
INO_BUILDER = "ino"
BIN_UPLOADER = "avrdude"
BOARD_INFO_DICT = {
"uno" : BoardInfo( "uno", "atmega328p", 115200 ),
"atmega8" : BoardInfo( "atmega8", "atmega8", 19200 )
}
#---------------------------------------------------------------------------------------------------
def getInoUploaderUserDir():
homeDir = os.environ[ "HOME" ]
return homeDir + "/.ino_uploader"
#---------------------------------------------------------------------------------------------------
def getInoBuilder():
return find_executable(INO_BUILDER)
#---------------------------------------------------------------------------------------------------
def getBinUploader():
return find_executable(BIN_UPLOADER)
#---------------------------------------------------------------------------------------------------
def upload( sketchDir, serialPortName=DEFAULT_SERIAL_PORT_NAME,
boardModel=DEFAULT_BOARD_MODEL, forceRebuild=False ):
boardInfo = BOARD_INFO_DICT[ boardModel ]
uploadSucceeded = False
inoUploaderUserDir = getInoUploaderUserDir()
sketchDirBasename = os.path.basename( sketchDir )
if len( sketchDirBasename ) == 0:
raise Exception( "Invalid sketch directory - " + sketchDir )
inoUploaderSketchDir = inoUploaderUserDir + "/" + sketchDirBasename
inoUploaderSrcDir = inoUploaderSketchDir + "/src"
sketchFiles = os.listdir( sketchDir )
# Check to see if we need to copy files over
fileCopyNeeded = False
if forceRebuild:
if os.path.exists( inoUploaderSketchDir ):
shutil.rmtree( inoUploaderSketchDir )
fileCopyNeeded = True
else:
if not os.path.exists( inoUploaderSrcDir ):
fileCopyNeeded = True
else:
match, mismatch, errors = filecmp.cmpfiles( sketchDir, inoUploaderSrcDir, sketchFiles )
if len( mismatch ) > 0 or len( errors ) > 0:
fileCopyNeeded = True
# Copy files over if needed
if fileCopyNeeded:
logging.info( "Copying sketch src files" )
if os.path.exists( inoUploaderSrcDir ):
shutil.rmtree( inoUploaderSrcDir )
shutil.copytree( sketchDir, inoUploaderSrcDir )
else:
logging.info( "No file copy needed" )
# Now try to build the sketch
logging.debug( "Building sketch in dir " + inoUploaderSketchDir )
outputFile = open( BUILD_OUTPUT_FILENAME, "w" )
buildResult = subprocess.call(
[ getInoBuilder(), "build", "-m", boardModel ], cwd=inoUploaderSketchDir,
stdout=outputFile, stderr=outputFile )
outputFile.close()
# Upload if the build was successful
if buildResult == 0:
hexFilename = inoUploaderSketchDir + "/.build/{0}/firmware.hex".format( boardModel )
logging.debug( "Trying to upload " + hexFilename )
uploadResult = subprocess.call( [ getBinUploader(),
"-p", boardInfo.processor,
"-P", serialPortName, "-c", "arduino", "-b", str( boardInfo.uploadSpeed ),
"-D", "-U", "flash:w:{0}:i".format( hexFilename ) ] )
logging.debug( "uploadResult = " + str( uploadResult ) )
if uploadResult == 0:
uploadSucceeded = True
else:
logging.warning( "Building of sketch was unsuccessful" )
return uploadSucceeded
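# Example (illustrative): build the sketch in ./my_sketch and upload it to an
# Arduino Uno on the default serial port, forcing a full rebuild:
#
#   upload("./my_sketch", serialPortName="/dev/ttyUSB0", boardModel="uno", forceRebuild=True)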
| 0.878906
| 1
|
res/py/3/Context/http/scrape/_DEFAULT.py
|
ytyaru0/Python.TemplateFileMaker.20180314204216
| 0
|
12784068
|
import urllib.request
from bs4 import BeautifulSoup
url = 'https://www.google.co.jp'
with urllib.request.urlopen(url) as res:
soup = BeautifulSoup(res.read(), 'html.parser')
print(soup.select('title')[0].text) # `<title>*</title>`
| 3.484375
| 3
|
restapi/apis.py
|
reindexer/django-restapi
| 2
|
12784069
|
all_apis = []
| 1.078125
| 1
|
env_settings/migrations/0001_initial.py
|
jjorissen52/django-env-settings
| 2
|
12784070
|
<filename>env_settings/migrations/0001_initial.py
from django.conf import settings
from django.contrib.auth import get_user_model
from django.db import migrations, models
INITIAL_GROUPS = []
def add_initial_auth(apps, schema_editor):
Group = apps.get_model('auth', 'Group')
User = get_user_model()
initial_groups = INITIAL_GROUPS
existing_groups = list(Group.objects.filter(name__in=initial_groups).values_list('name', flat=True))
Group.objects.bulk_create(
[Group(name=group_name) for group_name in initial_groups if group_name not in existing_groups]
)
_ = User.objects.create_superuser(username=settings.INITIAL_ADMIN_USERNAME,
password=<PASSWORD>,
email=settings.INITIAL_ADMIN_EMAIL)
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [migrations.RunPython(add_initial_auth)]
| 2.0625
| 2
|
wiki/forms.py
|
Jeromeschmidt/makewiki_v2
| 0
|
12784071
|
<filename>wiki/forms.py
from django.forms import ModelForm
from wiki.models import Page
class PageForm(ModelForm):
""" Render and process a form based on the Page model. """
    class Meta:
        model = Page
        # ModelForm needs an explicit field selection; '__all__' is assumed here.
        fields = '__all__'
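# Example (illustrative): in a view, bind and validate the form with
#   form = PageForm(request.POST or None)
#   if form.is_valid():
#       page = form.save()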
| 2.390625
| 2
|
parallel/worker.py
|
proteneer/timemachine
| 91
|
12784072
|
<filename>parallel/worker.py
import argparse
import numpy as np
from datetime import datetime
import os
import logging
import pickle
from concurrent import futures
from parallel.grpc import service_pb2, service_pb2_grpc
from parallel.utils import get_worker_status
from parallel.constants import DEFAULT_GRPC_OPTIONS
import grpc
class Worker(service_pb2_grpc.WorkerServicer):
def Submit(self, request, context):
start = datetime.now()
task_fn, args, kwargs = pickle.loads(request.binary)
task_name = task_fn.__name__
print(f"Started {task_name} at {start}", flush=True)
result = task_fn(*args, **kwargs)
finish_time = datetime.now()
total_seconds = (finish_time - start).seconds
print(f"Running {task_name} took {total_seconds}", flush=True)
return service_pb2.PickleData(binary=pickle.dumps(result))
def Status(self, request, context):
return get_worker_status()
def serve(args):
server = grpc.server(futures.ThreadPoolExecutor(max_workers=1), options=DEFAULT_GRPC_OPTIONS)
service_pb2_grpc.add_WorkerServicer_to_server(Worker(), server)
server.add_insecure_port("[::]:" + str(args.port))
server.start()
server.wait_for_termination()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="Worker Server")
parser.add_argument("--gpu_idx", type=int, required=True, help="Location of all output files")
parser.add_argument(
"--port", type=int, required=True, help="Either single or double precision. Double is 8x slower."
)
args = parser.parse_args()
os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_idx)
logging.basicConfig()
serve(args)
| 2.71875
| 3
|
common/test/test_signing_serializer.py
|
andkononykhin/plenum
| 148
|
12784073
|
<gh_stars>100-1000
from collections import OrderedDict
import pytest
from common.serializers.serialization import serialize_msg_for_signing
def test_serialize_int():
assert b"1" == serialize_msg_for_signing(1)
def test_serialize_str():
assert b"aaa" == serialize_msg_for_signing("aaa")
def test_serialize_none():
assert b"" == serialize_msg_for_signing(None)
def test_serialize_simple_dict():
assert b"1:a|2:b" == serialize_msg_for_signing({1: 'a', 2: 'b'})
assert b"1:a|2:b" == serialize_msg_for_signing({"2": 'b', "1": 'a'})
def test_serialize_array():
assert b"1,5,3,4,2" == serialize_msg_for_signing([1, 5, 3, 4, 2])
def test_serialize_ordered_dict():
v1 = OrderedDict([('1', 'a'), ('2', 'b')])
v2 = OrderedDict([('2', 'b'), ('1', 'a')])
assert b"1:a|2:b" == serialize_msg_for_signing(v1)
assert b"1:a|2:b" == serialize_msg_for_signing(v2)
def test_serialize_dict_with_array():
assert b"1:a|2:b|3:1,2:k" == serialize_msg_for_signing({1: 'a', 2: 'b', 3: [1, {2: 'k'}]})
assert b"1:a|2:b|3:1,2:k" == serialize_msg_for_signing({'1': 'a', '2': 'b', '3': ['1', {'2': 'k'}]})
@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")
def test_serialize_dicts_with_different_keys():
v1 = serialize_msg_for_signing(
{
1: 'a',
2: {
3: 'b',
4: {
5: {
6: 'c'
}
}
}
})
v2 = serialize_msg_for_signing(
{
1: 'a',
2: {
3: 'b',
},
4: {
5: {
6: 'c'
}
}
})
assert v1 == v2
@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")
def test_serialize_complex_dict():
assert b'1:a|2:3:b|2:4:5:6:c' == serialize_msg_for_signing(
{
1: 'a',
2: {
3: 'b',
4: {
5: {
6: 'c'
}
}
}
})
assert b'1:a|2:3:b|2:4:5:6:c' == serialize_msg_for_signing(
{
'1': 'a',
'2': {
'3': 'b',
'4': {
'5': {
'6': 'c'
}
}
}
})
v = serialize_msg_for_signing(
{
'1': 'a',
'2': 'b',
'3': {
'4': 'c',
'5': 'd',
'6': {
'7': {
'8': 'e',
'9': 'f'
},
'10': {
'11': 'g',
'12': 'h'
}
},
'13': {
'13': {
'13': 'i',
}
}
}
})
assert b'1:a|2:b|3:4:c|3:5:d|3:6:7:8:e|3:6:7:9:f|3:6:10:11:g|3:6:10:12:h|3:13:13:13:i' == v
@pytest.mark.skip("An issue in Signing Serializer: https://jira.hyperledger.org/browse/INDY-1469")
def test_serialize_complex_ordered_dict():
assert b'1:a|2:3:b|4:c' == serialize_msg_for_signing(
OrderedDict([
('1', 'a'),
('2', OrderedDict([
('3', 'b'),
('4', 'c'),
]))
]))
assert b'1:a|2:3:b|4:c' == serialize_msg_for_signing(
OrderedDict([
('2', OrderedDict([
('4', 'c'),
('3', 'b'),
])),
('1', 'a'),
]))
| 2.53125
| 3
|
src/asphalt/redis/component.py
|
asphalt-framework/asphalt-redis
| 5
|
12784074
|
<filename>src/asphalt/redis/component.py
from __future__ import annotations
import logging
from typeguard import check_argument_types
from asphalt.core import Component, Context, context_teardown
from redis.asyncio import Redis
logger = logging.getLogger(__name__)
class RedisComponent(Component):
"""
Provides a :class:`redis.asyncio.Redis` client as a resource.
:param resource_name: name of the client resource to be added
:param validate_connection: if true, check that the server can be connected to
before adding the client resource
:param kwargs: keyword arguments passed to :class:`redis.asyncio.Redis`
"""
def __init__(
self,
*,
resource_name: str = "default",
validate_connection: bool = True,
**kwargs,
):
check_argument_types()
self.resource_name = resource_name
self.validate_connection = validate_connection
self.client = Redis(**kwargs)
@context_teardown
async def start(self, ctx: Context) -> None:
async with self.client:
if self.validate_connection:
await self.client.ping()
ctx.add_resource(self.client, self.resource_name)
connection_kwargs = self.client.connection_pool.connection_kwargs
if "path" in connection_kwargs:
params = f"path={connection_kwargs['path']!r}"
else:
params = (
f"host={connection_kwargs.get('host', 'localhost')!r}, "
f"port={connection_kwargs.get('port', 6379)}"
)
logger.info(
"Configured Redis client (%s; %s)",
self.resource_name,
params,
)
yield
logger.info("Redis client (%s) shut down", self.resource_name)
| 2.34375
| 2
|
models/HarmonicNet.py
|
stegmaierj/HarmonicNet
| 1
|
12784075
|
# -*- coding: utf-8 -*-
"""
# HarmonicNet.
# Copyright (C) 2021 <NAME>, <NAME>, S.Koppers, <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Please refer to the documentation for more information about the software
# as well as for installation instructions.
#
# If you use this application for your work, please cite the following
# publication:
#
# <NAME>, <NAME>, S.Koppers, <NAME>,
# "Spherical Harmonics for Shape-Constrained 3D Cell Segmentation", ISBI, 2021.
#
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torchvision
import pytorch_lightning as pl
import json
from argparse import ArgumentParser, Namespace
from collections import OrderedDict
from torch.utils.data import DataLoader
from dataloader.harmonic_dataloader import MeristemH5Dataset
from ThirdParty.radam import RAdam
class HarmonicNet_module(nn.Module):
"""Implementation of the 3D U-Net architecture.
"""
def __init__(self, in_channels, coefficients, feat_channels=16, norm_method='instance', **kwargs):
super(HarmonicNet_module, self).__init__()
self.in_channels = in_channels
self.coefficients = coefficients
self.feat_channels = feat_channels
self.norm_method = norm_method # instance | batch | none
if self.norm_method == 'instance':
self.norm = nn.InstanceNorm3d
elif self.norm_method == 'batch':
self.norm = nn.BatchNorm3d
elif self.norm_method == 'none':
self.norm = nn.Identity
else:
raise ValueError('Unknown normalization method "{0}". Choose from "instance|batch|none".'.format(self.norm_method))
# Define layer instances
self.conv_in = nn.Sequential(
nn.Conv3d(in_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
nn.Conv3d(feat_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
self.norm(feat_channels)
)
self.conv_pre = nn.Sequential(
nn.Conv3d(feat_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
nn.Conv3d(feat_channels, feat_channels, kernel_size=3, padding=1),
nn.PReLU(feat_channels),
self.norm(feat_channels)
)
self.down1 = nn.Sequential(
nn.Conv3d(feat_channels, feat_channels*4, kernel_size=4, padding=1, stride=2),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.down1_conv = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.down2 = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*8, kernel_size=4, padding=1, stride=2),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.down2_conv = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.down3 = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*16, kernel_size=4, padding=1, stride=2),
nn.PReLU(feat_channels*16),
self.norm(feat_channels*16)
)
self.down3_conv = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*16, kernel_size=3, padding=1),
nn.PReLU(feat_channels*16),
nn.Conv3d(feat_channels*16, feat_channels*16, kernel_size=3, padding=1),
nn.PReLU(feat_channels*16),
self.norm(feat_channels*16)
)
self.up1 = nn.Sequential(
nn.ConvTranspose3d(feat_channels*16, feat_channels*8, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.up1_conv = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*8, kernel_size=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
self.norm(feat_channels*8)
)
self.up2 = nn.Sequential(
nn.ConvTranspose3d(feat_channels*8, feat_channels*4, kernel_size=3, stride=2, padding=1, output_padding=1),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.up2_conv = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*4, kernel_size=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
self.norm(feat_channels*4)
)
self.det1 = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, 1, kernel_size=1),
nn.Sigmoid()
)
self.shape1 = nn.Sequential(
nn.Conv3d(feat_channels*16, feat_channels*16, kernel_size=3, padding=1),
nn.PReLU(feat_channels*16),
nn.Conv3d(feat_channels*16, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, coefficients, kernel_size=1)
)
self.det2 = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, 1, kernel_size=1),
nn.Sigmoid()
)
self.shape2 = nn.Sequential(
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, coefficients, kernel_size=1)
)
self.det3 = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, 1, kernel_size=1),
nn.Sigmoid()
)
self.shape3 = nn.Sequential(
nn.Conv3d(feat_channels*4, feat_channels*4, kernel_size=3, padding=1),
nn.PReLU(feat_channels*4),
nn.Conv3d(feat_channels*4, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, feat_channels*8, kernel_size=3, padding=1),
nn.PReLU(feat_channels*8),
nn.Conv3d(feat_channels*8, coefficients, kernel_size=1)
)
def forward(self, img):
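        # Encoder-decoder with residual-style additions: three downsampling and two
        # upsampling stages, plus three output heads at decreasing strides. Each head
        # concatenates a sigmoid centroid-detection map (det*) with the predicted
        # spherical-harmonic shape coefficients (shape*).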
conv_pre1 = self.conv_in(img)
conv_pre2 = self.conv_pre(conv_pre1)
conv_pre3 = self.conv_pre(conv_pre2+conv_pre1)
conv_pre4 = self.conv_pre(conv_pre3+conv_pre2)
conv_pre5 = self.conv_pre(conv_pre4+conv_pre3)
down1 = self.down1(conv_pre5)
down1_conv = self.down1_conv(down1)
down2 = self.down2(down1_conv+down1)
down2_conv = self.down2_conv(down2)
down3 = self.down3(down2_conv+down2)
down3_conv = self.down3_conv(down3)
up1 = self.up1(down3_conv+down3)
up1_conv = self.up1_conv(torch.cat((up1,down2_conv),1))
up2 = self.up2(up1_conv+up1)
up2_conv = self.up2_conv(torch.cat((up2,down1_conv),1))
det1 = self.det1(down3_conv)
shape1 = self.shape1(down3_conv)
out1 = torch.cat((det1,shape1),1)
det2 = self.det2(up1_conv)
shape2 = self.shape2(up1_conv)
out2 = torch.cat((det2,shape2),1)
det3 = self.det3(up2_conv)
shape3 = self.shape3(up2_conv)
out3 = torch.cat((det3,shape3),1)
return out1, out2, out3
class HarmonicNet(pl.LightningModule):
def __init__(self, hparams):
super(HarmonicNet, self).__init__()
if type(hparams) is dict:
hparams = Namespace(**hparams)
self.hparams = hparams
self.augmentation_dict = {}
# get the number of coefficients
self.num_coefficients = int((self.hparams.sh_order+1)**2)
# networks
self.network = HarmonicNet_module(in_channels=hparams.in_channels, coefficients=self.num_coefficients, feat_channels=hparams.feat_channels, norm_method=hparams.norm_method)
# cache for generated images
self.last_predictions = None
self.last_imgs = None
self.last_masks = None
def forward(self, z):
return self.network(z)
def load_pretrained(self, pretrained_file, strict=True, verbose=True):
# Load the state dict
state_dict = torch.load(pretrained_file)['state_dict']
# Make sure to have a weight dict
if not isinstance(state_dict, dict):
state_dict = dict(state_dict)
# Get parameter dict of current model
param_dict = dict(self.network.named_parameters())
layers = []
for layer in param_dict:
if strict and not 'network.'+layer in state_dict:
if verbose:
print('Could not find weights for layer "{0}"'.format(layer))
continue
try:
param_dict[layer].data.copy_(state_dict['network.'+layer].data)
layers.append(layer)
except (RuntimeError, KeyError) as e:
print('Error at layer {0}:\n{1}'.format(layer, e))
self.network.load_state_dict(param_dict)
if verbose:
print('Loaded weights for the following layers:\n{0}'.format(layers))
def loss_centroid(self, y_hat, y):
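        # Class-balanced L1 loss: foreground (centroid) voxels are weighted by the
        # number of background voxels and vice versa, so sparse centroid peaks are
        # not drowned out by the dominant background.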
loss = F.l1_loss(y_hat, y, reduction='none')
weight = y*torch.clamp(torch.sum(y<0.5),1,y.numel()) + (1-y)*torch.clamp(torch.sum(y>0.5),1,y.numel())
weight = torch.div(weight, y.numel())
loss = torch.mul(loss, weight)
loss = torch.sum(loss)
loss = torch.div(loss, torch.clamp(torch.sum(weight), 1, y.numel()))
return loss
def loss_encoding(self, y_hat, y, mask):
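        # L1 loss on the spherical-harmonic coefficients, weighted by the centroid
        # map so the encoding is only enforced near cell centroids, and normalised
        # by the number of contributing coefficient entries.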
loss = F.l1_loss(y_hat, y, reduction='none')
loss = torch.mul(loss, mask)
loss = torch.sum(loss)
loss = torch.div(loss, torch.clamp(torch.sum(mask)*self.num_coefficients, self.num_coefficients, y.numel()))
return loss
def training_step(self, batch, batch_idx):
        # Get the image and target maps of the current batch
self.last_imgs = batch['image']
self.centroid_small = batch['stride{0}/centroid_map'.format(self.hparams.strides[2])]
self.encoding_small = batch['stride{0}/encoding_map'.format(self.hparams.strides[2])]
self.centroid_medium = batch['stride{0}/centroid_map'.format(self.hparams.strides[1])]
self.encoding_medium = batch['stride{0}/encoding_map'.format(self.hparams.strides[1])]
self.centroid_large = batch['stride{0}/centroid_map'.format(self.hparams.strides[0])]
self.encoding_large = batch['stride{0}/encoding_map'.format(self.hparams.strides[0])]
# generate images
self.pred_small, self.pred_medium, self.pred_large = self.forward(self.last_imgs)
# get the centroid losses
loss_centroid_small = self.loss_centroid(self.pred_small[:,0:1,...], self.centroid_small)
loss_centroid_medium = self.loss_centroid(self.pred_medium[:,0:1,...], self.centroid_medium)
loss_centroid_large = self.loss_centroid(self.pred_large[:,0:1,...], self.centroid_large)
loss_centroid = (1/6 * loss_centroid_small + 2/6 * loss_centroid_medium + 3/6 * loss_centroid_large)
# get the encoding losses
loss_encoding_small = self.loss_encoding(self.pred_small[:,1:,...], self.encoding_small, self.centroid_small)
loss_encoding_medium = self.loss_encoding(self.pred_medium[:,1:,...], self.encoding_medium, self.centroid_medium)
loss_encoding_large = self.loss_encoding(self.pred_large[:,1:,...], self.encoding_large, self.centroid_large)
loss_encoding = (1/6 * loss_encoding_small + 2/6 * loss_encoding_medium + 3/6 * loss_encoding_large)
loss = loss_centroid * self.hparams.centroid_weight + \
loss_encoding/self.num_coefficients * self.hparams.encoding_weight
tqdm_dict = {'centroid_loss': loss_centroid, 'encoding_loss':loss_encoding, 'epoch': self.current_epoch}
output = OrderedDict({
'loss': loss,
'progress_bar': tqdm_dict,
'log': tqdm_dict
})
if torch.isnan(loss):
print('Image minmax: {0}/{1}'.format(self.last_imgs.min(), self.last_imgs.max()))
print('Cent_small:{0}, Cent_medium:{1}, Cent_large:{2}, Enc_small:{3}, Enc_medium:{4}, Enc_large:{5}'.format(self.centroid_small.min(),\
self.centroid_medium.min(),\
self.centroid_large.min(),\
self.encoding_small.min(),\
self.encoding_medium.min(),\
self.encoding_large.min()))
print('Cent_small:{0}, Cent_medium:{1}, Cent_large:{2}, Enc_small:{3}, Enc_medium:{4}, Enc_large:{5}'.format(loss_centroid_small,\
loss_centroid_medium,\
loss_centroid_large,\
loss_encoding_small,\
loss_encoding_medium,\
loss_encoding_large))
return output
def test_step(self, batch, batch_idx):
test_imgs = batch['image']
centroid_small = batch['stride{0}/centroid_map'.format(self.hparams.strides[2])]
encoding_small = batch['stride{0}/encoding_map'.format(self.hparams.strides[2])]
centroid_medium = batch['stride{0}/centroid_map'.format(self.hparams.strides[1])]
encoding_medium = batch['stride{0}/encoding_map'.format(self.hparams.strides[1])]
centroid_large = batch['stride{0}/centroid_map'.format(self.hparams.strides[0])]
encoding_large = batch['stride{0}/encoding_map'.format(self.hparams.strides[0])]
pred_small, pred_medium, pred_large = self.forward(test_imgs)
loss_centroid_small = self.loss_centroid(pred_small[:,0:1,...], centroid_small)
loss_centroid_medium = self.loss_centroid(pred_medium[:,0:1,...], centroid_medium)
loss_centroid_large = self.loss_centroid(pred_large[:,0:1,...], centroid_large)
loss_centroid = (loss_centroid_small + loss_centroid_medium + loss_centroid_large) / 3
# get the encoding losses
loss_encoding_small = self.loss_encoding(pred_small[:,1:,...], encoding_small, centroid_small)
loss_encoding_medium = self.loss_encoding(pred_medium[:,1:,...], encoding_medium, centroid_medium)
loss_encoding_large = self.loss_encoding(pred_large[:,1:,...], encoding_large, centroid_large)
loss_encoding = (loss_encoding_small + loss_encoding_medium + loss_encoding_large) / 3
loss = self.hparams.centroid_weight * loss_centroid + \
self.hparams.encoding_weight * loss_encoding
return {'test_loss': loss}
def test_end(self, outputs):
avg_loss = torch.stack([x['test_loss'] for x in outputs]).mean()
tensorboard_logs = {'test_loss': avg_loss}
return {'avg_test_loss': avg_loss, 'log': tensorboard_logs}
def validation_step(self, batch, batch_idx):
val_imgs = batch['image']
centroid_small = batch['stride{0}/centroid_map'.format(self.hparams.strides[2])]
encoding_small = batch['stride{0}/encoding_map'.format(self.hparams.strides[2])]
centroid_medium = batch['stride{0}/centroid_map'.format(self.hparams.strides[1])]
encoding_medium = batch['stride{0}/encoding_map'.format(self.hparams.strides[1])]
centroid_large = batch['stride{0}/centroid_map'.format(self.hparams.strides[0])]
encoding_large = batch['stride{0}/encoding_map'.format(self.hparams.strides[0])]
pred_small, pred_medium, pred_large = self.forward(val_imgs)
loss_centroid_small = self.loss_centroid(pred_small[:,0:1,...], centroid_small)
loss_centroid_medium = self.loss_centroid(pred_medium[:,0:1,...], centroid_medium)
loss_centroid_large = self.loss_centroid(pred_large[:,0:1,...], centroid_large)
loss_centroid = (loss_centroid_small + loss_centroid_medium + loss_centroid_large) / 3
# get the encoding losses
loss_encoding_small = self.loss_encoding(pred_small[:,1:,...], encoding_small, centroid_small)
loss_encoding_medium = self.loss_encoding(pred_medium[:,1:,...], encoding_medium, centroid_medium)
loss_encoding_large = self.loss_encoding(pred_large[:,1:,...], encoding_large, centroid_large)
loss_encoding = (loss_encoding_small + loss_encoding_medium + loss_encoding_large) / 3
loss = self.hparams.centroid_weight * loss_centroid + \
self.hparams.encoding_weight * loss_encoding
return {'val_loss': loss}
def validation_end(self, outputs):
avg_loss = torch.stack([x['val_loss'] for x in outputs]).mean()
tensorboard_logs = {'val_loss': avg_loss}
return {'avg_val_loss': avg_loss, 'log': tensorboard_logs}
def configure_optimizers(self):
opt = RAdam(self.network.parameters(), lr=self.hparams.learning_rate)
return [opt], []
def train_dataloader(self):
if self.hparams.train_list is None:
return None
else:
dataset = MeristemH5Dataset(self.hparams.train_list, self.hparams.data_root, patch_size=self.hparams.patch_size,\
image_group=self.hparams.image_groups[0], strides=self.hparams.strides, norm_method=self.hparams.data_norm,\
sh_order=self.hparams.sh_order)
return DataLoader(dataset, batch_size=self.hparams.batch_size, shuffle=True, drop_last=True)
def test_dataloader(self):
if self.hparams.test_list is None:
return None
else:
dataset = MeristemH5Dataset(self.hparams.test_list, self.hparams.data_root, patch_size=self.hparams.patch_size,\
image_group=self.hparams.image_groups[0], strides=self.hparams.strides, norm_method=self.hparams.data_norm, \
sh_order=self.hparams.sh_order)
return DataLoader(dataset, batch_size=self.hparams.batch_size)
def val_dataloader(self):
if self.hparams.val_list is None:
return None
else:
dataset = MeristemH5Dataset(self.hparams.val_list, self.hparams.data_root, patch_size=self.hparams.patch_size,\
image_group=self.hparams.image_groups[0], strides=self.hparams.strides, norm_method=self.hparams.data_norm,\
sh_order=self.hparams.sh_order)
return DataLoader(dataset, batch_size=self.hparams.batch_size)
def on_epoch_end(self):
# log sampled images
z_slice_small = int(self.pred_small.shape[2]//2)
grid_small = torch.cat((self.pred_small[:,0:1,z_slice_small,:,:], self.centroid_small[:,0:1,z_slice_small,:,:]), 0)
prediction_grid = torchvision.utils.make_grid(grid_small)
self.logger.experiment.add_image('centroids_small', prediction_grid, self.current_epoch)
z_slice_medium = int(self.pred_medium.shape[2]//2)
grid_small = torch.cat((self.pred_medium[:,0:1,z_slice_medium,:,:], self.centroid_medium[:,0:1,z_slice_medium,:,:]), 0)
prediction_grid = torchvision.utils.make_grid(grid_small)
self.logger.experiment.add_image('centroids_medium', prediction_grid, self.current_epoch)
z_slice_large = int(self.pred_large.shape[2]//2)
grid_small = torch.cat((self.pred_large[:,0:1,z_slice_large,:,:], self.centroid_large[:,0:1,z_slice_large,:,:]), 0)
prediction_grid = torchvision.utils.make_grid(grid_small)
self.logger.experiment.add_image('centroids_large', prediction_grid, self.current_epoch)
z_slice_raw = int(self.last_imgs.shape[2]//2)
img_grid = torchvision.utils.make_grid(self.last_imgs[:,0,z_slice_raw,:,:])
self.logger.experiment.add_image('raw_images', img_grid, self.current_epoch)
def set_augmentations(self, augmentation_dict_file):
self.augmentation_dict = json.load(open(augmentation_dict_file))
@staticmethod
def add_model_specific_args(parent_parser):
"""
Parameters you define here will be available to your model through self.hparams
"""
parser = ArgumentParser(parents=[parent_parser])
# network params
parser.add_argument('--in_channels', default=1, type=int)
parser.add_argument('--feat_channels', default=16, type=int)
parser.add_argument('--patch_size', default=(64,128,128), type=int, nargs='+')
parser.add_argument('--norm_method', default='instance', type=str)
# data
parser.add_argument('--data_norm', default='percentile', type=str)
parser.add_argument('--data_root', default=r'D:\LfB\pytorchRepo\data\PNAS', type=str)
parser.add_argument('--train_list', default=r'D:\LfB\pytorchRepo\data\PNAS_harmonic_plant_split1_train.csv', type=str)
parser.add_argument('--test_list', default=r'D:\LfB\pytorchRepo\data\PNAS_harmonic_plant_split1_test.csv', type=str)
parser.add_argument('--val_list', default=r'D:\LfB\pytorchRepo\data\PNAS_harmonic_plant_split1_val.csv', type=str)
parser.add_argument('--image_groups', default=('data/image',), type=str, nargs='+')
parser.add_argument('--strides', default=(2,4,8), type=int, nargs='+')
parser.add_argument('--sh_order', default=5, type=int)
# training params (opt)
parser.add_argument('--batch_size', default=1, type=int)
parser.add_argument('--learning_rate', default=0.001, type=float)
parser.add_argument('--centroid_weight', default=0.50, type=float)
parser.add_argument('--encoding_weight', default=0.50, type=float)
return parser
| 1.6875
| 2
|
Eldorado_Actor-Critic/source/utils/rollouts.py
|
jbr-ai-labs/BAROCCO
| 3
|
12784076
|
<gh_stars>1-10
import numpy as np
from source.env.lib.log import Blob
def discountRewardsTD(rewards, vals, gamma=0.99, nSteps=10000):
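    # n-step bootstrapped TD returns: each entry is the discounted sum of up to
    # `nSteps` future rewards plus the discounted value estimate at the bootstrap
    # point; the last entry is simply the final reward.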
N = len(rewards) - 1
rets = np.zeros(N)
rets[-1] = rewards[-1]
for idx in reversed(range(N - 1)):
nStepsCur = min(nSteps, N - 1 - idx)
ret = [rewards[idx + i + 1] * gamma ** i for i in range(nStepsCur)]
rets[idx] = sum(ret) + gamma ** nStepsCur * vals[idx + nStepsCur]
return list(rets)
class Rollout:
def __init__(self, config):
self.config = config
self.actions = []
self.policy = []
self.states = []
self.states_global = []
self.rewards = []
self.rets = []
self.vals = []
self.contacts = []
self.feather = Feather()
def step(self, action, policy, state=None, reward=None, contact=None, val=None):
self.actions.append(action)
self.policy.append(policy)
self.states.append(state)
self.rewards.append(reward)
self.vals.append(val)
self.contacts.append(contact)
def stepGlobal(self, state_global):
self.states_global.append(state_global)
def finish(self):
self.lifespan = len(self.rewards)
self.rets = discountRewardsTD(self.rewards, self.vals, self.config.GAMMA)
self.feather.finish()
# Rollout logger
class Feather:
def __init__(self):
self.blob = Blob()
def scrawl(self, ent, val, reward, attack, contact):
self.blob.annID = ent.annID
self.stats(val, reward, attack, contact)
def stats(self, value, reward, attack, contact):
self.blob.reward.append(reward)
self.blob.value.append(float(value))
self.blob.contact.append(float(contact))
if attack is not None:
self.blob.attack.append(float(attack))
def finish(self):
self.blob.finish()
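

if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): a toy worked example
    # of the n-step TD return computation above.
    print(discountRewardsTD([1.0, 1.0, 1.0, 0.0], [0.5, 0.5, 0.5, 0.5], gamma=0.9, nSteps=2))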
| 2.15625
| 2
|
pyfont/api.py
|
BingerYang/pyfont
| 0
|
12784077
|
# -*- coding: utf-8 -*-
# @Time : 2019-11-08 10:08
# @Author : binger
from .draw_by_html import EffectFont
from .draw_by_html import FontDrawByHtml
from . import FontDraw, FontAttr
from PIL import Image, ImageDraw, ImageFont
class FontFactory(object):
def __init__(self, effect_font, render_by_mixed=False):
"""
        :param render_by_mixed: if False, rendering is done only by the browser/HTML backend
:param fill_type:
"""
assert isinstance(effect_font, EffectFont)
self.render_by_mixed = render_by_mixed
self._effect_font = effect_font
self.font_draw = None
def is_effected(self):
return any([self._effect_font.gradient_enable, self._effect_font.shadow_enable, self._effect_font.stroke_enable,
self._effect_font.weight != 'normal'])
def get_line_size(self, size, text):
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
return self.font_draw.get_line_size(text, size=size)
def get_max_canvas_size(self, text):
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
return self.font_draw.max_canvas_size(text, is_truncated=True)
def get_size_at_limit_range(self, text, size, char_spacing_par=0.1):
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
return self.font_draw.get_size_at_limit_range(text, size, char_spacing_par=char_spacing_par)
def get_text_size_at_width(self, size, text, width, spacing=4):
effect_font = ImageFont.truetype(font=self._effect_font.base.path, size=size)
len_text = len(text)
line = text
remain = ''
temp_width = 0
for n in range(len_text):
temp_width += effect_font.getsize(text[n])[0]
if temp_width > width:
line = text[:n - 1]
remain = text[n - 1:]
break
temp_width += spacing
return line, remain
def render_to_rng(self):
if self.render_by_mixed and not self.is_effected():
limit_text_cb = lambda *args, **kwargs: True
if not self.font_draw:
self.font_draw = FontDraw(self._effect_font.base)
resp = self.font_draw.write(self._effect_font.text, limit_text_cb=limit_text_cb)
img = resp.img
else:
img = FontDrawByHtml.render_to_image(self._effect_font)
return img
if __name__ == '__main__':
font = EffectFont()
path = '../simkai.ttf'
import os
print(os.path.abspath(path))
font.set_text_base(size=100, path=path)
font.base.clear_margin = True
# font.shadow((80, 31, 191, 0.3), sigma=8, x=0, y=6)
# font.gradient([("#da7eeb", 0), ("#9070d8", 0.5)], angle="center", type="radial")
# font.fill_color = "#da7eeb"
font.fill_color = (1, 1, 1, 255)
# font.stroke("#4e1269", 1)
font.text = '实例\n中国\n实例'
obj = FontFactory(font, render_by_mixed=True)
img = obj.render_to_rng()
print(img.size)
img.save("11.png")
| 2.609375
| 3
|
study_tool/query.py
|
cubeman99/russian-study-tool
| 0
|
12784078
|
<reponame>cubeman99/russian-study-tool<filename>study_tool/query.py
from study_tool.card import Card
class CardQuery:
"""
Queries a list of cards.
"""
def __init__(self, max_count=30, max_score=1.0,
max_proficiency=10, card_type=None):
"""
Defines a study query
"""
self.max_count = max_count
self.max_score = max_score
self.max_proficiency = max_proficiency
        self.card_type = card_type
def matches(self, card: Card, study_data) -> bool:
if self.card_type is not None and card.get_word_type() != self.card_type:
return False
if self.max_score is not None and study_data.get_history_score() > self.max_score:
return False
if self.max_proficiency is not None and study_data.get_proficiency_level() > self.max_proficiency:
return False
return True
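

if __name__ == "__main__":
    # Illustrative sketch (not part of the original module): `matches` only calls
    # `get_word_type`, `get_history_score` and `get_proficiency_level`, so simple
    # stand-ins are used here instead of real Card / study-data objects.
    class _FakeCard:
        def get_word_type(self):
            return "noun"

    class _FakeStudyData:
        def get_history_score(self):
            return 0.4

        def get_proficiency_level(self):
            return 3

    print(CardQuery(max_score=0.5, max_proficiency=5).matches(_FakeCard(), _FakeStudyData()))  # True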
| 3.140625
| 3
|
thermosteam/utils/decorators/thermo_user.py
|
yoelcortes/thermotree
| 2
|
12784079
|
<filename>thermosteam/utils/decorators/thermo_user.py
# -*- coding: utf-8 -*-
# BioSTEAM: The Biorefinery Simulation and Techno-Economic Analysis Modules
# Copyright (C) 2020-2021, <NAME> <<EMAIL>>
#
# This module is under the UIUC open-source license. See
# github.com/BioSTEAMDevelopmentGroup/biosteam/blob/master/LICENSE.txt
# for license details.
"""
"""
import thermosteam as tmo
__all__ = ('thermo_user',)
def thermo_user(cls):
cls._load_thermo = _load_thermo
cls.thermo = thermo
cls.chemicals = chemicals
cls.mixture = mixture
return cls
def _load_thermo(self, thermo):
self._thermo = thermo = tmo.settings.get_default_thermo(thermo)
return thermo
@property
def thermo(self):
return self._thermo
@property
def chemicals(self):
return self._thermo.chemicals
@property
def mixture(self):
return self._thermo.mixture
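

if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): decorating a
    # hypothetical class gives it the `thermo`, `chemicals` and `mixture`
    # properties backed by `_load_thermo`. Instantiating it requires
    # thermosteam's default thermo/chemicals to be configured first.
    @thermo_user
    class HypotheticalUnit:
        def __init__(self, thermo=None):
            self._load_thermo(thermo)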
| 1.929688
| 2
|
project/university/migrations/0009_rename_courseid_course_courseid.py
|
minaee/cd557
| 0
|
12784080
|
# Generated by Django 4.0.3 on 2022-03-12 20:16
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('university', '0008_rename_course_id_course_courseid_and_more'),
]
operations = [
migrations.RenameField(
model_name='course',
old_name='courseId',
new_name='courseid',
),
]
| 1.734375
| 2
|
personal_context_builder/test/test_API_models.py
|
InternetOfUs/personal-context-builder
| 0
|
12784081
|
""" Test for the API that handle models
Copyright (c) 2021 Idiap Research Institute, https://www.idiap.ch/
Written by <NAME> <<EMAIL>>,
"""
import unittest
from fastapi.testclient import TestClient # type: ignore
from personal_context_builder import config
from personal_context_builder.wenet_fastapi_app import app
class APIModelsTestCase(unittest.TestCase):
def setUp(self):
self.client = TestClient(app)
def test_simple_lda_exist(self):
response = self.client.get(config.PCB_VIRTUAL_HOST_LOCATION + "/models/")
self.assertIn("SimpleLDA", response.json())
def test_simple_bow_exist(self):
response = self.client.get(config.PCB_VIRTUAL_HOST_LOCATION + "/models/")
self.assertIn("SimpleBOW", response.json())
| 2.484375
| 2
|
fonction_py/tools.py
|
LaRiffle/axa_challenge
| 1
|
12784082
|
<filename>fonction_py/tools.py
from numpy import *
import math
import pandas
def poly_exp(X, degree):
N,D = X.shape
for d in range(2,degree+1):
X = column_stack([X,X[:,0:D]**d])
return X
def MSE(yt,yp):
print("NE PAS UTILISER MSE !! utiliser LinExp !!!")
def normalize(df):
return (df - df.mean()) / (df.max() - df.min())
def faireSplitting(x, y, taille): # return xTrain, xTest, yTrain, yTest
ln = (random.rand(x.shape[0]) < taille)
return x[ln], x[~ln], y[ln], y[~ln];
def check(yEmpirique, yTest):  # USE WITH PANDAS DATA FRAMES
    alpha = 0.1
    if yTest.shape[0] != yEmpirique.shape[0]:
        print("Error: the prediction does not have the expected size")
        return 0
    print("accuracy in %:")
    print(sum(yEmpirique == yTest) * 100 / yEmpirique.shape[0])  # percentage of correct predictions
linex = 0
diff = (yTest-yEmpirique).values
for i in range(len(diff)):
linex = linex + math.exp(alpha * diff[i]) - alpha*diff[i]-1
print("linEx :")
print(linex/yTest.shape[0])
def LinExp(yEmpirique, yTest):  # Returns the mean error. USE WITH VECTORS: to convert a pandas DataFrame to a vector, use DataFrame.values
    alpha = 0.1
    coeff = linspace(1, 3, len(yEmpirique))
linex = 0
diff = (yTest - yEmpirique).values
for i in range(len(diff)):
linex = linex + coeff[i]*( math.exp(alpha * diff[i]) - alpha * diff[i] - 1)
return linex/yTest.shape[0]
def MatLinExp(yEmpirique, yTest):  # Returns the error matrix. USE WITH VECTORS: to convert a pandas DataFrame to a vector, use DataFrame.values
alpha = 0.1
linex = []
diff = (yTest - yEmpirique)
for i in range(len(diff)):
linex.append(math.exp(alpha * diff[i]) - alpha * diff[i] - 1)
return linex
def Accuracy(yEmpirique, yTest):
return sum(yEmpirique==yTest)*100/yEmpirique.shape[0]
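

if __name__ == '__main__':
    # Illustrative sketch (not part of the original module): the LinEx loss
    # exp(alpha*d) - alpha*d - 1 penalises errors in d = yTest - yEmpirique asymmetrically.
    yTest = pandas.Series([10.0, 12.0, 9.0])
    yPred = pandas.Series([11.0, 11.0, 9.0])
    print(LinExp(yPred, yTest))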
| 2.84375
| 3
|
pyot/managedep/static/startproject/views.py
|
paaksing/Pyot
| 60
|
12784083
|
<reponame>paaksing/Pyot
from aiohttp import web
from pyot.models import lol
from pyot.core.exceptions import NotFound
from .wsgi import app
# Create your views here ...
@app.routes.get('/summoner_level/{name}/{platform}')
async def summoner_level(request: web.Request):
'''Get summoner level by name and platform'''
# Sample Function based View
# Pyot Model: lol
# Pyot Core Object: Summoner
try:
summoner = await lol.Summoner(name=request.match_info["name"], platform=request.match_info["platform"]).get()
return web.json_response(summoner.level)
except NotFound:
return web.json_response({"message": "Summoner Not Found"}, status=404)
@app.routes.view('/summoner_winrate/{name}/{platform}')
class SummonerWinrate(web.View):
'''Get summoner winrate by name and platform for this season'''
# Sample Class based View
# Pyot Model: lol
# Pyot Core Object: Summoner, SummonerLeague
def get_winrate(self, league: lol.SummonerLeague):
wr = []
for entry in league.entries:
wr.append(entry.wins / (entry.wins + entry.losses))
return sum(wr) / len(wr)
async def get(self):
summoner = await lol.Summoner(name=self.request.match_info["name"], platform=self.request.match_info["platform"]).get()
league = await summoner.league_entries.get()
return web.json_response(self.get_winrate(league) * 100)
| 2.46875
| 2
|
tests/test_guard_instance.py
|
joaoteixeira88/pyguard
| 2
|
12784084
|
import pytest
from exception.argument_not_instance_of_exception import ArgumentNotInstanceOfException
from guard import Guard
@pytest.mark.parametrize(
"param, typeof, param_name, message, expected",
[
(2, str, None, "parameter is not from type <class 'str'>.", pytest.raises(ArgumentNotInstanceOfException)),
([], dict, None, "parameter is not from type <class 'dict'>.", pytest.raises(ArgumentNotInstanceOfException)),
("test", bool, None, "parameter is not from type <class 'bool'>.", pytest.raises(ArgumentNotInstanceOfException))
]
)
def test_IsNotInstanceOfType_InvalidType_RaisedArgumentNotInstanceOfException(param, typeof, param_name,
message, expected):
with expected as err:
Guard.is_not_instance_of_type(param=param, typeof=typeof, param_name=param_name)
assert message in str(err.value)
| 2.765625
| 3
|
src/UnloadAutoPartitions/genunload.py
|
rmcelroyF8/amazon-redshift-utils
| 2,452
|
12784085
|
<reponame>rmcelroyF8/amazon-redshift-utils<gh_stars>1000+
"""
GenUnload : Generate unload commands given a config file (config.ini) which has information about table, schema, partition column & sort columns
-------------------------------------------------------------------------------------------------------------------------------------------
-- <NAME> 05/23/2019
example:
python3 genunload.py
Readme.md has the requirements info.
"""
from __future__ import print_function
import os
import sys
import datetime
from datetime import timedelta
import boto3
import base64
import json
import argparse
import configparser
import pgpasslib
import pg8000
import re
ssl = True
__version__ = "1.0"
pg8000.paramstyle = "qmark"
def connect(host, port, db, dbuser, table, schema, column_list, partition_column, sort_keys, s3path, iamrole):
# get password from .pgpass or environment
try:
pg_pwd = pgpasslib.getpass(host,port,db,dbuser)
print(pg_pwd)
if pg_pwd:
            pwd = pg_pwd
except pgpasslib.FileNotFound as e:
pass
# Connect to the cluster
try:
if debug:
print('Connecting to Redshift: %s' % host)
        conn = pg8000.connect(database=db, user=dbuser, password=pwd, host=host, port=port, ssl=ssl)
conn.autocommit = True
except:
print('Redshift Connection Failed: exception %s' % sys.exc_info()[1])
raise
if debug:
print('Successfully connected to Redshift cluster')
# create a new cursor
cursor = conn.cursor()
check_table_exists(cursor, conn, table, schema)
full_column_list, partition_keys, partition_column_type = get_column_list_partition_keys(cursor, conn, table, schema, column_list, partition_column)
gen_unload(full_column_list, partition_keys, partition_column_type, schema, table, partition_column, sort_keys, s3path, iamrole)
if execute:
print("Executing unload commands !")
execute_unload(cursor, conn)
conn.commit()
if debug:
print("Done with the script !!")
def check_table_exists(cursor, conn, table, schema):
# check if table exists
if debug:
print('Check for table exists: %s' % table)
stmt = "SELECT EXISTS (SELECT 1 FROM information_schema.tables WHERE table_schema = '%s' AND table_name = '%s');" % (schema, table)
cursor.execute(stmt)
s = cursor.fetchone()
s = ' '.join(map(str, s))
if s == 'False':
print('Table does not exist: %s' % table)
exit(1)
else:
print('Table %s exists' % table)
def get_column_list_partition_keys(cursor, conn, table, schema, column_list, partition_column):
# get partition column data type
stmt = "select data_type from information_schema.columns where table_name = '%s' and table_schema = '%s' and column_name = '%s';" % (
table, schema, partition_column)
if debug:
print('Collecting data type information for partition column: %s' % partition_column)
cursor.execute(stmt)
partition_column_type = cursor.fetchone()
if partition_column_type is None:
print('Please check your partition column: %s' % partition_column)
exit(1)
partition_column_type = ' '.join(map(str, partition_column_type))
if any(re.findall(r'integer|numeric|decimal|bigint|real|double precision|smallint', partition_column_type,
re.IGNORECASE)):
partition_column_type = 'numeric'
elif any(re.findall(r'timestamp without time zone|date|character varying|character|timestamp with time zone|bool|boolean', partition_column_type, re.IGNORECASE)):
partition_column_type = 'alphanumeric'
# if column_list not set , then select all columns except for partition column
if not column_list:
stmt = "select column_name from information_schema.columns where table_name = '%s' and table_schema = '%s' order by ordinal_position;" % (table, schema)
if debug:
print('Collecting column list excluding partition column: %s' % partition_column)
cursor.execute(stmt)
column_list = cursor.fetchall()
full_column_list = [x[0] for x in column_list]
full_column_list.remove(partition_column)
full_column_list = ','.join(map(str, full_column_list))
else:
full_column_list = column_list
# get distinct partition keys using partition column
stmt = "select distinct %s from %s.%s;" % (partition_column, schema, table)
if debug:
print('Collecting distinct partition keys for partition column: %s [skipping NULL values]' % partition_column)
cursor.execute(stmt)
keys = cursor.fetchall()
partition_keys = [x[0] for x in keys]
print('Column list = %s' % full_column_list)
# print('Partition keys = %s' % partition_keys)
return full_column_list, partition_keys, partition_column_type
def gen_unload(full_column_list, partition_keys, partition_column_type, schema, table, partition_column, sort_keys, s3path, iamrole):
"""
:param full_column_list: list
:param partition_keys: list
:param partition_column_type: str
:param schema: str
:param table: str
:param partition_column: str
:param sort_keys: str
:param s3path: str
:param iamrole: str
:return: str
"""
s3path = s3path[:-1] if s3path.endswith('/') else s3path
column_list_str = full_column_list
if sort_keys:
sql = 'SELECT ' + column_list_str + ' FROM ' + schema + '.' + table + ' WHERE ' + partition_column + '=' + '<>' + ' ORDER BY ' + sort_keys
else:
sql = 'SELECT ' + column_list_str + ' FROM ' + schema + '.' + table + ' WHERE ' + partition_column + '=' + '<>'
part1 = 'UNLOAD ( ' + '\'' + sql + '\''
part3 = 'IAM_ROLE \'' + iamrole + '\' FORMAT PARQUET ALLOWOVERWRITE;'
unload_stmt = str()
for key in partition_keys:
if key is not None:
if partition_column_type == 'numeric':
temp = part1.replace('<>', str(key))
unload_stmt = unload_stmt + temp + ') TO ' + '\'' + s3path + '/' + partition_column + '=' + str(key) + '/\' ' + part3 + '\n'
elif partition_column_type == 'alphanumeric':
temp = part1.replace('<>', '\\\'' + str(key) + '\\\'')
unload_stmt = unload_stmt + temp + ') TO ' + '\'' + s3path + '/' + partition_column + '=' + str(key) + '/\' ' + part3 + '\n'
if debug:
print('Generating unload statements !')
with open('unload.sql', 'w') as file:
file.write(unload_stmt)
def execute_unload(cursor, conn):
    # read back the file written by gen_unload() in the current working directory
    with open('unload.sql', 'r') as sql:
unload_commands = sql.read()
for s in unload_commands.split(";"):
stmt = s.strip()
if s is not None and stmt != "":
if debug is True:
print(stmt)
try:
cursor.execute(stmt)
except Exception as e:
if re.search(".*column.*does not exist*", str(e)) is not None:
print('Check the column list !')
raise
else:
print(e)
raise e
conn.commit()
print('Done with executing unload commands !')
def main():
config = configparser.ConfigParser()
config.read('config.ini')
parser = argparse.ArgumentParser()
global debug
global execute
host = config.get('cluster', 'identifier', fallback=None)
dbuser = config.get('cluster', 'dbuser', fallback='dbadmin')
db = config.get('cluster', 'database', fallback='dev')
port = int(config.get('cluster', 'port', fallback='5439'))
schema = config.get('cluster', 'schema', fallback='public')
table = config.get('cluster', 'table', fallback=None)
partition_column = config.get('cluster', 'partition_key', fallback=None)
sort_keys = config.get('cluster', 'sort_keys', fallback=None)
column_list = config.get('cluster', 'column_list', fallback='All')
debug = config.getboolean('cluster', 'debug', fallback=False)
execute = config.getboolean('cluster', 'execute', fallback=False)
s3path = config.get('s3', 'path', fallback=None)
iamrole = config.get('creds', 'iam_role', fallback=None)
if host is None or dbuser is None or db is None or schema is None or table is None or partition_column is None or sort_keys is None or column_list is None or s3path is None or iamrole is None:
parser.print_help()
exit()
connect(host, port, db, dbuser, table, schema, column_list, partition_column, sort_keys, s3path, iamrole)
if __name__ == "__main__":
main()
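# Illustrative config.ini sketch (not part of the original repository; every value below is a
# placeholder). The section and key names mirror what main() reads above via configparser;
# leaving column_list empty makes the script unload all columns except the partition column.
#
# [cluster]
# identifier = my-cluster.example.us-east-1.redshift.amazonaws.com
# dbuser = dbadmin
# database = dev
# port = 5439
# schema = public
# table = sales
# partition_key = sale_date
# sort_keys = sale_id
# column_list =
# debug = True
# execute = False
#
# [s3]
# path = s3://my-bucket/unload/sales/
#
# [creds]
# iam_role = arn:aws:iam::123456789012:role/RedshiftUnloadRole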
| 2.578125
| 3
|
django_shop/apps/goods/admin.py
|
XZH950926/mydjango
| 0
|
12784086
|
<gh_stars>0
from django.contrib import admin
from .models import Goods,GoodsCategory,IndexBanner,goodsImage
# Register your models here.
import xadmin
class GoodsAdmin(object):
    # Columns to display in the list
list_display = ["name", "click_num", "sold_num", "fav_num", "goods_num", "market_price",
"shop_price", "goods_brief", "goods_desc", "is_new", "is_hot", "add_time"]
    # Searchable fields
style_fields = {"goods_desc": "ueditor"}
search_fields = ['name', ]
    # Fields editable directly from the list page
list_editable = ["is_hot",]
class BannerAdmin(object):
list_display=['goods','index','image']
class goodsImageAdmin(object):
list_display=['goods','image']
xadmin.site.register(Goods,GoodsAdmin)
xadmin.site.register(IndexBanner,BannerAdmin)
xadmin.site.register(goodsImage,goodsImageAdmin)
| 1.921875
| 2
|
src/materials/reeds_data.py
|
spasmann/1DQMC
| 0
|
12784087
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 28 13:22:41 2022
@author: sampasmann
"""
import numpy as np
from time import process_time
def reeds_data(Nx=1000, LB=-8.0, RB=8.0):
G = 1 # number of energy groups
sigt = np.empty((Nx,G))
sigs = np.empty((Nx,G,G))
source = np.empty((Nx,G))
dx = (RB-LB)/Nx
xspan = np.linspace(LB+dx/2, RB-dx/2, Nx)
count = 0
for x in xspan:
if (x < -6):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 0.0
elif (-6 < x) and (x < -5):
sigt[count,:] = 1.0
sigs[count,:] = 0.9
source[count,:] = 1.0
elif (-5 < x < -3): #vacuum region 1
sigt[count,:] = 0.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (-3 < x < -2):
sigt[count,:] = 5.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (-2 < x < 2):
sigt[count,:] = 50.0
sigs[count,:,:] = 0.0
source[count,:] = 50.0
elif (2 < x < 3):
sigt[count,:] = 5.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (3 < x < 5): # vacuum region 2
sigt[count,:] = 0.0
sigs[count,:,:] = 0.0
source[count,:] = 0.0
elif (5 < x < 6):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 1.0
elif (6 < x):
sigt[count,:] = 1.0
sigs[count,:,:] = 0.9
source[count,:] = 0.0
count += 1
siga = (sigt - sigs)
return sigt, sigs, siga, source, G
if __name__ == "__main__":
"""
start = process_time()
sigt, sigs, siga, source, G = reeds_data(1)
stop = process_time()
time1 = stop-start
print("Elapsed time with Compliation:", time1)
"""
start = process_time()
sigt, sigs, siga, source, G = reeds_data(100)
stop = process_time()
time2 = stop-start
#print("Elapsed time After Compliation:", time2)
# print("A {}x speed up".format(round(time1/time2)))
| 2.15625
| 2
|
Establishment of basic Database/a3-Q1.py
|
Xuechunqiu/Basic-SQLite
| 0
|
12784088
|
import sqlite3
import time
import hashlib
import random  # needed for random.randint() in process_bill()
conn = None
c = None
def connect(path):
global conn, c
conn = sqlite3.connect(path)
c = conn.cursor()
c.execute(' PRAGMA foreign_keys=ON; ')
conn.commit()
return
def drop_tables():
global conn, c
c.execute("drop table if exists demeritNotices;")
c.execute("drop table if exists tickets;")
c.execute("drop table if exists registrations;")
c.execute("drop table if exists vehicles;")
c.execute("drop table if exists marriages;")
c.execute("drop table if exists births;")
c.execute("drop table if exists persons;")
c.execute("drop table if exists payments;")
c.execute("drop table if exists users;")
conn.commit()
def define_tables():
global conn, c
c.execute('''create table persons (
fname char(12),
lname char(12),
bdate date,
bplace char(20),
address char(30),
phone char(12),
primary key (fname, lname)
); ''')
c.execute('''create table births (
regno int,
fname char(12),
lname char(12),
regdate date,
regplace char(20),
        gender char(1),
f_fname char(12),
f_lname char(12),
m_fname char(12),
m_lname char(12),
primary key (regno),
foreign key (fname,lname) references persons,
foreign key (f_fname,f_lname) references persons,
foreign key (m_fname,m_lname) references persons
);''')
c.execute('''create table marriages (
regno int,
regdate date,
regplace char(20),
p1_fname char(12),
p1_lname char(12),
p2_fname char(12),
p2_lname char(12),
primary key (regno),
foreign key (p1_fname,p1_lname) references persons,
foreign key (p2_fname,p2_lname) references persons
);''')
c.execute('''create table vehicles (
vin char(5),
make char(10),
model char(10),
        year int,
color char(10),
primary key (vin)
);''')
c.execute('''create table registrations (
regno int,
regdate date,
expiry date,
plate char(7),
vin char(5),
fname char(12),
lname char(12),
primary key (regno),
foreign key (vin) references vehicles,
foreign key (fname,lname) references persons
);''')
c.execute('''create table tickets (
tno int,
regno int,
fine int,
violation text,
vdate date,
primary key (tno),
foreign key (regno) references registrations
);''')
c.execute('''create table demeritNotices (
ddate date,
fname char(12),
lname char(12),
points int,
        "desc" text,  -- quoted because DESC is a reserved keyword in SQLite
primary key (ddate,fname,lname),
foreign key (fname,lname) references persons
);''')
c.execute('''create table payments (
tno int,
pdate date,
amount int,
primary key (tno, pdate),
foreign key (tno) references tickets
);''')
c.execute('''create table users (
uid char(8),
pwd char(8),
utype char(1), -- 'a' for agents, 'o' for officers
fname char(12),
lname char(12),
city char(15),
primary key(uid),
foreign key (fname,lname) references persons
);''')
conn.commit()
def insert_data():
global conn, c
conn.commit()
def function_name1():
global conn, c
conn.commit()
def function_name2():
global conn, c
conn.commit()
def function_name3():
global conn, c
conn.commit()
def process_bill(provide_vin, provide_fname, provide_lname, provide_platenum):
global conn, c
conn.row_factory = sqlite3.Row
c.execute('''SELECT r.fname, r.lname
FROM vehicles v, registrations r
WHERE v.vin = r.vin AND r.vin = ? AND r.plate = ?
ORDER BY r.regdate DESC
limit 1;''', (provide_vin, provide_platenum))
name = c.fetchall()
if name == []:
print("Sorry, the name you provided is not in our database.")
elif (provide_fname == name[0][0]) and (provide_lname == name[0][1]):
#end current registration
        c.execute('''UPDATE registrations
                    SET expiry = datetime('now')
                    WHERE fname = ? and lname = ?;''', (provide_fname, provide_lname))
        #make new registration
        new_regno = random.randint(0, 1000)
        c.execute('''INSERT INTO registrations(regno, regdate, expiry, plate, vin, fname, lname)
                    VALUES (?, datetime('now'), datetime('now', '+1 year'), ?, ?, ?, ?);''', (new_regno, provide_platenum, provide_vin, provide_fname, provide_lname))
conn.commit()
return
def process_payment(ticketnum, amount):
    global conn, c
    c.execute('''SELECT tno FROM tickets;''')
    tno = c.fetchall()
    for t in tno:
        if t[0] == ticketnum:
            c.execute('''SELECT COALESCE(SUM(amount), 0)
                        FROM payments
                        WHERE tno = ?;''', (ticketnum,))
            total = c.fetchone()[0]
            c.execute('''SELECT fine
                        FROM tickets
                        WHERE tno = ?;''', (ticketnum,))
            fine = c.fetchone()[0]
            if fine > (total + amount):  # sum of those payments cannot exceed the fine amount of the ticket
                c.execute('''INSERT INTO payments(tno, pdate, amount)
                            VALUES (?, datetime('now'), ?);''', (ticketnum, amount))
    conn.commit()
def get_driver_abstract(fname, lname):
global conn, c
c.execute('''SELECT count(t.tno)
FROM registrations r, tickets t
WHERE r.regno = t.regno
and r.fname = ? and r.lname = ?;''', (fname, lname))
num_tickets = c.fetchall()
print(num_tickets)
print("The number of ticket is %d" % num_tickets[0])
c.execute('''SELECT count(*)
FROM demeritNotices d
WHERE d.fname = ? and d.lname = ?;''', (fname, lname))
num_dnotices = c.fetchall()
print(num_dnotices)
print("The number of demerit notices is %d" % num_dnotices[0])
c.execute('''SELECT COALESCE(SUM(d.points),0)
FROM demeritNotices d
WHERE d.fname = ? and d.lname = ?
and d.ddate >= datetime('now', '-2 years');''', (fname, lname))
points_2years = c.fetchall()
print(points_2years)
print("The total number of demerit points received within 2 years is %d" % points_2years[0])
c.execute('''SELECT COALESCE(SUM(d.points),0)
FROM demeritNotices d
WHERE d.fname = ? and d.lname = ?;''', (fname, lname))
points_lifetime = c.fetchall()
print(points_lifetime)
print("The total number of demerit points received within life time is %d" % points_lifetime[0])
check = input("Would you like to see the tickets ordered form te latest to the oldest?(Y/N): ")
if check == 'Y' or check == 'y':
c.execute('''SELECT t.tno, t.vdate, t.violation, t.fine, t.regno, v.make, v.model
FROM tickets t, vehicles v, registrations r
WHERE t.regno = r.regno and r.vin = v.vin
and r.fname = ? and r.lname = ?
ORDER BY t.vdate DESC;''', (fname, lname))
tickets_infor = c.fetchall()
ticketsNum = 0
for t in tickets_infor:
ticketsNum += 1
if ticketsNum <= 5:
print(t)
else:
see_more = input("Would you like to see more tickets information?(Y/N): ")
if see_more == 'y' or see_more == 'Y':
print(t)
print(ticketsNum)
conn.commit()
def function_name7():
global conn, c
conn.commit()
def function_name8():
global conn, c
conn.commit()
def main():
global conn, c
#path = input("Enter the name of database: ")
path="./a3.db"
connect(path)
drop_tables()
define_tables()
insert_data()
#4
user_vin = input("Please provide the vin of a car: ")
currentowner_fname = input("Please provide the first name of the current owner: ")
currentowner_lname = input("Please provide the last name of the current owner: ")
plate_number = input("Please provide a plate number for the new registration: ")
process_bill(user_vin, currentowner_fname, currentowner_lname, plate_number)
#5
    ticket_number = int(input("Please input a valid ticket number: "))
    amount = int(input("Please input the amount you would like to pay: "))
    process_payment(ticket_number, amount)
#6
driver_fname = input("Please enter the first name of the driver: ")
driver_lname = input("Please enter the last name of the driver: ")
get_driver_abstract(driver_fname, driver_lname)
conn.commit()
conn.close()
return
if __name__ == "__main__":
main()
| 3.1875
| 3
|
pajbot/modules/basic/permaban.py
|
sadlyfell/bullbot
| 0
|
12784089
|
<reponame>sadlyfell/bullbot
import logging
from pajbot.managers.adminlog import AdminLogManager
from pajbot.models.command import Command
from pajbot.models.command import CommandExample
from pajbot.modules import BaseModule
from pajbot.modules import ModuleType
from pajbot.modules.basic import BasicCommandsModule
log = logging.getLogger(__name__)
class PermabanModule(BaseModule):
ID = __name__.split(".")[-1]
NAME = "!permaban/!unpermaban"
DESCRIPTION = "Permaban a user. (re-bans him if unbanned by mod)"
CATEGORY = "Feature"
ENABLED_DEFAULT = True
MODULE_TYPE = ModuleType.TYPE_ALWAYS_ENABLED
PARENT_MODULE = BasicCommandsModule
@staticmethod
def permaban_command(**options):
message = options["message"]
bot = options["bot"]
source = options["source"]
if message:
username = message.split(" ")[0].strip().lower()
with bot.users.get_user_context(username) as user:
if user.banned:
bot.whisper(source.username, "User is already permabanned.")
return False
user.banned = True
message = message.lower()
log_msg = "{} has been permabanned".format(user.username_raw)
bot.whisper(source.username, log_msg)
AdminLogManager.add_entry("Permaban added", source, log_msg)
@staticmethod
def unpermaban_command(**options):
message = options["message"]
bot = options["bot"]
source = options["source"]
if message:
username = message.split(" ")[0].strip().lower()
with bot.users.find_context(username) as user:
if not user:
bot.whisper(source.username, "No user with that name found.")
return False
if user.banned is False:
bot.whisper(source.username, "User is not permabanned.")
return False
user.banned = False
message = message.lower()
log_msg = "{} is no longer permabanned".format(user.username_raw)
bot.whisper(source.username, log_msg)
AdminLogManager.add_entry("Permaban remove", source, log_msg)
def load_commands(self, **options):
self.commands["permaban"] = Command.raw_command(
self.permaban_command,
level=1000,
description="Permanently ban a user. Every time the user types in chat, he will be permanently banned again",
examples=[
CommandExample(
None,
"Default usage",
chat="user:!permaban Karl_Kons\n" "bot>user:Karl_Kons has now been permabanned",
description="Permanently ban Karl_Kons from the chat",
).parse()
],
)
self.commands["unpermaban"] = Command.raw_command(
self.unpermaban_command,
level=1000,
description="Remove a permanent ban from a user",
examples=[
CommandExample(
None,
"Default usage",
chat="user:!unpermaban Karl_Kons\n" "bot>user:Karl_Kons is no longer permabanned",
description="Remove permanent ban from Karl_Kons",
).parse()
],
)
| 2.203125
| 2
|
text/jython/delete/text_delete.py
|
ekzemplaro/data_base_language
| 3
|
12784090
|
#! /opt/jython/bin/jython
# -*- coding: utf-8 -*-
#
# delete/text_delete.py
#
# Oct/12/2016
import sys
import string
#
# ---------------------------------------------------------------
sys.path.append ('/var/www/data_base/common/python_common')
sys.path.append ('/var/www/data_base/common/jython_common')
from jython_text_manipulate import text_read_proc
from jython_text_manipulate import text_write_proc
#
from text_manipulate import dict_delete_proc
# ---------------------------------------------------------------
#
sys.stderr.write ("*** 開始 ***\n")
#
file_in = sys.argv[1]
id_in = sys.argv[2]
print ("%s" % id_in)
dict_aa = text_read_proc (file_in)
dict_bb = dict_delete_proc (dict_aa,id_in)
text_write_proc (file_in,dict_bb)
#
sys.stderr.write ("*** 終了 ***\n")
# ---------------------------------------------------------------
| 2.421875
| 2
|
dekit/run_once.py
|
algobot76/dekit
| 0
|
12784091
|
import functools
def run_once(func):
""" The decorated function will only run once. Other calls to it will
return None. The original implementation can be found on StackOverflow(https://stackoverflow.com/questions/4103773/efficient-way-of-having-a-function-only-execute-once-in-a-loop)
:param func: the function to be limited to run once only
:return: the decorated function
""" # noqa
@functools.wraps(func)
def func_wrapper(*args, **kwargs):
if not func_wrapper.has_run:
func_wrapper.has_run = True
return func(*args, **kwargs)
func_wrapper.has_run = False
return func_wrapper
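# Brief usage sketch (an assumption added for illustration, not part of the original module):
# the second call returns None because the wrapped function only ever runs once.
if __name__ == "__main__":
    @run_once
    def greet():
        print("hello")
        return "greeted"
    print(greet())  # prints "hello", then "greeted"
    print(greet())  # prints "None": the body is skipped on later calls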
| 3.796875
| 4
|
scripts/lib/FrequencyLib.py
|
ixent/qosa-snee
| 1
|
12784092
|
<gh_stars>1-10
#Util library for frequency distributions held as dictionaries of key:count
from math import sqrt
def sortedReport(frequencies):
keys = frequencies.keys()
keys.sort()
for key in keys:
report(str(key)+" :" + str(frequencies[key]))
def showSumary(frequencies):
sortedReport(frequencies)
if len(frequencies) > 0:
keys = frequencies.keys()
min = keys[0]
max = keys[0]
sum = 0
square = 0
sumSquare = 0
count = 0
for (key, value) in frequencies.iteritems():
if (key < min):
min = key
if (key > max):
max = key
count = count + value
sum = sum + (key * value)
sumSquare = sumSquare + (key * key * value)
#report("Count: " + str(count))
#report("Min: " + str(min))
#report("Max: " + str(max))
average = (sum / count)
#report("Average: " + str(average))
#report("Standard Deviation: "+ str(sqrt((sumSquare/count)-(average*average))))
return (str(count)+","+str(min)+","+str(max)+","+str(sum)+","+str(average)+","+str(sqrt((sumSquare/count)-(average*average))))
else:
#report ("count = 0")
return ("0,n/a,n/a,n/a,n/a")
def merge(frequency1, frequency2):
results = {}
results.update(frequency1)
for (key, value) in frequency2.iteritems():
results[key] = frequency1.get(key,0) + value
return results
def add(frequency1, frequency2):
print frequency1
print frequency2
for (key, value) in frequency2.iteritems():
frequency1[key] = frequency1.get(key,0) + value
print frequency1
print frequency2
return frequency1
logger = None
#Registers a logger for this library
def registerLogger(l):
global logger
logger = l
#Output info message to screen and logger if applicable
def report(message):
if (logger != None):
logger.info(message)
print message
#Output warning message to screen and logger if applicable
def reportWarning(message):
if (logger != None):
logger.warning(message)
print message
#Output error message to screen and logger if applicable
def reportError(message):
if (logger != None):
logger.error(message)
print message
def main():
test = {12: 4139, 54: 4127, 34: 4098}
showSumary(test)
if __name__ == "__main__":
main()
| 3.296875
| 3
|
scripts/hackathon/create_all_sts.py
|
mikiec84/delphi
| 25
|
12784093
|
<reponame>mikiec84/delphi
""" Get all preassembled statements, prune away unneeded information, pickle
them. """
import sys
import pickle
from delphi.utils.indra import get_statements_from_json_file
if __name__ == "__main__":
all_sts = get_statements_from_json_file(sys.argv[1])
with open(sys.argv[2], "wb") as f:
pickle.dump(all_sts, f)
| 2.203125
| 2
|
scheduled_bots/ontology/obographs_GO.py
|
turoger/scheduled-bots
| 6
|
12784094
|
import argparse
import os
from scheduled_bots.ontology.obographs import Graph, Node
from wikidataintegrator import wdi_login, wdi_core, wdi_helpers
from scheduled_bots import PROPS
def ec_formatter(ec_number):
splits = ec_number.split('.')
if len(splits) < 4:
for x in range(4 - len(splits)):
splits.append('-')
return '.'.join(splits)
class GONode(Node):
def _pre_create(self):
super(GONode, self)._pre_create()
self.xrefs = list(self.xrefs)
for n, xref in enumerate(self.xrefs):
if xref.startswith("EC:"):
prefix, ec_number = xref.split(":")
self.xrefs[n] = ":".join([prefix, ec_formatter(ec_number)])
self.xrefs = set(self.xrefs)
class GOGraph(Graph):
NAME = "Gene Ontology"
GRAPH_URI = 'http://purl.obolibrary.org/obo/go.owl'
QID = "Q135085"
DEFAULT_DESCRIPTION = "gene ontology term"
APPEND_PROPS = {PROPS['subclass of'], PROPS['instance of'],
PROPS['has cause'], PROPS['location'], PROPS['part of'],
PROPS['has part'], PROPS['regulates (molecular biology)'],
PROPS['Gene Ontology ID']}
FAST_RUN = False
PRED_PID_MAP = {
'is_a': PROPS['subclass of'],
'http://purl.obolibrary.org/obo/BFO_0000050': PROPS['part of'],
'http://purl.obolibrary.org/obo/BFO_0000051': PROPS['has part'],
'http://purl.obolibrary.org/obo/RO_0002211': PROPS['regulates (molecular biology)'], # regulates
'http://purl.obolibrary.org/obo/RO_0002212': None, # negatively regulates
'http://purl.obolibrary.org/obo/RO_0002213': None, # positively regulates
}
NODE_CLASS = GONode
regulates = {
'http://purl.obolibrary.org/obo/RO_0002212': 'http://purl.obolibrary.org/obo/GO_0048519',
'http://purl.obolibrary.org/obo/RO_0002213': 'http://purl.obolibrary.org/obo/GO_0048518'
}
def make_statement_from_edge(self, edge):
# custom statement creator for regulates
h = self.helper
if edge['pred'] in {'http://purl.obolibrary.org/obo/RO_0002212', 'http://purl.obolibrary.org/obo/RO_0002213'}:
subj_node = self.uri_node_map[edge['sub']]
obj_qid = self.get_object_qid(edge['obj'])
# print(obj_qid, edge['pred'])
qual_qid = self.uri_node_map[self.regulates[edge['pred']]].qid
pred_pid = self.PRED_PID_MAP['http://purl.obolibrary.org/obo/RO_0002211']
if not (obj_qid and qual_qid and pred_pid):
m = wdi_helpers.format_msg(edge['sub'], None, None, "failed on edge: {}".format(edge))
print(m)
wdi_core.WDItemEngine.log("WARNING", m)
return None
qualifier = wdi_core.WDItemID(qual_qid, h.get_pid(PROPS['subject has role']), is_qualifier=True)
return wdi_core.WDItemID(obj_qid, pred_pid, qualifiers=[qualifier],
references=[subj_node.create_ref_statement()])
else:
return super(GOGraph, self).make_statement_from_edge(edge)
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='run wikidata disease ontology bot')
parser.add_argument("json_path", help="Path to json file")
parser.add_argument("--local", help="preconfigured local wikibase port 7171 and 7272", action='store_true')
args = parser.parse_args()
if args.local:
mediawiki_api_url = "http://localhost:7171/w/api.php"
sparql_endpoint_url = "http://localhost:7272/proxy/wdqs/bigdata/namespace/wdq/sparql"
login = wdi_login.WDLogin("testbot", "password", mediawiki_api_url=mediawiki_api_url)
else:
try:
from scheduled_bots.local import WDUSER, WDPASS
except ImportError:
if "WDUSER" in os.environ and "WDPASS" in os.environ:
WDUSER = os.environ['WDUSER']
WDPASS = os.environ['WDPASS']
else:
raise ValueError("WDUSER and WDPASS must be specified in local.py or as environment variables")
mediawiki_api_url = 'https://www.wikidata.org/w/api.php'
sparql_endpoint_url = 'https://query.wikidata.org/sparql'
login = wdi_login.WDLogin(WDUSER, WDPASS)
g = GOGraph(args.json_path, mediawiki_api_url=mediawiki_api_url, sparql_endpoint_url=sparql_endpoint_url)
g.run(login)
| 2.3125
| 2
|
blog/views.py
|
rahulbiswas24680/Blogapp
| 0
|
12784095
|
from django.shortcuts import render, get_object_or_404, redirect
from .models import (
Category,
Post,
Profile,
Comment,
PostCreationForm,
PostEditForm,
UserLoginForm,
UserRegistrationForm,
UserUpdateForm,
ProfileUpdateForm,
CommentForm
)
from django.views.generic.edit import UpdateView
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.utils.text import slugify
from django.contrib.auth import authenticate, login, logout
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, JsonResponse, Http404
from django.urls import reverse
from django.contrib import messages
from django.db.models import Q
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.template.loader import render_to_string
from django.views.generic.list import ListView
def home(requests):
post_list = Post.published.all().order_by('-id')
cat_section = Category.objects.all()
# regional = Post.published.filter(category="regional").order_by('-id')
# scintific = Post.published.filter(category="scintific").order_by('-id')
# physics = Post.published.filter(category="physics").order_by('-id')
# chemistry = Post.published.filter(category="chemistry").order_by('-id')
# mathematics = Post.published.filter(category="mathematics").order_by('-id')
# biology = Post.published.filter(category="biology").order_by('-id')
# sports = Post.published.filter(category="sports").order_by('-id')
# ai = Post.published.filter(category="ai").order_by('-id')
# offtopic = Post.published.filter(category="offtopic").order_by('-id')
# programming = Post.published.filter(category="programming").order_by('-id')
# datascience = Post.published.filter(category="datascience").order_by('-id')
# entrance_exam = Post.published.filter(category="entrance_exam").order_by('-id')
# travel = Post.published.filter(category="travel").order_by('-id')
# celebrity_talk = Post.published.filter(category="celebrity_talk").order_by('-id')
# world = Post.published.filter(category="world").order_by('-id')
# astronomy = Post.published.filter(category="astronomy").order_by('-id')
# engineering = Post.published.filter(category="engineering").order_by('-id')
# technology = Post.published.filter(category="technology").order_by('-id')
query = requests.GET.get("q")
if query:
post_list = Post.published.filter(
Q(title__icontains=query)
| Q(author__username=query)
| Q(body__icontains=query)
)
paginator = Paginator(post_list, 4)
page = requests.GET.get("page")
try:
page_obj = paginator.get_page(page)
except PageNotAnInteger:
page_obj = paginator.get_page(1)
except EmptyPage:
        page_obj = paginator.get_page(paginator.num_pages)
# function called for pagination
# regional = paginator(regional),
# scintific = paginator(scintific),
# physics = paginator(physics),
# chemistry = paginator(chemistry),
# mathematics = paginator(mathematics),
# biology = paginator(post_list),
# sports = paginator(sports),
# ai = paginator(ai),
# offtopic = paginator(offtopic),
# programming = paginator(programming),
# datascience = paginator(datascience),
# entrance_exam = paginator(entrance_exam),
# travel = paginator(travel),
# celebrity_talk = paginator(celebrity_talk),
# world = paginator(world),
# astronomy = paginator(astronomy),
# engineering = paginator(engineering),
# technology = paginator(technology),
context = {
"page_obj": page_obj,
"cat_section": cat_section
# "regional": regional,
# "scintific": scintific,
# "physics" : physics,
# "chemistry" : chemistry,
# "mathematics" : mathematics,
# "biology" : biology,
# "sports" : sports,
# "ai" : ai,
# "offtopic" : offtopic,
# "programming" : programming,
# "datascience" : datascience,
# "entrance_exam" : entrance_exam,
# "travel" : travel,
# "celebrity_talk" : celebrity_talk,
# "world" : world,
# "astronomy" : astronomy,
# "engineering" : engineering,
# "technology" : technology,
}
return render(requests, "blog/home.html", context)
def about(requests):
return render(requests, "blog/about.html")
def post_detail(requests, pk, slug):
post = get_object_or_404(Post, pk=pk, slug=slug)
comments = Comment.objects.filter(post=post, reply=None).order_by('-id')
is_liked = False
if post.likes.filter(id=requests.user.id).exists():
is_liked = True
if requests.method == "POST":
comment_form = CommentForm(requests.POST or None)
if comment_form.is_valid():
content = requests.POST.get('content')
reply_id = requests.POST.get('comment_id')
comment_qs = None
if reply_id:
comment_qs = Comment.objects.get(id=reply_id)
comment = Comment.objects.create(post=post, user=requests.user, content=content, reply=comment_qs)
comment.save()
messages.success(requests, f"Your comment has been posted.")
# return HttpResponseRedirect(post.get_absolute_url())
else:
comment_form = CommentForm()
context = {
"post": post,
"is_liked": is_liked,
"total_likes": post.total_likes(),
"comments": comments,
"comment_form": comment_form
}
if requests.is_ajax():
html = render_to_string("blog/comments.html", context, requests)
return JsonResponse({"form": html})
return render(requests, "blog/post_detail.html", context)
def like_post(requests):
post = get_object_or_404(Post, id=requests.POST.get("id"))
is_liked = False
if post.likes.filter(id=requests.user.id).exists():
post.likes.remove(requests.user)
is_liked = False
else:
post.likes.add(requests.user)
is_liked = True
context = {
"post": post,
"is_liked": is_liked,
"total_likes": post.total_likes(),
}
if requests.is_ajax():
html = render_to_string("blog/like_section.html", context, requests)
return JsonResponse({"form": html})
@login_required
def create_post(requests):
if requests.method == "POST":
form = PostCreationForm(requests.POST or None)
if form.is_valid():
post = form.save(commit=False)
post.author = requests.user
post.slug = slugify(post.title)
post.save()
messages.success(requests, f"Your post has been created!")
return HttpResponseRedirect(
reverse("post-detail", args=[post.pk, post.slug])
)
else:
form = PostCreationForm()
context = {
"form": form,
}
return render(requests, "blog/create_post.html", context)
@login_required
def edit_post(requests, pk):
post = get_object_or_404(Post, id=pk)
if post.author != requests.user:
raise Http404()
if requests.method == "POST":
form = PostEditForm(requests.POST or None, instance=post)
if form.is_valid():
form.save()
messages.success(requests, f"Your post has been updated!")
return HttpResponseRedirect(post.get_absolute_url())
else:
form = PostEditForm(instance=post)
context = {"form": form, "post": post}
return render(requests, "blog/edit_post.html", context)
@login_required
def delete_post(requests, pk):
post = get_object_or_404(Post, id=pk)
# post = Post.objects.get(id=id)
if post.author != requests.user:
raise Http404()
post.delete()
print(post)
return redirect('blog-home')
def user_registration(requests):
if requests.method == "POST":
form = UserRegistrationForm(requests.POST)
if form.is_valid():
username = form.cleaned_data.get("username")
new_user = form.save()
Profile.objects.create(user=new_user)
messages.success(
requests,
f"Your account has been created for {username}! Please login now.",
)
return redirect("user-login")
else:
form = UserRegistrationForm()
context = {"form": form}
return render(requests, "blog/user_register.html", context)
def user_login(requests):
if requests.method == "POST":
form = UserLoginForm(requests.POST)
if form.is_valid():
username = requests.POST["username"]
password = requests.POST["password"]
user = authenticate(requests, username=username, password=password)
if user is not None:
login(requests, user)
messages.success(requests, f"You are now loggged in")
return HttpResponseRedirect(reverse("blog-home"))
else:
form = UserLoginForm()
context = {"form": form}
return render(requests, "blog/user_login.html", context)
def user_logout(requests):
logout(requests)
messages.success(requests, f"You have been loggged out")
return HttpResponseRedirect(reverse("blog-home"))
@login_required
def profile(requests):
if requests.method == "POST":
u_form = UserUpdateForm(requests.POST, instance=requests.user)
p_form = ProfileUpdateForm(
requests.POST, instance=requests.user.profile, files=requests.FILES
)
if u_form.is_valid() and p_form.is_valid():
u_form.save()
p_form.save()
messages.success(requests, f"Your profile has been updated")
return redirect("profile")
else:
u_form = UserUpdateForm(instance=requests.user)
p_form = ProfileUpdateForm(instance=requests.user.profile)
context = {"u_form": u_form, "p_form": p_form}
return render(requests, "user/profile.html", context)
class CatListView(ListView):
template_name = 'blog/category.html'
context_object_name = 'catlist'
# paginate_by = 4
def get_queryset(self):
content = {
'cat': self.kwargs['category'],
'posts': Post.published.filter(category__name=self.kwargs['category']).filter(status='published'),
'all': Category.objects.all()
}
return content
| 1.945313
| 2
|
pixelpal/callbacks/model_checkpoint.py
|
mparusinski/pixelpal
| 1
|
12784096
|
from tensorflow.keras.callbacks import ModelCheckpoint
import os
def produce_callback():
checkpoint_filepath = "./run/weights-improvement-{epoch:02d}-{val_psnr_metric:.2f}-{val_ssim_metric:.2f}.hdf5"
os.makedirs(os.path.dirname(checkpoint_filepath), exist_ok=True)
model_checkpoint = ModelCheckpoint(checkpoint_filepath, monitor='val_loss', verbose=1, save_best_only=True, mode='min')
return model_checkpoint
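# Hedged usage sketch (not part of the original module): how the callback produced above would
# typically be passed to Keras, assuming a compiled `model`, validation data, and that the
# monitored val_psnr_metric / val_ssim_metric are registered on the model.
#
# model.fit(
#     train_dataset,
#     validation_data=val_dataset,
#     epochs=50,
#     callbacks=[produce_callback()],
# )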
| 2.28125
| 2
|
coding-challenges/firecode/find-ranges.py
|
acfromspace/infinitygauntlet
| 3
|
12784097
|
def find_range(input_list, input_number):
search = []
number_range = []
    for index in range(len(input_list)):  # start at 0 so the first element is not skipped
if input_list[index] == input_number:
search.append(index)
if input_list[index] > input_number:
break
number_range.append(search[0])
number_range.append(search[-1])
return number_range
# return Range(search[0], search[-1])
input_list = [1, 2, 5, 5, 8, 8, 10]
input_number = 8
print("find_range():", find_range(input_list, input_number))
input_number = 2
print("find_range():", find_range(input_list, input_number))
"""
class Range(object):
def __init__(self):
self.lower_bound = -1
self.upper_bound = -1
def __init__(self, lower_bound, upper_bound):
self.lower_bound = lower_bound
self.upper_bound = upper_bound
def __str__(self):
return "["+str(self.lower_bound)+","+str(self.upper_bound)+"]
def find_range(input_list,input_number):
first = 0
lower_bound = -1
upper_bound = -1
last = len(input_list) - 1
while first < last:
mid = (first+last)/2
if input_list[mid] < input_number:
first = mid + 1
else:
last = mid
if input_list[first] != input_number:
return Range()
else:
lower_bound = first
first = 0
last = len(input_list) - 1
while first < last:
mid = (first + last)/2
if (input_list[mid] < input_number + 1):
first = mid + 1
else:
last = mid
if(input_list[first] == input_number):
upper_bound = first
else:
upper_bound = first-1
return Range(lower_bound,upper_bound)
"""
"""
def bisect(input_list, input_number, f):
left, right = 0, len(input_list)
while left < right:
mid = left + (right - left)/2
if f(input_list[mid], input_number): left = mid + 1
else: right = mid
return left
bisect_left = lambda input_list,input_number: bisect(input_list, input_number, lambda x,y: x<y)
bisect_right = lambda input_list,input_number: bisect(input_list, input_number, lambda x,y: x<=y)
def find_range(input_list, input_number):
start = bisect_left(input_list, input_number)
end = bisect_right(input_list, input_number) - 1
return Range(start, end)
"""
| 3.703125
| 4
|
scripts/contractInteraction/config.py
|
omerzam/Sovryn-smart-contracts
| 108
|
12784098
|
from brownie import *
from brownie.network.contract import InterfaceContainer
import json
def loadConfig():
global contracts, acct
thisNetwork = network.show_active()
if thisNetwork == "development":
acct = accounts[0]
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "testnet-ws":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "rsk-testnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/testnet_contracts.json')
elif thisNetwork == "rsk-mainnet":
acct = accounts.load("rskdeployer")
configFile = open('./scripts/contractInteraction/mainnet_contracts.json')
else:
raise Exception("Network not supported.")
contracts = json.load(configFile)
| 2.390625
| 2
|
social_core/backends/fedora.py
|
shnaqawi/social-core
| 745
|
12784099
|
<filename>social_core/backends/fedora.py<gh_stars>100-1000
"""
Fedora OpenId backend, docs at:
https://python-social-auth.readthedocs.io/en/latest/backends/fedora.html
"""
from .open_id import OpenIdAuth
class FedoraOpenId(OpenIdAuth):
name = 'fedora'
URL = 'https://id.fedoraproject.org'
USERNAME_KEY = 'nickname'
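# Hedged configuration sketch (not part of this module): to enable the backend in a Django
# project using python-social-auth, the dotted path below is added to AUTHENTICATION_BACKENDS
# in settings.py. Exact settings naming may vary with the integration package in use.
#
# AUTHENTICATION_BACKENDS = (
#     'social_core.backends.fedora.FedoraOpenId',
#     'django.contrib.auth.backends.ModelBackend',
# )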
| 1.390625
| 1
|
SnakesAndLadders.py
|
S0obi/SnakesAndLadders
| 0
|
12784100
|
from Joueur import Joueur
from Plateau import Plateau
from BonusType import BonusType
from random import randrange
nbBonus = int(input("Nombre de bonus : "))
nbJoueur = int(input("Saisissez le nombre de joueur : "))
listJoueur = []
for i in range(0,nbJoueur):
listJoueur.append(Joueur(input("Nom du joueur " + str(i+1) + " : ")))
print("\n")
partieGagne = False
gagnant = None
Plateau = Plateau(nbBonus)
replay = False
tour = 0
while not partieGagne:
j = listJoueur[tour % nbJoueur]
print("*** Tour de " + j.name + " ***")
print("Lancer les dés ... : ", end="")
input()
    lancerDe = randrange(1, 7)  # a six-sided die: randrange's upper bound is exclusive
print(str(lancerDe))
tmpPos = j.pos + lancerDe
if tmpPos > 100:
print("Il faut faire " + str(100 - j.pos) + " pour gagner !\n")
tour += 1
continue
case = Plateau.content[tmpPos]
if case.hasBonus() and tmpPos != 100:
print("!!! BONUS : " + case.bonus.type.name + " !!!\n")
if case.bonus.type == BonusType.REJOUER:
replay = True
elif case.bonus.type == BonusType.DOUBLE:
tmpPos += lancerDe
elif case.bonus.type == BonusType.AVANCER_5_CASES:
tmpPos += 5
if tmpPos == 100:
partieGagne = True
gagnant = j
continue
elif tmpPos <= 100:
j.pos = tmpPos
if replay:
replay = False
continue
case = Plateau.content[tmpPos] # update Case for pouvoir
if case.hasPouvoir():
print("Attention ! C'est une case " + case.pouvoir.name + " direction la case : " + str(case.targetCase))
j.pos = case.targetCase
print ("Case : " + str(j.pos) + "\n")
tour += 1
print("Bravo ! Le gagnant est : " + gagnant.name)
| 3.109375
| 3
|
classifier/example.py
|
Alex92rus/ErrorDetectionProject
| 1
|
12784101
|
<reponame>Alex92rus/ErrorDetectionProject
import tensorflow as tf
'''
input > weight > hidden layer 1 (activation function) > weights > hidden l 2
(activation function) > weights > output layer
feed forward
compare output to intended output > cost or loss function (cross entropy)
optimisation function (optimizer) > minimize cost (AdamOptimizer)
back and manipulate the weights - backpropagation
feed forward + backprop = epoch
'''
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("/tmp/data", one_hot=True)
# 10 classes, 0 - 1
'''
one hot on element is on or hot
0 = [1, 0, 0, 0, 0, 0, 0, 0, 0, 0]
1 = [0, 1, 0, 0, 0, 0, 0, 0, 0, 0]
2 = [0, 0, 1, 0, 0, 0, 0, 0, 0, 0]
3 = [0, 0, 0, 1, 0, 0, 0, 0, 0, 0]
'''
n_nodes_hl1 = 500
n_nodes_hl2 = 500
n_nodes_hl3 = 500
n_classes = 10
batch_size = 100
# height x width
x = tf.placeholder('float', [None, 784])
y = tf.placeholder('float')
def neural_network_model(data):
    # start with random weights and biases
    hidden_1_layer = {'weights': tf.Variable(tf.random_normal([784, n_nodes_hl1])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl1]))}  # bias adds input_data * weights + bias
    hidden_2_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl2]))}
    hidden_3_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
                      'biases': tf.Variable(tf.random_normal([n_nodes_hl3]))}
    output_layer = {'weights': tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
                    'biases': tf.Variable(tf.random_normal([n_classes]))}
    # model: (input_data * weights) + biases, chained layer by layer
    l1 = tf.add(tf.matmul(data, hidden_1_layer['weights']), hidden_1_layer['biases'])
    l1 = tf.nn.relu(l1)  # threshold function
    l2 = tf.add(tf.matmul(l1, hidden_2_layer['weights']), hidden_2_layer['biases'])
    l2 = tf.nn.relu(l2)  # threshold function
    l3 = tf.add(tf.matmul(l2, hidden_3_layer['weights']), hidden_3_layer['biases'])
    l3 = tf.nn.relu(l3)  # threshold function
    output = tf.matmul(l3, output_layer['weights']) + output_layer['biases']
    return output
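# Hedged sketch of the training loop the docstring above describes (feed forward, cross-entropy
# cost, AdamOptimizer, backprop per epoch). It is not part of the original file and assumes the
# TensorFlow 1.x API that the rest of this script uses.
def train_neural_network(x):
    prediction = neural_network_model(x)
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=prediction, labels=y))
    optimizer = tf.train.AdamOptimizer().minimize(cost)
    hm_epochs = 10
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for epoch in range(hm_epochs):
            epoch_loss = 0
            for _ in range(int(mnist.train.num_examples / batch_size)):
                epoch_x, epoch_y = mnist.train.next_batch(batch_size)
                _, c = sess.run([optimizer, cost], feed_dict={x: epoch_x, y: epoch_y})
                epoch_loss += c
            print('Epoch', epoch + 1, 'completed out of', hm_epochs, 'loss:', epoch_loss)
        correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
        accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
        print('Accuracy:', accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

train_neural_network(x)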
| 3.5625
| 4
|
day19.py
|
drewbrew/advent-of-code-2020
| 4
|
12784102
|
<gh_stars>1-10
from typing import Dict, List, Sequence, Tuple, Union
import re
TEST_RULES, TEST_MESSAGES = [
i.splitlines()
for i in """0: 4 1 5
1: 2 3 | 3 2
2: 4 4 | 5 5
3: 4 5 | 5 4
4: "a"
5: "b"
ababbb
bababa
abbbab
aaabbb
aaaabbb""".split(
"\n\n"
)
]
P2_TEST_RULES, P2_TEST_MESSAGES = [
i.splitlines()
for i in """42: 9 14 | 10 1
9: 14 27 | 1 26
10: 23 14 | 28 1
1: "a"
11: 42 31
5: 1 14 | 15 1
19: 14 1 | 14 14
12: 24 14 | 19 1
16: 15 1 | 14 14
31: 14 17 | 1 13
6: 14 14 | 1 14
2: 1 24 | 14 4
0: 8 11
13: 14 3 | 1 12
15: 1 | 14
17: 14 2 | 1 7
23: 25 1 | 22 14
28: 16 1
4: 1 1
20: 14 14 | 1 15
3: 5 14 | 16 1
27: 1 6 | 14 18
14: "b"
21: 14 1 | 1 14
25: 1 1 | 1 14
22: 14 14
8: 42
26: 14 22 | 1 20
18: 15 15
7: 14 5 | 1 21
24: 14 1
abbbbbabbbaaaababbaabbbbabababbbabbbbbbabaaaa
bbabbbbaabaabba
babbbbaabbbbbabbbbbbaabaaabaaa
aaabbbbbbaaaabaababaabababbabaaabbababababaaa
bbbbbbbaaaabbbbaaabbabaaa
bbbababbbbaaaaaaaabbababaaababaabab
ababaaaaaabaaab
ababaaaaabbbaba
baabbaaaabbaaaababbaababb
abbbbabbbbaaaababbbbbbaaaababb
aaaaabbaabaaaaababaa
aaaabbaaaabbaaa
aaaabbaabbaaaaaaabbbabbbaaabbaabaaa
babaaabbbaaabaababbaabababaaab
aabbbbbaabbbaaaaaabbbbbababaaaaabbaaabba""".split(
"\n\n"
)
]
with open("day19.txt") as infile:
REAL_RULES, REAL_MESSAGES = [i.splitlines() for i in infile.read().split("\n\n")]
def rule_to_format_string(expr: str) -> str:
rules = expr.split()
return "".join("{rules[%s]}" % rule for rule in rules)
def parse_rules(rules: List[str]) -> Dict[str, str]:
"""Convert rules into a list of regex strings"""
result = {}
splits = [rule.split(":") for rule in rules]
base_rules = {}
for rule_number, expr in splits:
base_rules[int(rule_number)] = expr.strip()
for rule_number, expr in base_rules.items():
if '"' in expr:
assert len(expr) == 3, expr
result[int(rule_number)] = expr[1]
# ok, we've gotten our base rules
# all our messages are comprised of just a and b
assert len(result) == 2
while len(result) != len(base_rules):
for rule_number, expr in base_rules.items():
if rule_number in result:
continue
if "|" in expr:
format_str = (
"("
+ "|".join(
f"{rule_to_format_string(sub_expr)}"
for sub_expr in expr.split("|")
)
+ ")"
)
else:
format_str = rule_to_format_string(expr)
try:
result[rule_number] = format_str.format(rules=result)
except KeyError:
pass
return result
def part_one(rules: List[str], messages: List[str]) -> int:
parsed_rules = parse_rules(rules)
regex = re.compile("^" + parsed_rules[0] + "$")
score = 0
for message in messages:
if regex.match(message):
score += 1
return score
def part_two(rules: List[str], messages: List[str]) -> int:
"""work through the rules and sub in as we go"""
table = "\n".join(rules)
rule_table = dict(re.findall("(\d+):(.+)", table))
# Special rules for part 2
rule_table["8"] = "42+"
# make a wild guess and hope that we won't have more than 15 of a recursive rule
rule_table["11"] = "|".join(f"{'42 ' * i} {'31 ' * i}" for i in range(1, 15))
while len(rule_table) > 1:
# Find a "completed" rule to substitute
completed_rule_number, completed_rule = next(
(rule_num, rule_expr)
for rule_num, rule_expr in rule_table.items()
if not re.search("\d", rule_expr)
)
rule_table = {
rule_num: re.sub(
rf"\b{completed_rule_number}\b", f"({completed_rule})", rule_expr
)
for rule_num, rule_expr in rule_table.items()
if rule_num != completed_rule_number
}
# we're done
# is this rule zero?
assert list(rule_table) == ["0"]
# trim quotes and whitespace
regex_str = re.sub('[ "]', "", rule_table["0"])
# add start/finish markers
regex = re.compile(f"^{regex_str}$")
return sum(bool(regex.match(line)) for line in messages)
def main():
p1_test = part_one(TEST_RULES, TEST_MESSAGES)
assert p1_test == 2, p1_test
p1_result = part_one(REAL_RULES, REAL_MESSAGES)
print(p1_result)
print(part_two(REAL_RULES, REAL_MESSAGES))
if __name__ == "__main__":
main()
| 2.25
| 2
|
code/network.py
|
alzaia/four_shapes_classifier
| 0
|
12784103
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Definition of the CNN architectures.
Baseline: very simple net with few params and no regularization.
Regularized: simple net combined with dropout and data augmentation.
"""
import numpy as np
import random
import tensorflow as tf
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
from tensorflow.keras.preprocessing import *
def define_baseline(num_classes,input_shape=(200, 200, 1)):
inputs = Input(input_shape)
inputs_preproc = experimental.preprocessing.Resizing(100, 100, interpolation='bilinear')(inputs)
inputs_preproc = experimental.preprocessing.Rescaling(1./255, input_shape=input_shape)(inputs_preproc)
conv1 = Conv2D(8, 3, padding='same', activation='relu')(inputs_preproc)
conv1 = MaxPool2D(pool_size=(2,2))(conv1)
conv2 = Conv2D(16, 3, padding='same', activation='relu')(conv1)
conv2 = MaxPool2D(pool_size=(2,2))(conv2)
flatten1 = Flatten()(conv2)
outputs = Dense(16, activation='relu')(flatten1)
outputs = Dense(num_classes, activation='softmax')(outputs)
model = Model(inputs = inputs, outputs = outputs)
return model
data_augmentation = Sequential(
[
experimental.preprocessing.RandomFlip("horizontal", input_shape=(200,200,1)),
experimental.preprocessing.RandomZoom(0.4),
experimental.preprocessing.RandomContrast(0.9)
]
)
def define_regularized(num_classes,input_shape=(200, 200, 1)):
inputs = Input(input_shape)
inputs_preproc = data_augmentation(inputs)
inputs_preproc = experimental.preprocessing.Resizing(100, 100, interpolation='bilinear')(inputs_preproc)
inputs_preproc = experimental.preprocessing.Rescaling(1./255, input_shape=input_shape)(inputs_preproc)
conv1 = Conv2D(16, 3, padding='same', activation='relu')(inputs_preproc)
conv1 = MaxPool2D(pool_size=(2,2))(conv1)
conv1 = Dropout(0.25)(conv1)
conv2 = Conv2D(32, 3, padding='same', activation='relu')(conv1)
conv2 = MaxPool2D(pool_size=(2,2))(conv2)
conv2 = Dropout(0.25)(conv2)
flatten1 = Flatten()(conv2)
outputs = Dense(16, activation='relu')(flatten1)
outputs = Dense(num_classes, activation='softmax')(outputs)
model = Model(inputs = inputs, outputs = outputs)
return model
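# Hedged usage sketch (added for illustration, not part of the original module): how either
# architecture above would typically be compiled and trained. `train_ds` / `val_ds` are assumed
# tf.data datasets yielding (image, one-hot label) pairs shaped to match input_shape.
#
# model = define_regularized(num_classes=4)
# model.compile(optimizer='adam',
#               loss='categorical_crossentropy',
#               metrics=['accuracy'])
# model.fit(train_ds, validation_data=val_ds, epochs=20)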
| 3.28125
| 3
|
orthoMcl.py
|
vetscience/Tools
| 0
|
12784104
|
#!/usr/bin/env python
'''
Oct 10, 2017: <NAME>, The University of Melbourne
Simplifies running orthoMCL with a wrapper and pre-checks the known
formatting issues with FASTA headers to avoid failure in later stages of the run.
'''
import os, sys, optparse, getpass
from multiprocessing import Process, Pipe
from Utils import Base, Fasta
#################################################
def options():
parser = optparse.OptionParser('example: %prog -i residues1.fa,residues2.fa,...,residuesN.fa -l LB1,LB2,...,LBN -p 1,1,...,1 -e 1e-5 -s 0.5')
parser.add_option('-a', '--ip', dest='ip', help='MySQL server IP address', metavar='IPADDRESS', default='127.0.0.1')
parser.add_option('-d', '--dir', dest='wd', help='The directory, in which the FASTA files for the analysis are copied to', metavar='DIR', default='TmpOrthoMcl')
parser.add_option('-i', '--filenames', dest='filenames', help='Proteome, CDS or transcript FASTA files of species (separated by commas)', metavar='FILES', default='')
parser.add_option('-l', '--labels', dest='labs', help="Respective labels for proteomes following the order of FASTA files (separated by commas)", metavar='LABELS', default='')
parser.add_option('-p', '--positions', dest='positions', help="Positions of a unique identifier in FASTA header separated by |. Default position is 1 (separated by commas).", metavar='POSITIONS', default='')
parser.add_option('-T', '--threads', dest='pCnt', help='Number of parallel threads (default is half of the capacity but >= 1)', metavar='THREADS', default='0')
parser.add_option('-e', '--evalue', dest='evalue', help="E-value for BLAST run. Default is 1e-5. Use always E-value <= 1e-5 and 1e-X format only!", metavar='EVALUE', default='1e-5')
    parser.add_option('-s', '--similarity', dest='sim', help="Required similarity (0 .. 1) in mcl algorithm. Default is 0.5", metavar='SIM', default='0.5')
    parser.add_option('-m', '--minlen', dest='minlen', help="Required minimum length of a sequence. Default is 20.", metavar='MINLEN', default='20')
parser.add_option('-b', '--noblast', dest='skipBlast', action='store_true', help="Skip BLAST (used to rerun mcl using different E-value and similarity settings)", default=False)
parser.add_option('-n', '--nucl', dest='nucl', action='store_true', help="The residues in sequences represent nucleotides instead of proteins", default=False)
options, args = parser.parse_args()
if options.filenames == '' or options.labs == '':
parser.print_help()
print '\nE.g.: orthoMcl -i proteome1.fa,proteome2.fa -l Tax,Tvi -p 4,4 -e 1e-5'
print "Results will be collected to 'Results' directory in groups.txt file."
print "Note! Labels must be exactly 3 characters long."
sys.exit(-1)
return options
#################################################
def checkMySqlVersion(wd, mySqlIpAddress):
'''
'''
base = Base()
with open("%s/version.sql" %wd, 'w') as handle:
handle.write('SHOW VARIABLES LIKE "%version%";\n')
mySqlVer = base.shell("mysql -h %s -P3306 --protocol tcp --user=root --password=password < %s/version.sql" %(mySqlIpAddress, wd), myStdout = True)
verStr = ""
found = False
for line in mySqlVer.stdout:
items = line.strip().split()
for i in xrange(len(items)):
if "inno" in items[i]:
try:
verStr = items[i+1].strip()
found = True
except IndexError:
break
if found == True: break
try:
verList = [int(item) for item in verStr.split('.')]
except ValueError:
print >> sys.stderr, "### Fatal error: Could not read mysql version (%s). Exiting..." %verStr
sys.exit(-1)
if len(verList) != 3:
print >> sys.stderr, "### Fatal error: Could not read mysql version (%s). Exiting..." %verStr
sys.exit(-1)
return verList
#################################################
def checkResidue(fastaFile):
'''
'''
retVal = "nucleotides"
try:
limit = 100
fasta = Fasta(fastaFile)
for i in xrange(fasta.cnt()):
if i > limit: break
seq = fasta.seq(i).upper()
for item in seq:
if item not in ['A', 'T', 'C', 'G', 'N']:
retVal = "amino acids"
break
except IOError:
print >> sys.stderr, "### Fatal error: file %s not found. Exiting..." %fastaFile
sys.exit(-1)
return retVal
#################################################
def checkUniqueIds(fastaFile):
'''
'''
fasta = Fasta(fastaFile)
if fasta.cnt() != len(set(fasta.headers)):
print >> sys.stderr, "### Fatal error: FASTA sequence identifiers are not unique in %s. Exiting..." %fastaFile
print >> sys.stderr, "### Probably position for this file is given wrong..."
sys.exit(-1)
#################################################
def checkIdLen(fastaFile):
'''
'''
fasta = Fasta(fastaFile)
for i in xrange(fasta.cnt()):
seqId = fasta.headers[i].split()[0]
if len(seqId) > 56:
print >> sys.stderr, "### Fatal error: FASTA sequence identifier %s is too long in %s. Exiting..." %(seqId, fastaFile)
sys.exit(-1)
#################################################
def createOrthoMclConfigFile(wd, userName, eValue, similarity, mySqlIpAddress):
'''
'''
eValue = eValue.split('e')[1]
similarity = int(float(similarity) * 100.0)
with open("%s/orthomcl.config" %wd, 'w') as handle:
handle.write("# this config assumes a mysql database named 'orthomcl'. adjust according\n")
handle.write("# to your situation.\n")
handle.write("dbVendor=mysql\n")
#handle.write("dbConnectString=dbi:mysql:database=ortho%s;host=%s;port=3306\n" %(userName, os.environ["MYSQLHOST"]))
handle.write("dbConnectString=dbi:mysql:database=ortho%s;host=%s;port=3306\n" %(userName, mySqlIpAddress))
handle.write("dbLogin=ortho%s\n" %userName)
handle.write("dbPassword=<PASSWORD>")
handle.write("similarSequencesTable=SimilarSequences\n")
handle.write("orthologTable=Ortholog\n")
handle.write("inParalogTable=InParalog\n")
handle.write("coOrthologTable=CoOrtholog\n")
handle.write("interTaxonMatchView=InterTaxonMatch\n")
handle.write("percentMatchCutoff=%d\n" %similarity)
handle.write("evalueExponentCutoff=%s\n" %eValue)
handle.write("oracleIndexTblSpc=NONE\n")
#################################################
def createMySqlScripts(wd, userName, ver):
'''
'''
with open("%s/createDb.sql" %wd, 'w') as handle:
if ver[0] > 5 or (ver[0] == 5 and ver[1] > 7) or (ver[0] == 5 and ver[1] == 7 and ver[2] > 5):
handle.write("CREATE USER IF NOT EXISTS 'ortho%s'@'%%' IDENTIFIED BY 'password';\n" %userName)
handle.write("CREATE DATABASE ortho%s;\n" %userName)
#handle.write("GRANT SELECT,INSERT,UPDATE,DELETE,CREATE VIEW,CREATE,INDEX,DROP on ortho%s.* TO 'ortho%s'@'%%';\n" %(userName, userName))
handle.write("GRANT ALL PRIVILEGES ON ortho%s.* TO 'ortho%s'@'%%';\n" %(userName, userName))
    with open("%s/dropDb.sql" %wd, 'w') as handle:
        handle.write("drop database if exists ortho%s;\n" %userName)
#################################################
def callShell(base, cmdStr, dummy = None):
    '''
    Run cmdStr through the Base shell helper (the dummy argument is unused).
    '''
base.shell(cmdStr)
#################################################
def main():
    '''
    Run the whole pipeline: validate the input FASTA files, run the all-against-all BLAST,
    load the filtered hits into MySQL, compute ortholog pairs and cluster them with MCL.
    '''
    opts = options()  # parse and validate the command line options
pCnt = int(opts.pCnt)
if pCnt == 0:
pCnt = int(float(multiprocessing.cpu_count()) / 2.0 + 0.5)
eValue = opts.evalue
similarity = opts.sim
minlen = opts.minlen
files = ["%s/%s" %(opts.wd.strip('"').strip("'").rstrip('/'), myFile) for myFile in opts.filenames.strip().split(',')]
labels = opts.labs.strip().split(',')
if len(labels) != len(set(labels)):
print >> sys.stderr, "### Fatal error: duplicate labels found. Exiting..."
sys.exit(-1)
if len(files) != len(set(files)):
print >> sys.stderr, "### Fatal error: duplicate fasta file names found. Exiting..."
sys.exit(-1)
positions = None
if opts.positions != "":
positions = opts.positions.strip().split(',')
if positions == None:
positions = []
for i in xrange(len(files)):
positions.append("1")
if len(files) != len(labels):
print >> sys.stderr, "### Fatal error: number of files does not match with the number of labels. Exiting..."
sys.exit(-1)
if len(positions) != len(labels):
print >> sys.stderr, "### Fatal error: number of labels does not match with the number of positions of the ids. Exiting..."
sys.exit(-1)
for lab in labels:
if len(lab) != 3:
print >> sys.stderr, "### Fatal error: labels have to be exactly three characters long. Exiting..."
sys.exit(-1)
base = Base()
wd = "Results"
wdFasta = "%s/Fasta" %wd
base.shell("rm -rf Results")
base.createDir(wd)
logHandle = open("%s/log.txt" %wd, 'w')
base.setLogHandle(logHandle)
base.createDir(wdFasta)
#userName = getpass.getuser()
#createOrthoMclConfigFile(wd, userName, eValue, similarity)
#createMySqlScripts(wd, userName)
verList = checkMySqlVersion(wd, opts.ip)
createOrthoMclConfigFile(wd, "root", eValue, similarity, opts.ip)
createMySqlScripts(wd, "root", verList)
requiredMolType = "amino acids"
if opts.nucl == True:
requiredMolType = "nucleotides"
for myFile in files:
molType = checkResidue(myFile)
if requiredMolType != molType:
print >> sys.stderr, "### Fatal error: files have to all be %s. Exiting..." %requiredMolType
print >> sys.stderr, "### File %s failed and was %s." %(myFile, molType)
sys.exit(-1)
base.shell("rm -f %s/*.fasta" %wd)
base.shell("rm -f %s/*.fasta" %wdFasta)
for i in xrange(len(files)):
myLab, myFile, myPos = labels[i], files[i], positions[i]
if myFile == "%s.fasta" %myLab:
print >> sys.stderr, "### Fatal error: orthoMCL produces same filenames that you already have. Please rename your fasta files e.g. to .fa instead of .fasta. Exiting..."
sys.exit(-1)
base.shell("orthomclAdjustFasta %s %s %s" %(myLab, myFile, myPos))
checkUniqueIds("%s.fasta" %myLab)
checkIdLen("%s.fasta" %myLab)
base.shell("mv -f %s.fasta %s" %(myLab, wdFasta))
if opts.skipBlast == False:
base.shell("orthomclFilterFasta %s %s 20" %(wdFasta, minlen))
base.shell("mv -f poorProteins.* %s" %wd)
# Blast all against all
if opts.skipBlast == False:
if opts.nucl == False:
base.shell("makeblastdb -in goodProteins.fasta -dbtype prot")
else:
base.shell("makeblastdb -in goodProteins.fasta -dbtype nucl")
base.shell("cp goodProteins.fasta %s/" %wd)
blastEvalue = eValue
if float(blastEvalue) < 1e-5: blastEvalue = "1e-5"
if opts.skipBlast == False:
if opts.nucl == False:
base.shell("blastp -db goodProteins.fasta -query goodProteins.fasta -outfmt 6 -evalue %s -num_threads %d > %s/goodProteins.blast" %(blastEvalue, pCnt, wd))
else:
base.shell("blastn -db goodProteins.fasta -query goodProteins.fasta -outfmt 6 -evalue %s -num_threads %d > %s/goodProteins.blast" %(blastEvalue, pCnt, wd))
base.shell("""awk '{if ($11<=%s) print $0}' %s/goodProteins.blast | grep -v "^#" > %s/filtered.blast""" %(eValue, wd, wd))
base.shell("mv -f goodProteins.* %s/" %wd)
base.shell("orthomclBlastParser %s/filtered.blast %s > %s/similarSequences.txt" %(wd, wdFasta, wd))
# Prepare database
base.shell("mysql -h %s -P 3306 --protocol tcp --user=root --password=password < %s/dropDb.sql" %(opts.ip, wd))
base.shell("mysql -h %s -P 3306 --protocol tcp --user=root --password=password < %s/createDb.sql" %(opts.ip, wd))
base.shell("orthomclInstallSchema %s/orthomcl.config" %wd)
base.shell("orthomclLoadBlast %s/orthomcl.config %s/similarSequences.txt" %(wd, wd))
# Identify potential orthologs
base.shell("orthomclPairs %s/orthomcl.config %s/orthomclPairs.log cleanup=no" %(wd, wd))
base.shell("rm -rf pairs")
base.shell("rm -rf %s/pairs" %wd)
base.shell("orthomclDumpPairsFiles %s/orthomcl.config" %wd)
base.shell("mv -f pairs %s" %wd)
# Group the orthologs
base.shell("mcl mclInput --abc -I 2.0 -o mclOutput")
base.shell("orthomclMclToGroups OWN_ 1 < mclOutput > %s/groups.txt" %wd)
base.shell("mv -f mclInput %s" %wd)
base.shell("mv -f mclOutput %s" %wd)
logHandle.close()
#################################################
if __name__ == "__main__":
main()
| 2.3125
| 2
|
testing/import_text_test.py
|
kstepanmpmg/mldb
| 1
|
12784105
|
<reponame>kstepanmpmg/mldb
#
# import_text_test.py
# <NAME>, 2016-06-21
# This file is part of MLDB. Copyright 2016 mldb.ai inc. All rights reserved.
#
import tempfile
from mldb import mldb, MldbUnitTest, ResponseException
class ImportTextTest(MldbUnitTest):
@classmethod
def setUpClass(cls):
cls.regular_lines_file = \
tempfile.NamedTemporaryFile(dir='build/x86_64/tmp')
cls.regular_lines_file.write(b"a,b,c\n")
cls.regular_lines_file.write(b"d,e,f\n")
cls.regular_lines_file.flush()
cls.irregular_lines_file = \
tempfile.NamedTemporaryFile(dir='build/x86_64/tmp')
cls.irregular_lines_file.write(b"a,b,c\n")
cls.irregular_lines_file.write(b"d,e\n")
cls.irregular_lines_file.write(b"f,g,h,i\n")
cls.irregular_lines_file.flush()
def test_base(self):
res = mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
'runOnCreation' : True,
'dataFileUrl' : 'file://' + self.regular_lines_file.name,
'outputDataset' : {
'id' : 'base_ds',
'type' : 'tabular'
}
}
}).json()
self.assertEqual(res['status']['firstRun']['status'], {
'numLineErrors' : 0,
'rowCount' : 1
})
res = mldb.query("SELECT a, b, c FROM base_ds")
self.assertTableResultEquals(res, [
['_rowName', 'a', 'b', 'c'],
['2', 'd', 'e', 'f']
])
def test_gen_headers(self):
"""
MLDB-1741
"""
res = mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
'runOnCreation' : True,
'dataFileUrl' : 'file://' + self.regular_lines_file.name,
'autoGenerateHeaders' : True,
'outputDataset' : {
'id' : 'gen_headers_ds',
'type' : 'tabular'
}
}
}).json()
self.assertEqual(res['status']['firstRun']['status'], {
'numLineErrors' : 0,
'rowCount' : 2
})
res = mldb.query("SELECT * FROM gen_headers_ds")
self.assertTableResultEquals(res, [
['_rowName', '0', '1', '2'],
['1', 'a', 'b', 'c'],
['2', 'd', 'e', 'f']
])
def test_conflicting_header_config(self):
msg = "autoGenerateHeaders cannot be true if headers is defined."
with self.assertRaisesRegex(ResponseException, msg):
mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
'runOnCreation' : True,
'dataFileUrl' : 'file://' + self.regular_lines_file.name,
'autoGenerateHeaders' : True,
'headers' : ['colA', 'colB', 'colC'],
'outputDataset' : {
'id' : 'gen_headers_ds',
'type' : 'tabular'
}
}
})
def test_basea_irregular(self):
msg = "Error parsing CSV row: too many columns in row"
with self.assertRaisesRegex(ResponseException, msg):
mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
'runOnCreation' : True,
'dataFileUrl' : 'file://' + self.irregular_lines_file.name,
'outputDataset' : {
'id' : 'base_ds',
'type' : 'tabular'
}
}
})
def test_gen_headers_irregular(self):
msg = "Error parsing CSV row: too many columns in row"
with self.assertRaisesRegex(ResponseException, msg):
mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
'runOnCreation' : True,
'dataFileUrl' : 'file://' + self.irregular_lines_file.name,
'autoGenerateHeaders' : True,
'outputDataset' : {
'id' : 'base_ds',
'type' : 'tabular'
}
}
})
def test_rowHash(self):
mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
"dataFileUrl" : "https://raw.githubusercontent.com/datacratic/mldb-pytanic-plugin/master/titanic_train.csv",
'outputDataset' : "titanic_hashed",
"where": "rowHash() % 3 = 0",
'runOnCreation' : True,
}
})
mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
"dataFileUrl" : "https://raw.githubusercontent.com/datacratic/mldb-pytanic-plugin/master/titanic_train.csv",
'outputDataset' : "titanic_no_hashed",
'runOnCreation' : True,
}
})
self.assertTableResultEquals(
mldb.query("select count(*) from titanic_hashed"),
[["_rowName","count(*)"],
["[]",287]]
)
self.assertTableResultEquals(
mldb.query("select count(*) from titanic_no_hashed"),
[["_rowName","count(*)"],
["[]",891]]
)
def test_import_filename_with_whitespaces(self):
"""
MLDB-1797
"""
mldb.post('/v1/procedures', {
'type' : 'import.text',
'params' : {
'dataFileUrl' : 'file://mldb/testing/filename with whitespaces.csv',
'outputDataset' : 'test_impot_filename_with_whitespaces',
'runOnCreation' : True
}
})
res = mldb.query("SELECT * FROM test_impot_filename_with_whitespaces")
self.assertTableResultEquals(res, [
['_rowName', 'a', 'b'],
['2', 1, 2]
])
if __name__ == '__main__':
mldb.run_tests()
| 2.53125
| 3
|
src/pyfx/view/components/help_bar/help_bar.py
|
cielong/pyfx
| 9
|
12784106
|
<filename>src/pyfx/view/components/help_bar/help_bar.py
import urwid
class HelpBar(urwid.WidgetWrap):
HELP_TEXT = [
('title', "Pyfx"), " ", "UP, DOWN, ENTER, Q",
]
def __init__(self, manager):
self._manager = manager
self._text_widget = urwid.Text(HelpBar.HELP_TEXT)
super().__init__(urwid.AttrMap(self._text_widget, "foot"))
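# Illustrative usage (not part of the original module): the bar is typically mounted as the
# footer of an urwid.Frame by the surrounding pyfx view code, e.g.
#   frame = urwid.Frame(body_widget, footer=HelpBar(manager))
# where body_widget and manager are assumed to be supplied by the caller.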
| 2.265625
| 2
|
aws_interface/cloud/auth/register.py
|
hubaimaster/aws-interface
| 53
|
12784107
|
from cloud.crypto import *
from cloud.auth.get_login_method import do as get_login_method
from cloud.permission import Permission, NeedPermission
from cloud.auth.get_login_method import match_policy
from cloud.message import error
import string
# Define the input/output format of the function.
# This information is used when creating the *SDK*.
info = {
'input_format': {
'email': 'str',
'password': '<PASSWORD>',
'extra?': 'map'
},
'output_format': {
'item?': {
'id': 'str',
'creation_date': 'float',
'email': 'str',
'login_method': 'str',
'...': '...'
}
},
'description': 'Register by email and password'
}
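# Illustrative request body for this handler, following the 'input_format' above
# (the concrete values are made up for the example, not taken from the original source):
#
#   data = {
#       'params': {
#           'email': 'user@example.com',
#           'password': '...',              # hashed with a fresh salt below
#           'extra': {'nickname': 'user'}   # optional fields copied onto the stored item
#       }
#   }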
@NeedPermission(Permission.Run.Auth.register)
def do(data, resource):
body = {}
params = data['params']
email = params['email']
password = params['password']
extra = params.get('extra', {})
extra = {key: value for key, value in extra.items() if value != '' and value != {} and value != []}
salt = Salt.get_salt(32)
password_hash = <PASSWORD>_password(password, salt)
password_meta = {
'count': len(password),
'count_lowercase': len([c for c in password if c.islower()]),
'count_uppercase': len([c for c in password if c.isupper()]),
'count_special': len([c for c in password if c in string.punctuation]),
}
partition = 'user'
data['params']['login_method'] = 'email_login'
login_conf = get_login_method(data, resource)['item']
register_policy_code = login_conf.get('register_policy_code', None)
if not data.get('admin', False):
if not match_policy(register_policy_code, extra, password_meta):
body['error'] = error.REGISTER_POLICY_VIOLATION
return body
default_group_name = login_conf['default_group_name']
enabled = login_conf['enabled']
if enabled == 'true':
enabled = True
elif enabled == 'false':
enabled = False
if not enabled:
body['error'] = error.EMAIL_LOGIN_INVALID
return body
instructions = [
[None, 'email', 'eq', email]
]
items, end_key = resource.db_query(partition, instructions)
users = list(items)
if len(users) > 0:
body['error'] = error.EXISTING_ACCOUNT
return body
else:
item = {
'email': email,
'password_hash': <PASSWORD>,
'salt': <PASSWORD>,
'groups': [default_group_name],
'login_method': 'email_login',
}
# Put extra value in the item
for key in extra:
if key not in item:
item[key] = extra[key]
resource.db_put_item(partition, item)
body['item'] = item
return body
| 2.4375
| 2
|
src/plotVoltage.py
|
gdetor/Crebral
| 0
|
12784108
|
<filename>src/plotVoltage.py
import numpy as np
import matplotlib.pylab as plt
def plotVoltage(t, y):
fig = plt.figure(figsize=(10, 10))
ax = fig.add_subplot(111)
ax.plot(t, y, 'k', lw=2)
if __name__ == '__main__':
t = np.genfromtxt('time.dat')
y = np.genfromtxt('solution.dat')
s = np.genfromtxt('stimuli.dat')
n = 10
if 0:
plotVoltage(t[n:], y[0, n:])
plotVoltage(t[n:], y[1, n:])
if 1:
fig = plt.figure()
ax = fig.add_subplot(111)
ax.plot(t[n:], y[0, n:], 'r', lw=1.5)
ax.plot(t[n:], y[1, n:], 'b', lw=1.5)
ax.plot(t[n:], s[0, n:] - 28, 'k')
plt.show()
| 3.0625
| 3
|
learning-algorithm-batch/run_Batch_DF_Classifier.py
|
rashid-islam/Differential_Fairness
| 4
|
12784109
|
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 6 21:52:54 2019
@author: USER
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import sklearn as sk
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import accuracy_score, roc_auc_score,f1_score
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from utilities import load_census_data
from fairness_metrics import computeEDFforData
from DNN_model import NeuralNet,training_fair_model
#The function below ensures that we seed all random generators with the same value to get reproducible results
def set_random_seed(state=1):
gens = (np.random.seed, torch.manual_seed, torch.cuda.manual_seed)
for set_state in gens:
set_state(state)
RANDOM_STATE = 1
set_random_seed(RANDOM_STATE)
#%% data loading and pre-processing
# load the train dataset
X, y, S = load_census_data('data/adult.data',1)
# Define all the "intersectional groups" to maintain stochastic update of p(y|S) correctly among different batches
intersectionalGroups = np.unique(S,axis=0) # all intersecting groups, i.e. black-women, white-man etc
# load the test dataset
test_X, test_y, test_S = load_census_data('data/adult.test',0)
# scale/normalize train & test data and shuffle train data
scaler = StandardScaler().fit(X)
scale_df = lambda df, scaler: pd.DataFrame(scaler.transform(df), columns=df.columns, index=df.index)
X = X.pipe(scale_df, scaler)
test_X = test_X.pipe(scale_df, scaler)
X, y, S = sk.utils.shuffle(X, y, S, random_state=0)
X = X.values
y = y.values
S = S.values
test_X = test_X.values
test_y = test_y.values
test_S = test_S.values
X, dev_X, y, dev_y, S, dev_S = train_test_split(X, y, S, test_size=0.30,stratify=y, random_state=7)
#%%
# deep neural network using pytorch
trainData = torch.from_numpy(X)
trainLabel = torch.from_numpy(y.reshape((-1,1)))
#devData = torch.from_numpy(devData)
testData = torch.from_numpy(test_X)
devData = torch.from_numpy(dev_X)
# hyperparameters
input_size = trainData.size()[1]
hidden1 = 16
hidden2 = 16
hidden3 = 16
output_size = 1
num_epochs = 500
#burnIn = 50
stepSize = 0.1
learning_rate = 0.001
burnIn = 50
epsilonBase = torch.tensor(0.0) # To protect the 80%-rule in intersectional setting, set this variable to: - log(0.8) = 0.2231
#%%
import sys
sys.stdout=open("batch_DF_EPS0.txt","w")
#%% training DNN model with fairness constraint
# Train a fair classifier
lamda = torch.tensor(0.01) # λ is a hyper-parameter that balances between the prediction loss and fairness.
# Select λ for fair learning algorithms via rigorous grid search on the development set. See paper for details.
DF_Model = training_fair_model(input_size,hidden1,hidden2,hidden3,output_size,learning_rate,num_epochs,trainData,
trainLabel,S,intersectionalGroups,burnIn,stepSize,epsilonBase,lamda)
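# A minimal sketch (kept as comments; not part of the original experiment) of the grid search
# over lamda mentioned above -- the candidate values are illustrative only:
#
#   for lam in [0.001, 0.01, 0.1, 1.0]:
#       candidate = training_fair_model(input_size, hidden1, hidden2, hidden3, output_size,
#                                       learning_rate, num_epochs, trainData, trainLabel, S,
#                                       intersectionalGroups, burnIn, stepSize, epsilonBase,
#                                       torch.tensor(lam))
#       # evaluate accuracy and the DF measures on (dev_X, dev_y, dev_S) and keep the lamda
#       # giving the best accuracy/fairness trade-off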
#%%
# Validate the model
with torch.no_grad():
devData = Variable(devData.float())
predictProb = DF_Model(devData)
predicted = ((predictProb>0.5).numpy()).reshape((-1,))
Accuracy = sum(predicted == dev_y)/len(dev_y)
# Save results
predictProb = (predictProb.numpy()).reshape((-1,))
print(f"DF classifier dev accuracy: {Accuracy: .3f}")
aucScore = roc_auc_score(dev_y,predictProb)
print(f"DF classifier dev ROC AUC: {aucScore: .3f}")
nn_f1 = f1_score(dev_y,predicted)
print(f"DF classifier dev F1 score: {nn_f1: .2f}")
epsilon_hard,epsilon_soft,gamma_hard,gamma_soft,p_rule_hard,p_rule_soft = computeEDFforData(dev_S,predicted,predictProb,intersectionalGroups)
print(f"DF classifier dev epsilon_hard: {epsilon_hard: .3f}")
print(f"DF classifier dev epsilon_soft: {epsilon_soft: .3f}")
print(f"DF classifier dev gamma_hard: {gamma_hard: .3f}")
print(f"DF classifier dev gamma_soft: {gamma_soft: .3f}")
print(f"DF classifier dev p_rule_hard: {p_rule_hard: .3f}")
print(f"DF classifier dev p_rule_soft: {p_rule_soft: .3f}")
#%%
# Test the model
with torch.no_grad():
testData = Variable(testData.float())
predictProb = DF_Model(testData)
predicted = ((predictProb>0.5).numpy()).reshape((-1,))
Accuracy = sum(predicted == test_y)/len(test_y)
# Save results
predictProb = (predictProb.numpy()).reshape((-1,))
print(f"DF_Classifier accuracy: {Accuracy: .3f}")
aucScore = roc_auc_score(test_y,predictProb)
print(f"DF_Classifier ROC AUC: {aucScore: .3f}")
nn_f1 = f1_score(test_y,predicted)
print(f"DF_Classifier F1 score: {nn_f1: .2f}")
epsilon_hard,epsilon_soft,gamma_hard,gamma_soft,p_rule_hard,p_rule_soft = computeEDFforData(test_S,predicted,predictProb,intersectionalGroups)
print(f"DF_Classifier epsilon_hard: {epsilon_hard: .3f}")
print(f"DF_Classifier epsilon_soft: {epsilon_soft: .3f}")
print(f"DF_Classifier gamma_hard: {gamma_hard: .3f}")
print(f"DF_Classifier gamma_soft: {gamma_soft: .3f}")
print(f"DF_Classifier p_rule_hard: {p_rule_hard: .3f}")
print(f"DF_Classifier p_rule_soft: {p_rule_soft: .3f}")
| 2.625
| 3
|
Ejercicios/Filtro.py
|
dannieldev/Fundamentos-de-Python
| 0
|
12784110
|
def filtro(elem):
return(elem > 0)
l = [1,-3,2,-7,-8,-9,10]
lr = filter(filtro,l)
print(l)
print(list(lr))  # materialize the filter object so the positive values are shown
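# Equivalent ways to express the same positive-number filter, added for illustration:
print(list(filter(lambda elem: elem > 0, l)))  # anonymous function instead of filtro
print([elem for elem in l if elem > 0])        # list comprehension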
| 2.953125
| 3
|
Source/input/ai/searcher.py
|
LucXyMan/starseeker
| 0
|
12784111
|
<reponame>LucXyMan/starseeker<gh_stars>0
#!/usr/bin/env python2.7
# -*- coding:UTF-8 -*-2
u"""searcher.py
Copyright (c) 2019 <NAME>
This software is released under BSD license.
Command search module.
"""
import pieces as _pieces
import utils.const as _const
if _const.IS_MULTIPROCESSING:
import multiprocessing as __multiprocessing
__PPType = __multiprocessing.Process
_PPQueue = __multiprocessing.JoinableQueue
else:
import threading as __threading
import Queue as __Queue
__PPType = __threading.Thread
_PPQueue = __Queue.Queue
class Searcher(__PPType):
u"""コマンドを作成する。
アルカナの使用、ピースのルートを設定。
"""
__MAX_DEPTH_WEIGHT = 0
__MIN_DEPTH_WEIGHT = 0
__AVG_DEPTH_WEIGHT = 0
__HOLE_PREVENTION_WEIGHT = 3
__SMOOTHNESS_WEIGHT = 1
__BLOCKS_ABOVE_OF_HOLES_WEIGHT = 1
__ADJACENT_SPACES_WEIGHT = 0
__GOAL_DISTANCE_WEIGHT = 0
__MALIGNANCY_WEIGHT = 1
__UNLOCK_WEIGHT = 5
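    # Each weight above is applied in __set_point() as a left-shift factor: a heuristic term
    # (hole prevention, smoothness, blocks above holes, ...) is computed and then shifted left
    # by its weight before placements are compared, so a weight of 0 leaves the term unscaled
    # and larger weights make it dominate the placement score.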
def __init__(self, drop_point):
u"""コンストラクタ。
"""
import armament.collectible as __collectible
super(Searcher, self).__init__()
self.__in = _PPQueue()
self.__out = _PPQueue()
self.__drop_point = _pieces.State(drop_point)
self.__is_drive = True
self.__is_stop = False
self.__collectible = __collectible.get_all()
def run(self):
u"""AIの起動。
"""
import time as __time
import command as __command
def __get_tactics():
u"""カード使用コマンドを決定。
"""
def __get_deletion():
u"""カードの削除コマンドを設定。
"""
for cmd, card in zip((
_const.USE5_COMMAND, _const.USE6_COMMAND,
_const.USE7_COMMAND, _const.USE8_COMMAND
), arcana):
if (
card.type == _const.JOKER_ARCANUM and
prm_self.has_purify or
card.type == _const.SUMMON_ARCANUM and
(prm_self.is_full_group or prm_self.has_soul_eat)
):
return [__command.Simple(cmd)]
return []
if prm_self.is_arcana_available:
arcana = [self.__collectible[i] for i in prm_self.hand]
for cmd, arcanum in zip((
_const.USE1_COMMAND, _const.USE2_COMMAND,
_const.USE3_COMMAND, _const.USE4_COMMAND
), arcana):
if arcanum.type == _const.JOKER_ARCANUM:
weight = _const.NUMBER_OF_STAR*prm_self.jokers
if prm_self.has_reverse_sorcery or (
prm_self.resource.total <= weight
) and not prm_self.has_purify:
return [__command.Simple(cmd)]
elif (
arcanum.type == _const.SUMMON_ARCANUM and
arcanum.is_available((prm_self, prm_other))
):
return [__command.Simple(cmd)]
elif arcanum.type == _const.SORCERY_ARCANUM:
altered = arcanum.adapt(
None if prm_self.catalyst == -1 else
self.__collectible[prm_self.catalyst])
sorcery = altered if altered else arcanum
if sorcery.is_available((prm_self, prm_other)):
return [__command.Simple(cmd)]
deletion = __get_deletion()
if deletion:
return deletion
return []
def __search():
u"""探索処理。
"""
import marker as __marker
def __calculate():
u"""ピースポイント計算。
"""
import point as __point
def __get_search_area():
u"""検索範囲取得。
return: top, right, bottom, left.
"""
def __topmost_block_line():
u"""フィールドの最も高いブロック行番号を取得。
"""
height = field.table.height
for y in range(height):
if any(
cell.is_block for
cell in field.table.get_line(y)
):
return y
return height
top = max(__topmost_block_line(), target.bottom)
right = field.table.width-1
bottom = field.table.height+target.height-1
left = -(target.width-1)
return top, right, bottom, left
def __is_landingable():
u"""ピース設置可能判定。
"""
old_state = target.state
if (
not field.is_outer(target) and
not field.is_collide(target) and
not target.move(field, _const.DOWN)
):
return True
else:
target.state = old_state
return False
def __set_point():
u"""得点設定。
"""
def __get_roughness():
u"""フィールドの粗さを取得する。
"""
width = virtual.table.width
surface = map(
lambda block: virtual.height-1 if
block is None else block.point.top, tuple(
virtual.table.get_topmost_block(col) for
col in range(width)))
return (
surface[0]-surface[1]+surface[-1]-surface[-2]+sum(
abs(surface[col]-surface[col+1]) for
col in range(1, width-2)))
def __get_closures(x):
u"""ホールの上に存在するブロック数を取得。
"""
lowest_space = virtual.table.get_lowest_space(x)
lowest_space_height = (
lowest_space.point.top if
lowest_space else 0)
lowest_hole = virtual.table.get_lowest_hole(x)
return (
lowest_hole.point.top-lowest_space_height if
lowest_hole else 0)
def __get_unlock(field):
u"""アンロック可能点数取得。
"""
def __has_pair(line):
u"""ライン上に鍵と宝箱のペアが存在する場合に真。
"""
return any(
cell.is_locked for cell in line) and any(
cell.name in _const.KEY_NAMES.split("#") for
cell in line)
lines = (
field.table.get_line(y) for
y in range(field.table.height))
return sum(__has_pair(line) for line in lines)
def __get_stack():
u"""フィールドの積み重ねを取得。
"""
result = 0
width = virtual.table.width
for col in range(width):
block = virtual.table.get_topmost_block(col)
if block:
result += virtual.height-block.point.top
return result/width
virtual = field.virtual
squares = virtual.squares
depths = tuple(block.point.y for block in target.blocks)
point.max_depth = max(depths) << self.__MAX_DEPTH_WEIGHT
point.min_depth = min(depths) << self.__MIN_DEPTH_WEIGHT
point.avg_depth = (
sum(depths)/len(depths) << self.__AVG_DEPTH_WEIGHT)
point.goal_distance = abs(
target.state.left-self.__drop_point.left
)+abs(
target.state.top-self.__drop_point.top
) << self.__GOAL_DISTANCE_WEIGHT
target.stamp(virtual)
                    point.unlock = (
__get_unlock(virtual) << self.__UNLOCK_WEIGHT)
if point.is_t_spin:
virtual.set_super_drop()
point.completions = len(
virtual.simple_complete()
) << __get_stack()
virtual.turn()
point.hole_prevention = squares-sum(
1 for cell in virtual.table.below if cell.is_hole
) << self.__HOLE_PREVENTION_WEIGHT
point.smoothness = (
squares-__get_roughness() << self.__SMOOTHNESS_WEIGHT)
point.block_above_of_holes = squares-sum(
__get_closures(x) for
x in range(virtual.table.width)
) << self.__BLOCKS_ABOVE_OF_HOLES_WEIGHT
point.adjasent_spaces = sum(
1 for cell in virtual.table if cell.is_adjacent
) << self.__ADJACENT_SPACES_WEIGHT
point.malignancy = (squares << 2)-sum(
block.get_malignancy() for block in virtual.blocks
) << self.__MALIGNANCY_WEIGHT
points = []
for is_hold, target in enumerate(
(piece,) if prm_self.is_hold_captured else
(piece, hold)
):
top, right, bottom, left = __get_search_area()
for angle in range(target.angles):
if (
target.pruning == _const.SINGLE_PRUNING and
angle in (_const.A90, _const.A180, _const.A270)
) or (
target.pruning == _const.HALF_PRUNING and
angle in (_const.A180, _const.A270)
):
continue
for x in range(left, right):
for y in range(top, bottom):
if (
_const.IS_MULTIPROCESSING and
self.__out.empty()
):
self.__out.put_nowait(
("signal", ("point_calc",)))
self.__out.task_done()
if not self.__in.empty():
name, _ = self.__in.get_nowait()
if name in ("stop", "terminate"):
self.__is_stop = True
if name == "terminate":
self.__is_drive = False
return []
target.angle = angle
target.bottomleft = x, y
if __is_landingable():
point = __point.Point(
target.state, is_hold, target.is_t and
target.is_three_corner(field) and
target.is_flexible(field))
__set_point()
points.append(point)
return points
hold_command = __command.Simple(_const.HOLD_COMMAND)
if prm_self.hold:
if not self.__is_stop:
field = _pieces.Field(
_pieces.Pattern(*prm_self.field), is_virtual=True)
field.skills = prm_self.skills
piece = _pieces.Falling(
_pieces.Rotatable(*prm_self.piece), is_virtual=True
).virtual
hold = _pieces.Falling(
_pieces.Rotatable(*prm_self.hold), is_virtual=True
).virtual
piece.topleft = prm_self.piece_pos
goal = piece.state
points = __calculate()
while points:
max_point = max(points)
points.remove(max_point)
target = hold if max_point.is_hold else piece
target.state = max_point.state
marker = __marker.Marker(field, target, goal)
marker.out = self.__out
result = marker.mark(
max_point.is_t_spin and max_point.completions)
if result:
if max_point.is_hold:
result = [hold_command]+result
return result
return [__command.Simple(_const.UP_COMMAND)]
else:
return [hold_command]
waiting_time = 1./_const.FRAME_RATE
while self.__is_drive:
__time.sleep(waiting_time)
if not self.__in.empty():
cmd, args = self.__in.get_nowait()
if cmd == "search":
prm_self, prm_other = args
tactics = __get_tactics()
cmds = tactics if tactics else __search()
if cmds or (not cmds and self.__is_stop):
self.__out.put_nowait(("search", (cmds,)))
self.__out.task_done()
self.__is_stop = False
elif cmd == "terminate":
self.__is_drive = False
@property
def in_out(self):
u"""入出力キュー取得。
return in, out.
"""
return self.__in, self.__out
| 2.28125
| 2
|
code/Spectral_condensation.py
|
matthew-hirn/condensation
| 2
|
12784112
|
# -*- coding: utf-8 -*-
"""
Created on Thu May 2 13:42:37 2019
@author: <NAME>
"""
import numpy as np
import matplotlib.pyplot as plt
import matplotlib as mpl
import matplotlib.cm as cm
from mpl_toolkits.mplot3d import Axes3D
import cycler
def spectral_decay(case = 4,
vname = 'example_0',
plot_type = 'val',
save = False):
## Parameters & Settings
# 0: Barbell
# 1: Tree
# 2: Gaussian Mix
# 3: Hyperuniform Circle
# 4: Hyperuniform Ellipse
## Parameters & Settings
# case = 4 # 0: Barbell; 1: Tree; 2: Gaussian Mix; 3/4: Hyperuniform Circle/Ellipse
    # vname = 'example_0' # special tag for video file
colormap = cm.viridis # cmap for viz
    # plot_type = 'val' # 'vec': right singular vec embed; 'val': singular val; '3d': 3D
# save = False # Save figure?
save_type = '.pdf'
psi_min = 2 # min right singular vector to plot; 2, 3, ..., 11
psi_max = psi_min + 1 # max right singular vector to plot
psi3d_min = 2 # 2, 3, ..., 11
psi3d_mid = psi3d_min + 1
psi3d_max = psi3d_min + 2
for sig in np.arange(2,12):
if case == 0:
fname = 'barbell'
cdir = 'barbell/'
plot_title = 'Barbell'
elif case == 1:
fname = 'tree'
cdir = 'tree/'
plot_title = 'Tree'
elif case == 2:
fname = 'gauss'
cdir = 'gauss/'
plot_title = 'Gauss'
elif case == 3:
fname = 'hyperuni_circle'
cdir = 'h_circle/'
plot_title = 'Hyperuniform Circle'
elif case == 4:
fname = 'hyperuni_ellipse'
cdir = 'ellipse/'
plot_title = 'Hyperuniform Ellipse'
sname = fname + '_' + vname # save name tag
fname += vname # load name tag
# Get # of Iterations
iter_name = 'dm/'+cdir+'iterations_'+fname+'.npy'
iterations = np.load(iter_name)
# Get Diffusion Maps Spectra
eDM_name = 'dm/'+cdir+'E_'+fname+'.npy'; eDM_sig = np.load(eDM_name)[sig - 2]
# Initialize Specra Lists
ei = []; et = []; eDM = []
# Get Epsilon (shape = (2, #iterations), 0th axis #eps doublings, 1st axis eps)
eps_name = 'dm/'+cdir+'epsilon_list_'+fname+'.npy'; eps_adjust = np.load(eps_name)
# Get Number of Points in Dataset & Color
datasize_name = 'dm/'+cdir+'V_'+fname+'.npy'; N = np.load(datasize_name).shape[0]
C_name = 'dm/'+cdir+'C_'+fname+'.npy'; C = np.load(C_name)
#%%
for i in np.arange(1, 1+iterations):
'''Singular Values (DM for Changing Data, TCDM) & Eigenvalues (DM)'''
pi_name = 'p_i/'+cdir+'Ei_'+str(i)+'_'+fname+'.npy'
pt_name = 'p_t/'+cdir+'Et_'+str(i)+'_'+fname+'.npy'
ei.append([i, np.load(pi_name)[sig - 2]]) # Operator P_i
et.append([i, np.load(pt_name)[sig - 2]]) # Composed Operator P^((t))
eDM.append([i, eDM_sig**i]) # Diffusion Maps Operator P^{t}
if plot_type == 'val':
plt.subplot(311)
plt.plot(np.asarray(ei).T[0], np.asarray(ei).T[1], marker='o', label=r'$P_{\epsilon,i}$', color = 'c')
plt.subplot(312)
plt.plot(np.asarray(et).T[0], np.asarray(et).T[1], marker='o', label=r'$P_{\epsilon}^{(t)}$', color = 'purple')
plt.ylabel(r"$\sigma_{k}$")
plt.subplot(313)
plt.plot(np.asarray(eDM).T[0], np.asarray(eDM).T[1], marker='o', label=r'$P_{\epsilon}^{t}$', color = 'g')
plt.xlabel("Iteration")
# plt.show()
save_dir = 'figs/spectral/'+cdir
save_name = sname+'_sigma'+str(sig)+'_N-'+str(N)
elif plot_type == 'vec':
# Set Singular Vectors
psiDM_name = 'p_i/'+cdir+'Vi_1_'+fname+'.npy'
psiDM = np.load(psiDM_name)
# Generate Figure
plt.title(r'$\psi_{'+str(psi_min)+'}$ & $\psi_{'+str(psi_max)+'}$ for '+plot_title+' (N = '+str(N)+')')
plt.scatter(psiDM[:,psi_min-2], psiDM[:,psi_max-2], c=C, cmap = colormap,
vmin=np.amin(C), vmax=np.amax(C), label=r'$P_{\epsilon}^{t}$')
plt.xlabel(r'$\psi_{'+str(psi_min)+'}$')
plt.ylabel(r'$\psi_{'+str(psi_max)+'}$')
plt.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
plt.show()
save_dir = 'figs/embed/'+cdir
save_name = sname+'_psi'+str(psi_min)+'_psi'+str(psi_max)+'_N-'+str(N)
elif plot_type == '3d':
# Set Singular Vectors and Plot
from mpl_toolkits.mplot3d import Axes3D
psiDM_name = 'p_i/'+cdir+'Vi_1_'+fname+'.npy'
psiDM = np.load(psiDM_name)
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
plt.title(r'$\psi_{'+str(psi3d_min)+'}$, $\psi_{'+str(psi3d_mid)+
'}$, & $\psi_{'+str(psi3d_max)+'}$ Embedding of '
+plot_title+' (N = '+str(N)+')')
ax.scatter(psiDM[:, psi3d_min - 2], psiDM[:, psi3d_mid - 2],
psiDM[:, psi3d_max - 2], c=C, cmap=colormap)
ax.tick_params(axis='both', which='both', bottom=False, top=False,
labelbottom=False, right=False, left=False, labelleft=False)
plt.show()
save_dir = 'figs/embed/'+cdir
save_name = sname+'_psi'+str(psi3d_min)+'_psi'+str(
psi3d_mid)+'_psi'+str(psi3d_max)+'_N-'+str(N)
# Save Figure
if save == True:
save_name = save_dir+save_name+save_type
plt.savefig(save_name, bbox_inches='tight', transparent=True, dpi=300)
return plt.show()
if "__name__" == "__main__":
spectral_decay()
# Save Figure
if save == True:
save_name = save_dir+save_name+save_type
plt.savefig(save_name, bbox_inches='tight', transparent=True, dpi=300)
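# Other illustrative invocations (parameter values follow the options documented above;
# these are examples, not runs performed by the original script):
#   spectral_decay(case=1, vname='example_0', plot_type='vec', save=False)  # tree, 2-D embedding
#   spectral_decay(case=3, vname='example_0', plot_type='3d', save=True)    # circle, 3-D embedding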
| 2.3125
| 2
|
dlkit/services/commenting.py
|
UOC/dlkit
| 2
|
12784113
|
"""DLKit Services implementations of commenting service."""
# pylint: disable=no-init
# osid specification includes some 'marker' interfaces.
# pylint: disable=too-many-ancestors
# number of ancestors defined in spec.
# pylint: disable=too-few-public-methods,too-many-public-methods
# number of methods defined in spec. Worse yet, these are aggregates.
# pylint: disable=invalid-name
# method and class names defined in spec.
# pylint: disable=no-self-use,unused-argument
# to catch unimplemented methods.
# pylint: disable=super-init-not-called
# it just isn't.
from . import osid
from .osid_errors import Unimplemented, IllegalState, InvalidArgument
from dlkit.abstract_osid.commenting import objects as abc_commenting_objects
from dlkit.manager_impls.commenting import managers as commenting_managers
DEFAULT = 0
COMPARATIVE = 0
PLENARY = 1
FEDERATED = 0
ISOLATED = 1
ANY_STATUS = 0
ACTIVE = 1
UNSEQUESTERED = 0
SEQUESTERED = 1
AUTOMATIC = 0
MANDATORY = 1
DISABLED = -1
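# Session-management modes used by the manager below: AUTOMATIC caches provider sessions until
# close_sessions() is called, MANDATORY keeps cached sessions even through close_sessions(),
# and DISABLED makes _get_provider_session() skip caching altogether.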
class CommentingProfile(osid.OsidProfile, commenting_managers.CommentingProfile):
"""CommentingProfile convenience adapter including related Session methods."""
def __init__(self):
self._provider_manager = None
def supports_comment_lookup(self):
"""Pass through to provider supports_comment_lookup"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_lookup()
def supports_comment_query(self):
"""Pass through to provider supports_comment_query"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_query()
def supports_comment_admin(self):
"""Pass through to provider supports_comment_admin"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_admin()
def supports_comment_book(self):
"""Pass through to provider supports_comment_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_book()
def supports_comment_book_assignment(self):
"""Pass through to provider supports_comment_book_assignment"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_comment_book_assignment()
def supports_book_lookup(self):
"""Pass through to provider supports_book_lookup"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_lookup()
def supports_book_admin(self):
"""Pass through to provider supports_book_admin"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_admin()
def supports_book_hierarchy(self):
"""Pass through to provider supports_book_hierarchy"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_hierarchy()
def supports_book_hierarchy_design(self):
"""Pass through to provider supports_book_hierarchy_design"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.supports_resource_lookup
return self._provider_manager.supports_book_hierarchy_design()
def get_comment_record_types(self):
"""Pass through to provider get_comment_record_types"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_comment_record_types()
comment_record_types = property(fget=get_comment_record_types)
def get_comment_search_record_types(self):
"""Pass through to provider get_comment_search_record_types"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_comment_search_record_types()
comment_search_record_types = property(fget=get_comment_search_record_types)
def get_book_record_types(self):
"""Pass through to provider get_book_record_types"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_book_record_types()
book_record_types = property(fget=get_book_record_types)
def get_book_search_record_types(self):
"""Pass through to provider get_book_search_record_types"""
# Implemented from kitosid template for -
# osid.resource.ResourceProfile.get_resource_record_types
return self._provider_manager.get_book_search_record_types()
book_search_record_types = property(fget=get_book_search_record_types)
class CommentingManager(osid.OsidManager, osid.OsidSession, CommentingProfile, commenting_managers.CommentingManager):
"""CommentingManager convenience adapter including related Session methods."""
def __init__(self, proxy=None):
self._runtime = None
self._provider_manager = None
self._provider_sessions = dict()
self._session_management = AUTOMATIC
self._book_view = DEFAULT
# This is to initialize self._proxy
osid.OsidSession.__init__(self, proxy)
self._sub_package_provider_managers = dict()
def _set_book_view(self, session):
"""Sets the underlying book view to match current view"""
if self._book_view == COMPARATIVE:
try:
session.use_comparative_book_view()
except AttributeError:
pass
else:
try:
session.use_plenary_book_view()
except AttributeError:
pass
def _get_provider_session(self, session_name, proxy=None):
"""Gets the session for the provider"""
agent_key = self._get_agent_key(proxy)
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
session = self._instantiate_session('get_' + session_name, self._proxy)
self._set_book_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def _get_sub_package_provider_manager(self, sub_package_name):
if sub_package_name in self._sub_package_provider_managers:
return self._sub_package_provider_managers[sub_package_name]
config = self._runtime.get_configuration()
parameter_id = Id('parameter:{0}ProviderImpl@dlkit_service'.format(sub_package_name))
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
if self._proxy is None:
# need to add version argument
sub_package = self._runtime.get_manager(sub_package_name.upper(), provider_impl)
else:
# need to add version argument
sub_package = self._runtime.get_proxy_manager(sub_package_name.upper(), provider_impl)
self._sub_package_provider_managers[sub_package_name] = sub_package
return sub_package
def _get_sub_package_provider_session(self, sub_package, session_name, proxy=None):
"""Gets the session from a sub-package"""
agent_key = self._get_agent_key(proxy)
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
manager = self._get_sub_package_provider_manager(sub_package)
try:
session = self._instantiate_session('get_' + session_name + '_for_bank',
proxy=self._proxy,
manager=manager)
except AttributeError:
session = self._instantiate_session('get_' + session_name,
proxy=self._proxy,
manager=manager)
            self._set_book_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def _instantiate_session(self, method_name, proxy=None, *args, **kwargs):
"""Instantiates a provider session"""
if 'manager' in kwargs:
session_class = getattr(kwargs['manager'], method_name)
del kwargs['manager']
else:
session_class = getattr(self._provider_manager, method_name)
if proxy is None:
try:
return session_class(bank_id=self._catalog_id, *args, **kwargs)
except AttributeError:
return session_class(*args, **kwargs)
else:
try:
return session_class(bank_id=self._catalog_id, proxy=proxy, *args, **kwargs)
except AttributeError:
return session_class(proxy=proxy, *args, **kwargs)
def initialize(self, runtime):
"""OSID Manager initialize"""
from .primitives import Id
if self._runtime is not None:
raise IllegalState('Manager has already been initialized')
self._runtime = runtime
config = runtime.get_configuration()
parameter_id = Id('parameter:commentingProviderImpl@dlkit_service')
provider_impl = config.get_value_by_parameter(parameter_id).get_string_value()
if self._proxy is None:
# need to add version argument
self._provider_manager = runtime.get_manager('COMMENTING', provider_impl)
else:
# need to add version argument
self._provider_manager = runtime.get_proxy_manager('COMMENTING', provider_impl)
def close_sessions(self):
"""Close all sessions, unless session management is set to MANDATORY"""
if self._session_management != MANDATORY:
self._provider_sessions = dict()
def use_automatic_session_management(self):
"""Session state will be saved unless closed by consumers"""
self._session_management = AUTOMATIC
def use_mandatory_session_management(self):
"""Session state will be saved and can not be closed by consumers"""
self._session_management = MANDATORY
def disable_session_management(self):
"""Session state will never be saved"""
self._session_management = DISABLED
self.close_sessions()
def get_comment_lookup_session(self, *args, **kwargs):
"""Pass through to provider get_comment_lookup_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_catalog_template
return self._provider_manager.get_comment_lookup_session(*args, **kwargs)
comment_lookup_session = property(fget=get_comment_lookup_session)
def get_comment_lookup_session_for_book(self, *args, **kwargs):
"""Pass through to provider get_comment_lookup_session_for_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_catalog_template
return self._provider_manager.get_comment_lookup_session_for_book(*args, **kwargs)
def get_comment_query_session(self, *args, **kwargs):
"""Pass through to provider get_comment_query_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_catalog_template
return self._provider_manager.get_comment_query_session(*args, **kwargs)
comment_query_session = property(fget=get_comment_query_session)
def get_comment_query_session_for_book(self, *args, **kwargs):
"""Pass through to provider get_comment_query_session_for_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_catalog_template
return self._provider_manager.get_comment_query_session_for_book(*args, **kwargs)
def get_comment_admin_session(self, *args, **kwargs):
"""Pass through to provider get_comment_admin_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_catalog_template
return self._provider_manager.get_comment_admin_session(*args, **kwargs)
comment_admin_session = property(fget=get_comment_admin_session)
def get_comment_admin_session_for_book(self, *args, **kwargs):
"""Pass through to provider get_comment_admin_session_for_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_for_bin_catalog_template
return self._provider_manager.get_comment_admin_session_for_book(*args, **kwargs)
def get_comment_book_session(self, *args, **kwargs):
"""Pass through to provider get_comment_book_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_manager_template
return self._provider_manager.get_comment_book_session(*args, **kwargs)
comment_book_session = property(fget=get_comment_book_session)
def get_comment_book_assignment_session(self, *args, **kwargs):
"""Pass through to provider get_comment_book_assignment_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_manager_template
return self._provider_manager.get_comment_book_assignment_session(*args, **kwargs)
comment_book_assignment_session = property(fget=get_comment_book_assignment_session)
def get_book_lookup_session(self, *args, **kwargs):
"""Pass through to provider get_book_lookup_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_manager_template
return self._provider_manager.get_book_lookup_session(*args, **kwargs)
book_lookup_session = property(fget=get_book_lookup_session)
def get_book_admin_session(self, *args, **kwargs):
"""Pass through to provider get_book_admin_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_manager_template
return self._provider_manager.get_book_admin_session(*args, **kwargs)
book_admin_session = property(fget=get_book_admin_session)
def get_book_hierarchy_session(self, *args, **kwargs):
"""Pass through to provider get_book_hierarchy_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_manager_template
return self._provider_manager.get_book_hierarchy_session(*args, **kwargs)
book_hierarchy_session = property(fget=get_book_hierarchy_session)
def get_book_hierarchy_design_session(self, *args, **kwargs):
"""Pass through to provider get_book_hierarchy_design_session"""
# Implemented from kitosid template for -
# osid.resource.ResourceManager.get_resource_lookup_session_manager_template
return self._provider_manager.get_book_hierarchy_design_session(*args, **kwargs)
book_hierarchy_design_session = property(fget=get_book_hierarchy_design_session)
def get_commenting_batch_manager(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services')
commenting_batch_manager = property(fget=get_commenting_batch_manager)
##
# The following methods are from osid.commenting.CommentBookSession
def can_lookup_comment_book_mappings(self):
"""Pass through to provider CommentBookSession.can_lookup_comment_book_mappings"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.can_lookup_resource_bin_mappings
return self._get_provider_session('comment_book_session').can_lookup_comment_book_mappings()
def use_comparative_book_view(self):
"""Pass through to provider CommentBookSession.use_comparative_book_view"""
self._book_view = COMPARATIVE
# self._get_provider_session('comment_book_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_book_view()
except AttributeError:
pass
def use_plenary_book_view(self):
"""Pass through to provider CommentBookSession.use_plenary_book_view"""
self._book_view = PLENARY
# self._get_provider_session('comment_book_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_book_view()
except AttributeError:
pass
def get_comment_ids_by_book(self, *args, **kwargs):
"""Pass through to provider CommentBookSession.get_comment_ids_by_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.get_resource_ids_by_bin
return self._get_provider_session('comment_book_session').get_comment_ids_by_book(*args, **kwargs)
def get_comments_by_book(self, *args, **kwargs):
"""Pass through to provider CommentBookSession.get_comments_by_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.get_resources_by_bin
return self._get_provider_session('comment_book_session').get_comments_by_book(*args, **kwargs)
def get_comment_ids_by_books(self, *args, **kwargs):
"""Pass through to provider CommentBookSession.get_comment_ids_by_books"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.get_resource_ids_by_bins
return self._get_provider_session('comment_book_session').get_comment_ids_by_books(*args, **kwargs)
def get_comments_by_books(self, *args, **kwargs):
"""Pass through to provider CommentBookSession.get_comments_by_books"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.get_resources_by_bins
return self._get_provider_session('comment_book_session').get_comments_by_books(*args, **kwargs)
def get_book_ids_by_comment(self, *args, **kwargs):
"""Pass through to provider CommentBookSession.get_book_ids_by_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.get_bin_ids_by_resource
return self._get_provider_session('comment_book_session').get_book_ids_by_comment(*args, **kwargs)
def get_books_by_comment(self, *args, **kwargs):
"""Pass through to provider CommentBookSession.get_books_by_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinSession.get_bins_by_resource
catalogs = self._get_provider_session('comment_book_session').get_books_by_comment(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
##
# The following methods are from osid.commenting.CommentBookAssignmentSession
def can_assign_comments(self):
"""Pass through to provider CommentBookAssignmentSession.can_assign_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinAssignmentSession.can_assign_resources
return self._get_provider_session('comment_book_assignment_session').can_assign_comments()
def can_assign_comments_to_book(self, *args, **kwargs):
"""Pass through to provider CommentBookAssignmentSession.can_assign_comments_to_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinAssignmentSession.can_assign_resources_to_bin
return self._get_provider_session('comment_book_assignment_session').can_assign_comments_to_book(*args, **kwargs)
def get_assignable_book_ids(self, *args, **kwargs):
"""Pass through to provider CommentBookAssignmentSession.get_assignable_book_ids"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids
return self._get_provider_session('comment_book_assignment_session').get_assignable_book_ids(*args, **kwargs)
def get_assignable_book_ids_for_comment(self, *args, **kwargs):
"""Pass through to provider CommentBookAssignmentSession.get_assignable_book_ids_for_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinAssignmentSession.get_assignable_bin_ids_for_resource
return self._get_provider_session('comment_book_assignment_session').get_assignable_book_ids_for_comment(*args, **kwargs)
def assign_comment_to_book(self, *args, **kwargs):
"""Pass through to provider CommentBookAssignmentSession.assign_comment_to_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinAssignmentSession.assign_resource_to_bin
self._get_provider_session('comment_book_assignment_session').assign_comment_to_book(*args, **kwargs)
def unassign_comment_from_book(self, *args, **kwargs):
"""Pass through to provider CommentBookAssignmentSession.unassign_comment_from_book"""
# Implemented from kitosid template for -
# osid.resource.ResourceBinAssignmentSession.unassign_resource_from_bin
self._get_provider_session('comment_book_assignment_session').unassign_comment_from_book(*args, **kwargs)
def reassign_comment_to_book(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
##
# The following methods are from osid.commenting.BookLookupSession
def can_lookup_books(self):
"""Pass through to provider BookLookupSession.can_lookup_books"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.can_lookup_bins_template
return self._get_provider_session('book_lookup_session').can_lookup_books()
def get_book(self, *args, **kwargs):
"""Pass through to provider BookLookupSession.get_book"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bin
return Book(
self._provider_manager,
self._get_provider_session('book_lookup_session').get_book(*args, **kwargs),
self._runtime,
self._proxy)
def get_books_by_ids(self, *args, **kwargs):
"""Pass through to provider BookLookupSession.get_books_by_ids"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_ids
catalogs = self._get_provider_session('book_lookup_session').get_books_by_ids(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
def get_books_by_genus_type(self, *args, **kwargs):
"""Pass through to provider BookLookupSession.get_books_by_genus_type"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_genus_type
catalogs = self._get_provider_session('book_lookup_session').get_books_by_genus_type(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
def get_books_by_parent_genus_type(self, *args, **kwargs):
"""Pass through to provider BookLookupSession.get_books_by_parent_genus_type"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_parent_genus_type
catalogs = self._get_provider_session('book_lookup_session').get_books_by_parent_genus_type(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
def get_books_by_record_type(self, *args, **kwargs):
"""Pass through to provider BookLookupSession.get_books_by_record_type"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_record_type
catalogs = self._get_provider_session('book_lookup_session').get_books_by_record_type(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
def get_books_by_provider(self, *args, **kwargs):
"""Pass through to provider BookLookupSession.get_books_by_provider"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_by_provider
catalogs = self._get_provider_session('book_lookup_session').get_books_by_provider(*args, **kwargs)
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
def get_books(self):
"""Pass through to provider BookLookupSession.get_books"""
# Implemented from kitosid template for -
# osid.resource.BinLookupSession.get_bins_template
catalogs = self._get_provider_session('book_lookup_session').get_books()
cat_list = []
for cat in catalogs:
cat_list.append(Book(self._provider_manager, cat, self._runtime, self._proxy))
return BookList(cat_list)
books = property(fget=get_books)
##
# The following methods are from osid.commenting.BookAdminSession
def can_create_books(self):
"""Pass through to provider BookAdminSession.can_create_books"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.can_create_bins
return self._get_provider_session('book_admin_session').can_create_books()
def can_create_book_with_record_types(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.can_create_book_with_record_types"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.can_create_bin_with_record_types
return self._get_provider_session('book_admin_session').can_create_book_with_record_types(*args, **kwargs)
def get_book_form_for_create(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.get_book_form_for_create"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_create
return self._get_provider_session('book_admin_session').get_book_form_for_create(*args, **kwargs)
def create_book(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.create_book"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.create_bin
return Book(
self._provider_manager,
self._get_provider_session('book_admin_session').create_book(*args, **kwargs),
self._runtime,
self._proxy)
def can_update_books(self):
"""Pass through to provider BookAdminSession.can_update_books"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.can_update_bins
return self._get_provider_session('book_admin_session').can_update_books()
def get_book_form_for_update(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.get_book_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update
return self._get_provider_session('book_admin_session').get_book_form_for_update(*args, **kwargs)
def get_book_form(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.get_book_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.get_bin_form_for_update_template
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'book_record_types' in kwargs:
return self.get_book_form_for_create(*args, **kwargs)
else:
return self.get_book_form_for_update(*args, **kwargs)
def update_book(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.update_book"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
# OSID spec does not require returning updated catalog
return Book(
self._provider_manager,
self._get_provider_session('book_admin_session').update_book(*args, **kwargs),
self._runtime,
self._proxy)
def save_book(self, book_form, *args, **kwargs):
"""Pass through to provider BookAdminSession.update_book"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.update_bin
if book_form.is_for_update():
return self.update_book(book_form, *args, **kwargs)
else:
return self.create_book(book_form, *args, **kwargs)
def can_delete_books(self):
"""Pass through to provider BookAdminSession.can_delete_books"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.can_delete_bins
return self._get_provider_session('book_admin_session').can_delete_books()
def delete_book(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.delete_book"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.delete_bin
self._get_provider_session('book_admin_session').delete_book(*args, **kwargs)
def can_manage_book_aliases(self):
"""Pass through to provider BookAdminSession.can_manage_book_aliases"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.can_manage_resource_aliases_template
return self._get_provider_session('book_admin_session').can_manage_book_aliases()
def alias_book(self, *args, **kwargs):
"""Pass through to provider BookAdminSession.alias_book"""
# Implemented from kitosid template for -
# osid.resource.BinAdminSession.alias_bin
self._get_provider_session('book_admin_session').alias_book(*args, **kwargs)
##
# The following methods are from osid.commenting.BookHierarchySession
def get_book_hierarchy_id(self):
"""Pass through to provider BookHierarchySession.get_book_hierarchy_id"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_bin_hierarchy_id
return self._get_provider_session('book_hierarchy_session').get_book_hierarchy_id()
book_hierarchy_id = property(fget=get_book_hierarchy_id)
def get_book_hierarchy(self):
"""Pass through to provider BookHierarchySession.get_book_hierarchy"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_bin_hierarchy
return self._get_provider_session('book_hierarchy_session').get_book_hierarchy()
book_hierarchy = property(fget=get_book_hierarchy)
def can_access_book_hierarchy(self):
"""Pass through to provider BookHierarchySession.can_access_book_hierarchy"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.can_access_bin_hierarchy
return self._get_provider_session('book_hierarchy_session').can_access_book_hierarchy()
def get_root_book_ids(self):
"""Pass through to provider BookHierarchySession.get_root_book_ids"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_root_bin_ids
return self._get_provider_session('book_hierarchy_session').get_root_book_ids()
root_book_ids = property(fget=get_root_book_ids)
def get_root_books(self):
"""Pass through to provider BookHierarchySession.get_root_books"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_root_bins
return self._get_provider_session('book_hierarchy_session').get_root_books()
root_books = property(fget=get_root_books)
def has_parent_books(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.has_parent_books"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.has_parent_bins
return self._get_provider_session('book_hierarchy_session').has_parent_books(*args, **kwargs)
def is_parent_of_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.is_parent_of_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.is_parent_of_bin
return self._get_provider_session('book_hierarchy_session').is_parent_of_book(*args, **kwargs)
def get_parent_book_ids(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.get_parent_book_ids"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_parent_bin_ids
return self._get_provider_session('book_hierarchy_session').get_parent_book_ids(*args, **kwargs)
def get_parent_books(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.get_parent_books"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_parent_bins
return self._get_provider_session('book_hierarchy_session').get_parent_books(*args, **kwargs)
def is_ancestor_of_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.is_ancestor_of_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.is_ancestor_of_bin
return self._get_provider_session('book_hierarchy_session').is_ancestor_of_book(*args, **kwargs)
def has_child_books(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.has_child_books"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.has_child_bins
return self._get_provider_session('book_hierarchy_session').has_child_books(*args, **kwargs)
def is_child_of_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.is_child_of_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.is_child_of_bin
return self._get_provider_session('book_hierarchy_session').is_child_of_book(*args, **kwargs)
def get_child_book_ids(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.get_child_book_ids"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_child_bin_ids
return self._get_provider_session('book_hierarchy_session').get_child_book_ids(*args, **kwargs)
def get_child_books(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.get_child_books"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_child_bins
return self._get_provider_session('book_hierarchy_session').get_child_books(*args, **kwargs)
def is_descendant_of_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.is_descendant_of_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.is_descendant_of_bin
return self._get_provider_session('book_hierarchy_session').is_descendant_of_book(*args, **kwargs)
def get_book_node_ids(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.get_book_node_ids"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_bin_node_ids
return self._get_provider_session('book_hierarchy_session').get_book_node_ids(*args, **kwargs)
def get_book_nodes(self, *args, **kwargs):
"""Pass through to provider BookHierarchySession.get_book_nodes"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchySession.get_bin_nodes
return self._get_provider_session('book_hierarchy_session').get_book_nodes(*args, **kwargs)
##
# The following methods are from osid.commenting.BookHierarchyDesignSession
def can_modify_book_hierarchy(self):
"""Pass through to provider BookHierarchyDesignSession.can_modify_book_hierarchy"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchyDesignSession.can_modify_bin_hierarchy
return self._get_provider_session('book_hierarchy_design_session').can_modify_book_hierarchy()
def create_book_hierarchy(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.can_modify_book_hierarchy"""
# Patched in by <EMAIL>, Jul 23, 2014, added by birdland to template on Aug 8, 2014
# Is not part of specs for catalog hierarchy design sessions, but may want to be in hierarchy service instead
# Will not return an actual object, just JSON
# since a BankHierarchy does not seem to be an OSID thing.
return self._get_provider_session('book_hierarchy_design_session').create_book_hierarchy(*args, **kwargs)
def delete_book_hierarchy(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.can_modify_book_hierarchy"""
# Patched in by <EMAIL>, Jul 23, 2014, added by birdland to template on Aug 8, 2014
# Is not part of specs for catalog hierarchy design sessions, but may want to be in hierarchy service instead
# Will not return an actual object, just JSON
# since a BankHierarchy does not seem to be an OSID thing.
return self._get_provider_session('book_hierarchy_design_session').delete_book_hierarchy(*args, **kwargs)
def add_root_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.add_root_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchyDesignSession.add_root_bin
self._get_provider_session('book_hierarchy_design_session').add_root_book(*args, **kwargs)
def remove_root_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.remove_root_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchyDesignSession.remove_root_bin
self._get_provider_session('book_hierarchy_design_session').remove_root_book(*args, **kwargs)
def add_child_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.add_child_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchyDesignSession.add_child_bin
self._get_provider_session('book_hierarchy_design_session').add_child_book(*args, **kwargs)
def remove_child_book(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.remove_child_book"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchyDesignSession.remove_child_bin
self._get_provider_session('book_hierarchy_design_session').remove_child_book(*args, **kwargs)
def remove_child_books(self, *args, **kwargs):
"""Pass through to provider BookHierarchyDesignSession.remove_child_books"""
# Implemented from kitosid template for -
# osid.resource.BinHierarchyDesignSession.remove_child_bins
self._get_provider_session('book_hierarchy_design_session').remove_child_books(*args, **kwargs)
class CommentingProxyManager(osid.OsidProxyManager, CommentingProfile, commenting_managers.CommentingProxyManager):
"""CommentingProxyManager convenience adapter including related Session methods."""
def get_comment_lookup_session(self, *args, **kwargs):
"""Sends control to Manager"""
# Implemented from kitosid template for -
# osid.resource.ResourceProxyManager.get_resource_lookup_session_template
return CommentingManager.get_comment_lookup_session(*args, **kwargs)
def get_comment_lookup_session_for_book(self, *args, **kwargs):
"""Sends control to Manager"""
# Implemented from kitosid template for -
# osid.resource.ResourceProxyManager.get_resource_lookup_session_for_bin_template
return CommentingManager.get_comment_lookup_session_for_book(*args, **kwargs)
def get_comment_query_session(self, *args, **kwargs):
"""Sends control to Manager"""
# Implemented from kitosid template for -
# osid.resource.ResourceProxyManager.get_resource_lookup_session_template
return CommentingManager.get_comment_query_session(*args, **kwargs)
def get_comment_query_session_for_book(self, *args, **kwargs):
"""Sends control to Manager"""
# Implemented from kitosid template for -
# osid.resource.ResourceProxyManager.get_resource_lookup_session_for_bin_template
return CommentingManager.get_comment_query_session_for_book(*args, **kwargs)
def get_comment_admin_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comment_admin_session_for_book(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comment_book_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comment_book_assignment_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_book_lookup_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_book_admin_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_book_hierarchy_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_book_hierarchy_design_session(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_commenting_batch_proxy_manager(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services')
commenting_batch_proxy_manager = property(fget=get_commenting_batch_proxy_manager)
class Book(abc_commenting_objects.Book, osid.OsidSession, osid.OsidCatalog):
"""Book convenience adapter including related Session methods."""
# WILL THIS EVER BE CALLED DIRECTLY - OUTSIDE OF A MANAGER?
def __init__(self, provider_manager, catalog, runtime, proxy, **kwargs):
self._provider_manager = provider_manager
self._catalog = catalog
self._runtime = runtime
osid.OsidObject.__init__(self, self._catalog) # This is to initialize self._object
osid.OsidSession.__init__(self, proxy) # This is to initialize self._proxy
self._catalog_id = catalog.get_id()
self._provider_sessions = kwargs
self._session_management = AUTOMATIC
self._book_view = DEFAULT
self._object_views = dict()
self._operable_views = dict()
self._containable_views = dict()
def _set_book_view(self, session):
"""Sets the underlying book view to match current view"""
if self._book_view == FEDERATED:
try:
session.use_federated_book_view()
except AttributeError:
pass
else:
try:
session.use_isolated_book_view()
except AttributeError:
pass
def _set_object_view(self, session):
"""Sets the underlying object views to match current view"""
for obj_name in self._object_views:
if self._object_views[obj_name] == PLENARY:
try:
getattr(session, 'use_plenary_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_comparative_' + obj_name + '_view')()
except AttributeError:
pass
def _set_operable_view(self, session):
"""Sets the underlying operable views to match current view"""
for obj_name in self._operable_views:
if self._operable_views[obj_name] == ACTIVE:
try:
getattr(session, 'use_active_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_any_status_' + obj_name + '_view')()
except AttributeError:
pass
def _set_containable_view(self, session):
"""Sets the underlying containable views to match current view"""
for obj_name in self._containable_views:
if self._containable_views[obj_name] == SEQUESTERED:
try:
getattr(session, 'use_sequestered_' + obj_name + '_view')()
except AttributeError:
pass
else:
try:
getattr(session, 'use_unsequestered_' + obj_name + '_view')()
except AttributeError:
pass
def _get_provider_session(self, session_name):
"""Returns the requested provider session.
Instantiates a new one if the named session is not already known.
"""
agent_key = self._get_agent_key()
if session_name in self._provider_sessions[agent_key]:
return self._provider_sessions[agent_key][session_name]
else:
session_class = getattr(self._provider_manager, 'get_' + session_name + '_for_book')
if self._proxy is None:
if 'notification_session' in session_name:
# Is there something else we should do about the receiver field?
session = session_class('fake receiver', self._catalog.get_id())
else:
session = session_class(self._catalog.get_id())
else:
if 'notification_session' in session_name:
# Is there something else we should do about the receiver field?
session = session_class('fake receiver', self._catalog.get_id(), self._proxy)
else:
session = session_class(self._catalog.get_id(), self._proxy)
self._set_book_view(session)
self._set_object_view(session)
self._set_operable_view(session)
self._set_containable_view(session)
if self._session_management != DISABLED:
self._provider_sessions[agent_key][session_name] = session
return session
def get_book_id(self):
"""Gets the Id of this book."""
return self._catalog_id
def get_book(self):
"""Strange little method to assure conformance for inherited Sessions."""
return self
def __getattr__(self, name):
if '_catalog' in self.__dict__:
try:
return self._catalog[name]
except AttributeError:
pass
raise AttributeError
def close_sessions(self):
"""Close all sessions currently being managed by this Manager to save memory."""
if self._session_management != MANDATORY:
self._provider_sessions = dict()
else:
raise IllegalState()
def use_automatic_session_management(self):
"""Session state will be saved until closed by consumers."""
self._session_management = AUTOMATIC
def use_mandatory_session_management(self):
"""Session state will always be saved and can not be closed by consumers."""
# Session state will be saved and can not be closed by consumers
self._session_management = MANDATORY
def disable_session_management(self):
"""Session state will never be saved."""
self._session_management = DISABLED
self.close_sessions()
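    # A hedged illustration of the three policies above, assuming `book` is an
    # instance of this adapter obtained from a CommentingManager:
    #   book.use_mandatory_session_management()  # sessions cached; close_sessions() raises IllegalState
    #   book.use_automatic_session_management()  # sessions cached; close_sessions() clears the cache
    #   book.disable_session_management()        # nothing cached; any cached sessions are dropped immediately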
def get_book_record(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
##
# The following methods are from osid.commenting.CommentLookupSession
def can_lookup_comments(self):
"""Pass through to provider CommentLookupSession.can_lookup_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.can_lookup_resources_template
return self._get_provider_session('comment_lookup_session').can_lookup_comments()
def use_comparative_comment_view(self):
"""Pass through to provider CommentLookupSession.use_comparative_comment_view"""
self._object_views['comment'] = COMPARATIVE
# self._get_provider_session('comment_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_comparative_comment_view()
except AttributeError:
pass
def use_plenary_comment_view(self):
"""Pass through to provider CommentLookupSession.use_plenary_comment_view"""
self._object_views['comment'] = PLENARY
# self._get_provider_session('comment_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_plenary_comment_view()
except AttributeError:
pass
def use_federated_book_view(self):
"""Pass through to provider CommentLookupSession.use_federated_book_view"""
self._book_view = FEDERATED
# self._get_provider_session('comment_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_federated_book_view()
except AttributeError:
pass
def use_isolated_book_view(self):
"""Pass through to provider CommentLookupSession.use_isolated_book_view"""
self._book_view = ISOLATED
# self._get_provider_session('comment_lookup_session') # To make sure the session is tracked
for session in self._get_provider_sessions():
try:
session.use_isolated_book_view()
except AttributeError:
pass
def use_effective_comment_view(self):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services')
def use_any_effective_comment_view(self):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services')
def get_comment(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.get_resource_template
return self._get_provider_session('comment_lookup_session').get_comment(*args, **kwargs)
def get_comments_by_ids(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comments_by_ids"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_ids_template
return self._get_provider_session('comment_lookup_session').get_comments_by_ids(*args, **kwargs)
def get_comments_by_genus_type(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comments_by_genus_type"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_genus_type_template
return self._get_provider_session('comment_lookup_session').get_comments_by_genus_type(*args, **kwargs)
def get_comments_by_parent_genus_type(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comments_by_parent_genus_type"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_parent_genus_type_template
return self._get_provider_session('comment_lookup_session').get_comments_by_parent_genus_type(*args, **kwargs)
def get_comments_by_record_type(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comments_by_record_type"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.get_resources_by_record_type_template
return self._get_provider_session('comment_lookup_session').get_comments_by_record_type(*args, **kwargs)
def get_comments_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_by_genus_type_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_for_commentor(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_for_commentor_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_by_genus_type_for_commentor(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_by_genus_type_for_commentor_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_for_reference(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comments_for_reference"""
# Implemented from kitosid template for -
# osid.relationship.RelationshipLookupSession.get_relationships_for_source_template
return self._get_provider_session('comment_lookup_session').get_comments_for_reference(*args, **kwargs)
def get_comments_for_reference_on_date(self, *args, **kwargs):
"""Pass through to provider CommentLookupSession.get_comments_for_reference_on_date"""
# Implemented from kitosid template for -
# osid.relationship.RelationshipLookupSession.get_relationships_for_source_on_date_template
return self._get_provider_session('comment_lookup_session').get_comments_for_reference_on_date(*args, **kwargs)
def get_comments_by_genus_type_for_reference(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_by_genus_type_for_reference_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_for_commentor_and_reference(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_for_commentor_and_reference_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_by_genus_type_for_commentor_and_reference(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments_by_genus_type_for_commentor_and_reference_on_date(self, *args, **kwargs):
"""Pass through to provider unimplemented"""
raise Unimplemented('Unimplemented in dlkit.services - args=' + str(args) + ', kwargs=' + str(kwargs))
def get_comments(self):
"""Pass through to provider CommentLookupSession.get_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceLookupSession.get_resources_template
return self._get_provider_session('comment_lookup_session').get_comments()
comments = property(fget=get_comments)
##
# The following methods are from osid.commenting.CommentQuerySession
def can_search_comments(self):
"""Pass through to provider CommentQuerySession.can_search_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceQuerySession.can_search_resources_template
return self._get_provider_session('comment_query_session').can_search_comments()
def get_comment_query(self):
"""Pass through to provider CommentQuerySession.get_comment_query"""
# Implemented from kitosid template for -
# osid.resource.ResourceQuerySession.get_item_query_template
return self._get_provider_session('comment_query_session').get_comment_query()
comment_query = property(fget=get_comment_query)
def get_comments_by_query(self, *args, **kwargs):
"""Pass through to provider CommentQuerySession.get_comments_by_query"""
# Implemented from kitosid template for -
# osid.resource.ResourceQuerySession.get_items_by_query_template
return self._get_provider_session('comment_query_session').get_comments_by_query(*args, **kwargs)
##
# The following methods are from osid.commenting.CommentAdminSession
def can_create_comments(self):
"""Pass through to provider CommentAdminSession.can_create_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.can_create_resources
return self._get_provider_session('comment_admin_session').can_create_comments()
def can_create_comment_with_record_types(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.can_create_comment_with_record_types"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.can_create_resource_with_record_types
return self._get_provider_session('comment_admin_session').can_create_comment_with_record_types(*args, **kwargs)
def get_comment_form_for_create(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.get_comment_form_for_create"""
# Implemented from -
# osid.commenting.CommentAdminSession.get_comment_form_for_create
return self._get_provider_session('comment_admin_session').get_comment_form_for_create(*args, **kwargs)
def create_comment(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.create_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.create_resource
return self._get_provider_session('comment_admin_session').create_comment(*args, **kwargs)
def can_update_comments(self):
"""Pass through to provider CommentAdminSession.can_update_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.can_update_resources
return self._get_provider_session('comment_admin_session').can_update_comments()
def get_comment_form_for_update(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.get_comment_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
return self._get_provider_session('comment_admin_session').get_comment_form_for_update(*args, **kwargs)
def get_comment_form(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.get_comment_form_for_update"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
# This method might be a bit sketchy. Time will tell.
if isinstance(args[-1], list) or 'comment_record_types' in kwargs:
return self.get_comment_form_for_create(*args, **kwargs)
else:
return self.get_comment_form_for_update(*args, **kwargs)
def duplicate_comment(self, comment_id):
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.get_resource_form_for_update
return self._get_provider_session('comment_admin_session').duplicate_comment(comment_id)
def update_comment(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.update_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
# Note: The OSID spec does not require returning updated object
return self._get_provider_session('comment_admin_session').update_comment(*args, **kwargs)
def save_comment(self, comment_form, *args, **kwargs):
"""Pass through to provider CommentAdminSession.update_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.update_resource
if comment_form.is_for_update():
return self.update_comment(comment_form, *args, **kwargs)
else:
return self.create_comment(comment_form, *args, **kwargs)
def can_delete_comments(self):
"""Pass through to provider CommentAdminSession.can_delete_comments"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.can_delete_resources
return self._get_provider_session('comment_admin_session').can_delete_comments()
def delete_comment(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.delete_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.delete_resource
self._get_provider_session('comment_admin_session').delete_comment(*args, **kwargs)
def can_manage_comment_aliases(self):
"""Pass through to provider CommentAdminSession.can_manage_comment_aliases"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.can_manage_resource_aliases_template
return self._get_provider_session('comment_admin_session').can_manage_comment_aliases()
def alias_comment(self, *args, **kwargs):
"""Pass through to provider CommentAdminSession.alias_comment"""
# Implemented from kitosid template for -
# osid.resource.ResourceAdminSession.alias_resources
self._get_provider_session('comment_admin_session').alias_comment(*args, **kwargs)
class BookList(abc_commenting_objects.BookList, osid.OsidList):
"""BookList convenience adapter including related Session methods."""
def get_next_book(self):
"""Gets next object"""
# Implemented from kitosid template for -
# osid.resource.ResourceList.get_next_resource
try:
next_item = next(self)
except StopIteration:
raise IllegalState('no more elements available in this list')
else:
return next_item
def next(self):
"""next method for enumerator"""
# Implemented from kitosid template for -
# osid.resource.ResourceList.get_next_resource
next_item = osid.OsidList.next(self)
return next_item
__next__ = next
next_book = property(fget=get_next_book)
def get_next_books(self, n):
"""gets next n objects from list"""
# Implemented from kitosid template for -
# osid.resource.ResourceList.get_next_resources
if n > self.available():
# !!! This is not quite as specified (see method docs) !!!
raise IllegalState('not enough elements available in this list')
else:
next_list = []
i = 0
while i < n:
try:
next_list.append(next(self))
except StopIteration:
break
i += 1
return next_list
| 2.15625
| 2
|
service/user.py
|
kittolau/selepy
| 0
|
12784114
|
<filename>service/user.py
from service.logger_manager import logger
class Map(dict):
"""
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(Map, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.iteritems():
if isinstance(v, dict):
self[k] = Map(v)
elif isinstance(v, list):
for idx, elm in enumerate(v):
if isinstance(elm, dict):
v[idx] = Map(elm)
else:
self[k] = v
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise Exception("User missing key {attr}".format(attr=attr))
class User(dict):
"""
Example:
m = Map({'first_name': 'Eduardo'}, last_name='Pool', age=24, sports=['Soccer'])
"""
def __init__(self, *args, **kwargs):
super(User, self).__init__(*args, **kwargs)
for arg in args:
if isinstance(arg, dict):
for k, v in arg.iteritems():
if isinstance(v, dict):
self[k] = Map(v)
elif isinstance(v, list):
for idx, elm in enumerate(v):
if isinstance(elm, dict):
v[idx] = Map(elm)
else:
self[k] = v
def __getattr__(self, attr):
try:
return self[attr]
except KeyError:
raise Exception("User missing key {attr}".format(attr=attr))
def setUsersCollection(self,users_collection):
self.users_collection = users_collection
def isWebAccountExist(self,domain):
accounts = self.web_accounts
for account in accounts:
if account.domains == domain:
return True
return False
def getWebAccount(self,domain):
accounts = self.web_accounts
for account in accounts:
if account.domains == domain:
return account
return None
def getBirthDayDayWithLeadingZero(self):
return "%02d" % self.birth_day_day
def getBirthDayMonthWithLeadingZero(self):
return "%02d" % self.birth_day_month
def generateWebAccountObj(self,domain=None,username=None,password=None):
if not isinstance(domain, basestring):
raise Exception("domain expects string in generateWebAccountObj")
if not isinstance(username, basestring) and username is not None:
raise Exception("username expects string or None in generateWebAccountObj")
if not isinstance(password, basestring) and password is not None:
raise Exception("password expects string or None in generateWebAccountObj")
if username is None:
username = self.default_username
if password is None:
            password = self.default_password
web_acount = {
"domain" : domain,
"username" : username,
"password" : password
}
return web_acount
def addWebAccount(self,web_acount):
logger.info("added web account:\nuser: %s\ndomain: %s\nusername: %s\npassword: %s\n" % (
self.default_username,
web_acount["domain"],
web_acount["username"],
web_acount["password"]
))
self.users_collection.update({"_id":self._id},{"$addToSet":{"web_accounts":web_acount}})
    def getWebAccount(self,domain):
        # Note: this definition overrides the earlier getWebAccount defined above.
        web_account = next((d for d in self.web_accounts if d["domain"] == domain), None)
        if web_account is None:
            raise Exception("username %s does not have web account %s" % (self.default_username, domain))
        return web_account
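# A minimal, hedged usage sketch for the User helper above; the MongoDB collection,
# user document, and domain below are illustrative, not taken from this project:
#   user = User({"_id": 1, "default_username": "alice", "default_password": "secret", "web_accounts": []})
#   user.setUsersCollection(users_collection)                   # a pymongo collection
#   account = user.generateWebAccountObj(domain="example.com")
#   user.addWebAccount(account)                                 # logs and $addToSet's the account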
| 2.984375
| 3
|
project_euler/problem_112/sol1.py
|
kadirbaytin/Python
| 0
|
12784115
|
<reponame>kadirbaytin/Python<filename>project_euler/problem_112/sol1.py<gh_stars>0
"""
Problem:
Working from left-to-right if no digit is exceeded by the digit to its left it is called an increasing number; for example, 134468.
Similarly if no digit is exceeded by the digit to its right it is called a decreasing number; for example, 66420.
We shall call a positive integer that is neither increasing nor decreasing a "bouncy" number; for example, 155349.
Clearly there cannot be any bouncy numbers below one-hundred, but just over half of the numbers below one-thousand (525) are bouncy. In fact, the least number for which the proportion of bouncy numbers first reaches 50% is 538.
Surprisingly, bouncy numbers become more and more common and by the time we reach 21780 the proportion of bouncy numbers is equal to 90%.
Find the least number for which the proportion of bouncy numbers is exactly 99%.
"""
#!/usr/bin/env python
from itertools import *
def pairwise(iterable):
"s -> (s0,s1), (s1,s2), (s2, s3), ..."
a, b = tee(iterable)
next(b, None)
return zip(a, b)
def digits(n):
return list(map(int, str(n)))
def is_increasing(n):
return all(prev <= curr for prev, curr in pairwise(digits(n)))
def is_decreasing(n):
return all(prev >= curr for prev, curr in pairwise(digits(n)))
def is_bouncy(n):
return not is_increasing(n) and not is_decreasing(n)
def running_total(iterable):
total = 0
for element in iterable:
total += element
yield total
def main():
    nums = count(1)
    bouncy = running_total(map(lambda n: int(is_bouncy(n)), count(1)))
    # Integer arithmetic avoids relying on exact floating-point equality for the 99% check.
    print(next(n for n, b in zip(nums, bouncy) if 100 * b == 99 * n))
if __name__ == "__main__": main()
| 4.03125
| 4
|
App/gui/server_widget.py
|
Wizard-collab/wizard
| 0
|
12784116
|
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'C:\Users\conta\Documents\script\Wizard\App\work\ui_files\server_widget.ui'
#
# Created by: PyQt5 UI code generator 5.13.0
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_Form(object):
def setupUi(self, Form):
Form.setObjectName("Form")
Form.resize(384, 564)
self.verticalLayout = QtWidgets.QVBoxLayout(Form)
self.verticalLayout.setContentsMargins(2, 2, 2, 2)
self.verticalLayout.setSpacing(1)
self.verticalLayout.setObjectName("verticalLayout")
self.frame_2 = QtWidgets.QFrame(Form)
self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_2.setObjectName("frame_2")
self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_2)
self.horizontalLayout_2.setContentsMargins(1, 1, 1, 1)
self.horizontalLayout_2.setSpacing(1)
self.horizontalLayout_2.setObjectName("horizontalLayout_2")
self.server_quit_pushButton = QtWidgets.QPushButton(self.frame_2)
self.server_quit_pushButton.setMinimumSize(QtCore.QSize(0, 40))
self.server_quit_pushButton.setObjectName("server_quit_pushButton")
self.horizontalLayout_2.addWidget(self.server_quit_pushButton)
self.server_reduce_pushButton = QtWidgets.QPushButton(self.frame_2)
self.server_reduce_pushButton.setMinimumSize(QtCore.QSize(0, 40))
self.server_reduce_pushButton.setObjectName("server_reduce_pushButton")
self.horizontalLayout_2.addWidget(self.server_reduce_pushButton)
self.verticalLayout.addWidget(self.frame_2)
self.frame_3 = QtWidgets.QFrame(Form)
self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame_3.setObjectName("frame_3")
self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_3)
self.horizontalLayout_3.setObjectName("horizontalLayout_3")
self.server_project_label = QtWidgets.QLabel(self.frame_3)
self.server_project_label.setObjectName("server_project_label")
self.horizontalLayout_3.addWidget(self.server_project_label)
self.verticalLayout.addWidget(self.frame_3)
self.frame = QtWidgets.QFrame(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.frame.sizePolicy().hasHeightForWidth())
self.frame.setSizePolicy(sizePolicy)
self.frame.setFrameShape(QtWidgets.QFrame.StyledPanel)
self.frame.setFrameShadow(QtWidgets.QFrame.Raised)
self.frame.setObjectName("frame")
self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame)
self.horizontalLayout.setObjectName("horizontalLayout")
self.server_ip_label = QtWidgets.QLabel(self.frame)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Fixed)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.server_ip_label.sizePolicy().hasHeightForWidth())
self.server_ip_label.setSizePolicy(sizePolicy)
self.server_ip_label.setObjectName("server_ip_label")
self.horizontalLayout.addWidget(self.server_ip_label)
self.server_connexions_label = QtWidgets.QLabel(self.frame)
self.server_connexions_label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.server_connexions_label.setAlignment(QtCore.Qt.AlignRight|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter)
self.server_connexions_label.setObjectName("server_connexions_label")
self.horizontalLayout.addWidget(self.server_connexions_label)
self.verticalLayout.addWidget(self.frame)
self.server__users_verticalFrame = QtWidgets.QFrame(Form)
sizePolicy = QtWidgets.QSizePolicy(QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Expanding)
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(self.server__users_verticalFrame.sizePolicy().hasHeightForWidth())
self.server__users_verticalFrame.setSizePolicy(sizePolicy)
self.server__users_verticalFrame.setObjectName("server__users_verticalFrame")
self.users_layout_0 = QtWidgets.QVBoxLayout(self.server__users_verticalFrame)
self.users_layout_0.setObjectName("users_layout_0")
self.users_layout = QtWidgets.QVBoxLayout()
self.users_layout.setObjectName("users_layout")
self.users_layout_0.addLayout(self.users_layout)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.users_layout_0.addItem(spacerItem)
self.verticalLayout.addWidget(self.server__users_verticalFrame)
self.log_textEdit = QtWidgets.QTextEdit(Form)
self.log_textEdit.setReadOnly(True)
self.log_textEdit.setTextInteractionFlags(QtCore.Qt.NoTextInteraction)
self.log_textEdit.setObjectName("log_textEdit")
self.verticalLayout.addWidget(self.log_textEdit)
self.retranslateUi(Form)
QtCore.QMetaObject.connectSlotsByName(Form)
def retranslateUi(self, Form):
_translate = QtCore.QCoreApplication.translate
Form.setWindowTitle(_translate("Form", "Form"))
self.server_quit_pushButton.setText(_translate("Form", "Quit"))
self.server_reduce_pushButton.setText(_translate("Form", "Reduce"))
self.server_project_label.setText(_translate("Form", "Project"))
self.server_ip_label.setText(_translate("Form", "IP Label"))
self.server_connexions_label.setText(_translate("Form", "Connexions"))
if __name__ == "__main__":
import sys
app = QtWidgets.QApplication(sys.argv)
Form = QtWidgets.QWidget()
ui = Ui_Form()
ui.setupUi(Form)
Form.show()
sys.exit(app.exec_())
| 1.84375
| 2
|
src/visualization_helpers.py
|
edesz/store-item-demand-forecast
| 3
|
12784117
|
<gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import pandas as pd
from matplotlib.ticker import MaxNLocator
def customize_splines(ax: plt.axis) -> plt.axis:
ax.spines["left"].set_edgecolor("black"),
ax.spines["left"].set_linewidth(2),
ax.spines["left"].set_zorder(3),
ax.spines["bottom"].set_edgecolor("black"),
ax.spines["bottom"].set_linewidth(2),
ax.spines["bottom"].set_zorder(3),
ax.spines["top"].set_edgecolor("lightgrey"),
ax.spines["top"].set_linewidth(1),
ax.spines["right"].set_edgecolor("lightgrey"),
ax.spines["right"].set_linewidth(1),
return ax
def plot_store_item_grid(
df, store_item_dict, year_start, year_end, ts_name, fig_size=(12, 40)
):
fig = plt.figure(figsize=fig_size)
grid = plt.GridSpec(10, 1, hspace=0, wspace=0)
ax0 = fig.add_subplot(grid[0, 0])
stores = store_item_dict["store"]
items = store_item_dict["item"]
for k in range(10):
if k > 0:
ax = fig.add_subplot(grid[k, 0], xticklabels=[], sharex=ax0)
else:
ax = ax0
ts = df[
(df["store"] == stores[k]) & (df["item"] == items[k])
].set_index("date")
ts = ts.loc[year_start:year_end][ts_name]
ts.plot(ax=ax, color="blue")
ax.grid(color="lightgrey")
ax.set_title(
f"Store {stores[k]}, item {items[k]} in {year_start}-{year_end}",
loc="left",
fontweight="bold",
x=0.01,
y=0.85,
)
ax.set_xlabel(None)
ax.set_ylabel(None)
_ = customize_splines(ax)
def show_splits(
train_cv,
test_cv,
fold_number,
ts_name,
n_splits,
ptitle,
ax,
lw,
):
train_cv_slice = train_cv.loc[slice(ts_name), slice(None)]
test_cv_slice = test_cv.loc[slice(ts_name), slice(None)]
ax.plot(
train_cv_slice.index.get_level_values(1),
pd.Series([fold_number] * len(train_cv_slice), dtype=int),
color="black",
alpha=1.0,
label="train",
lw=lw,
)
ax.plot(
test_cv_slice.index.get_level_values(1),
pd.Series([fold_number] * len(test_cv_slice), dtype=int),
color="orange",
alpha=1.0,
label="test",
lw=lw,
)
ax.yaxis.set_major_locator(MaxNLocator(integer=True))
ax_to_annotate = 1 if "Expanding" in ptitle else n_splits
if fold_number == ax_to_annotate:
ax.set_ylabel("Fold Number", fontsize=16)
ax.legend(
loc="upper left",
bbox_to_anchor=(1.00, 1.05),
ncol=1,
handletextpad=0.4,
columnspacing=0.6,
frameon=False,
prop={"size": 12},
)
ax.grid()
ax.set_title(
f"CV folds using {ptitle} with {n_splits} folds",
fontweight="bold",
fontsize=14,
loc="left",
)
customize_splines(ax)
ax.tick_params(axis="x", labelsize=14)
ax.tick_params(axis="y", labelsize=14)
| 2.625
| 3
|
mp_video.py
|
yuangan/A2L
| 0
|
12784118
|
import cv2
import numpy as np
import mediapipe as mp
import glob
import os
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_mesh = mp.solutions.face_mesh
#import os
os.environ['JOBLIB_TEMP_FOLDER'] = '/tmp'
# wait for process: "W016","W017","W018","W019","W023","W024","W025","W026","W028","W029","W033","W035","W036","W037","W038","W040"
# M028 M029 M030 M031 M032 M033 M034 M035 M037 M039 M040 M041 W009 W011 W014 M042 W015
# M012 M013 M022 M026 M027 M031 M037 M041 W014
# W016 W018 W019 W023 W024 W025 W026 W028 W029 W033 W035 W036
# W040 W038 W037
in_path = glob.glob('/data3/MEAD/W036/video/front/*/level_*/0*.mp4')
#in_path = glob.glob('/data3/MEAD/M012/video/front/disgusted/level_2/027.mp4')
#print(in_path)
out_path = []
out_path_initlmk = []
out_path_motion = []
for pid,path in enumerate(in_path):
#print(pid,path)
p,f = os.path.split(path)
na,ext = os.path.splitext(f)
#print(p+"/"+na+"_multiland.npy")
out_path.append(p+"/"+na+"_multiland.npy")
out_path_initlmk.append(p+"/"+na+"_initlmk_multiland.npy")
out_path_motion.append(p+"/"+na+"_motion_multiland.npy")
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
def vis_landmark_on_img(img, shape, linewidth=2):
'''
Visualize landmark on images.
'''
def draw_curve(idx_list, color=(0, 255, 0), loop=False, lineWidth=linewidth):
for i in idx_list:
cv2.line(img, (shape[i, 0], shape[i, 1]), (shape[i + 1, 0], shape[i + 1, 1]), color, lineWidth)
if (loop):
cv2.line(img, (shape[idx_list[0], 0], shape[idx_list[0], 1]),
(shape[idx_list[-1] + 1, 0], shape[idx_list[-1] + 1, 1]), color, lineWidth)
draw_curve(list(range(0, 16)), color=(255, 144, 25)) # jaw
draw_curve(list(range(17, 21)), color=(50, 205, 50)) # eye brow
draw_curve(list(range(22, 26)), color=(50, 205, 50))
draw_curve(list(range(27, 35)), color=(208, 224, 63)) # nose
draw_curve(list(range(36, 41)), loop=True, color=(71, 99, 255)) # eyes
draw_curve(list(range(42, 47)), loop=True, color=(71, 99, 255))
draw_curve(list(range(48, 59)), loop=True, color=(238, 130, 238)) # mouth
draw_curve(list(range(60, 67)), loop=True, color=(238, 130, 238))
return img
with mp_face_mesh.FaceMesh(
min_detection_confidence=0.5,
min_tracking_confidence=0.5) as face_mesh:
for vid,vpath in enumerate(in_path):
videoReader = cv2.VideoCapture(in_path[vid])
fs = videoReader.get(cv2.CAP_PROP_FPS)
sz = (int(videoReader.get(cv2.CAP_PROP_FRAME_WIDTH)), int(videoReader.get(cv2.CAP_PROP_FRAME_HEIGHT)))
#vw = cv2.VideoWriter('./output/video.mp4',cv2.VideoWriter_fourcc('M','P','E','G'), fs, sz)
        land_res = []  # frames * 3 * number of landmarks per frame
motion_res = []
initlmk_res = []
success, frame = videoReader.read()
idx = 0
k = 0
while success:
#print(success)
#print(k)
k += 1
image = frame.copy()
#cv2.imwrite("./imgs/"+str(k)+"_im.png",image)
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if not results.multi_face_landmarks:
                success, frame = videoReader.read()  # grab the next frame
continue
face_landmarks = results.multi_face_landmarks[0]
land_loc = []
xlis = []
ylis = []
zlis = []
for lm in face_landmarks.landmark:
x = lm.x * sz[0]
y = lm.y * sz[1]
xlis.append(x)
ylis.append(y)
zlis.append(lm.z)
#print(x,y,lm.z)
land_loc.append(xlis)
land_loc.append(ylis)
land_loc.append(zlis)
land_res.append(land_loc)
if idx == 0 : initlmk_res.append(land_loc)
            # difference from the previous frame (the original subtracted the frame from itself, which is always zero)
            motion_res.append(list(np.array(land_loc) - np.array(land_res[-2] if len(land_res) > 1 else land_loc)))
idx += 1
# for face_landmarks in results.multi_face_landmarks:
# mp_drawing.draw_landmarks(
# image=image,
# landmark_list=face_landmarks,
# connections=mp_face_mesh.FACEMESH_CONTOURS,
# landmark_drawing_spec=drawing_spec,
# connection_drawing_spec=drawing_spec)
#cv2.imwrite('./output/video' + str(idx) + '.png', image)
            # vw.write(image)  # write the video frame
            success, frame = videoReader.read()  # grab the next frame
videoReader.release()
#vw.release()
res = np.array(land_res)
np.save(out_path[vid],res)
#np.save(out_path_initlmk[vid],initlmk_res)
#np.save(out_path_motion[vid],motion_res)
print("out:"+out_path[vid])
| 2.234375
| 2
|
py/admin/login.py
|
newdefence/cj
| 1
|
12784119
|
# coding=utf-8
from tornado.web import RequestHandler, HTTPError
from tornado.gen import coroutine
from bson import ObjectId
from bson.json_util import dumps, loads
__author__ = '<EMAIL>'
__date__ = "2018/12/14 下午9:58"
UID_KEY = 'uid'
class LoginHandler(RequestHandler):
def check_xsrf_cookie(self):
pass
def get(self):
if self.application.settings['debug']:
self.set_secure_cookie(UID_KEY, 'TEST_USER')
self.finish({'success': True, 'msg': '登录成功'})
else:
raise HTTPError(404)
def post(self):
user = loads(self.request.body)
self.set_secure_cookie(UID_KEY, user['name'])
self.finish({'success': True, 'msg': '登录成功'})
def put(self):
self.clear_cookie(UID_KEY)
self.finish({'success': True, 'msg': '退出成功'})
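# A minimal, hedged wiring sketch for the handler above; the route, cookie_secret,
# and port are illustrative, and it assumes `import tornado.ioloop, tornado.web`
# (set_secure_cookie requires a cookie_secret to be configured):
#   app = tornado.web.Application([(r"/admin/login", LoginHandler)],
#                                 cookie_secret="CHANGE_ME", debug=True)
#   app.listen(8888)
#   tornado.ioloop.IOLoop.current().start()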
| 2.328125
| 2
|
agent/tool/result_logger.py
|
it-akumi/tiss
| 1
|
12784120
|
<reponame>it-akumi/tiss
# -*- coding: utf-8 -*-
from config.log import TASK_RESULT_KEY, EPISODE_RESULT_KEY
import logging
import time
episode_result_logger = logging.getLogger(EPISODE_RESULT_KEY)
task_result_logger = logging.getLogger(TASK_RESULT_KEY)
class ResultLogger(object):
def __init__(self):
self.steps = 0
self.start_time = time.time()
self.episode = 0
self.task = 1
episode_result_logger.info("task,episode,step,time")
task_result_logger.info("task,success,failure")
def initialize(self):
self.start_time = time.time()
self.steps = 0
self.episode += 1
def step(self):
self.steps += 1
def report(self, success, failure, finished):
elapsed_time = time.time() - self.start_time
if finished:
task_result_logger.info('{}, {}, {}'.format(self.task, success, failure))
self.task += 1
self.episode = 0
episode_result_logger.info('{}, {}, {}, {}'.format(self.task, self.episode, self.steps, elapsed_time))
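# A minimal, hedged usage sketch: one task of three episodes, ten steps each.
# The loop bounds and success/failure counts are illustrative only.
if __name__ == '__main__':
    demo = ResultLogger()
    for episode in range(3):
        demo.initialize()
        for _ in range(10):
            demo.step()
        demo.report(success=episode + 1, failure=0, finished=(episode == 2))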
| 2.734375
| 3
|
insights/combiners/multinode.py
|
lhuett/insights-core
| 1
|
12784121
|
from insights import combiner
from insights.combiners.hostname import hostname
from insights.core.context import create_product
from insights.parsers.metadata import MetadataJson
from insights.specs import Specs
@combiner(MetadataJson, [hostname, Specs.machine_id])
def multinode_product(md, hn, machine_id):
hn = hn.fqdn if hn else machine_id.content[0].rstrip()
return create_product(md.data, hn)
@combiner(multinode_product)
def docker(product):
if product and product.name == "docker":
return product
@combiner(multinode_product)
def OSP(product):
if product and product.name == "osp":
return product
@combiner(multinode_product)
def RHEV(product):
if product and product.name == "rhev":
return product
@combiner(multinode_product)
def RHEL(product):
if product and product.name == "rhel":
return product
| 2.109375
| 2
|
src/nwb_datajoint/common/common_subject.py
|
jihyunbak/spyglass
| 1
|
12784122
|
<reponame>jihyunbak/spyglass<filename>src/nwb_datajoint/common/common_subject.py
import datajoint as dj
schema = dj.schema('common_subject')
@schema
class Subject(dj.Manual):
definition = """
subject_id: varchar(80)
---
age = NULL: varchar(200)
description = NULL: varchar(2000)
genotype = NULL: varchar(2000)
sex = "U": enum("M", "F", "U")
species = NULL: varchar(200)
"""
@classmethod
def insert_from_nwbfile(cls, nwbf):
"""Get the subject information from the NWBFile and insert it into the Subject table."""
sub = nwbf.subject
if sub is None:
print('No subject metadata found.\n')
return
subject_dict = dict()
subject_dict['subject_id'] = sub.subject_id
subject_dict['age'] = sub.age
subject_dict['description'] = sub.description
subject_dict['genotype'] = sub.genotype
if sub.sex in ('Male', 'male', 'M', 'm'):
sex = 'M'
elif sub.sex in ('Female', 'female', 'F', 'f'):
sex = 'F'
else:
sex = 'U'
subject_dict['sex'] = sex
subject_dict['species'] = sub.species
cls.insert1(subject_dict, skip_duplicates=True)
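# A minimal, hedged usage sketch (assumes a configured DataJoint connection and
# an NWB file at the hypothetical path 'session.nwb'):
#   import pynwb
#   with pynwb.NWBHDF5IO('session.nwb', 'r') as io:
#       Subject.insert_from_nwbfile(io.read())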
| 2.671875
| 3
|
bosonCloud/freq_res_script.py
|
Unoaccaso/freqle
| 0
|
12784123
|
<gh_stars>0
from cProfile import label
from freqle import cluster
import freqle as fr
import matplotlib.pyplot as plt
import numpy as np
import sys
Nbh = int(sys.argv[1])
cluster_eta = float(sys.argv[2])
Tfft = float(sys.argv[3])
binsize = 1/Tfft
mu = float(sys.argv[4])
pal = cluster(Nbh = Nbh, n_mus = 300, cluster_eta = cluster_eta )
pal.populate(v = True)
pal.build_mass_grid()
pal.emit_GW(v = True, maximum_det_freq = 2048)
pal.calc_freq_distr(remove_outliers = False, v = True, nbins = 64, norm_distr=False)
mu_index = np.argmin(np.abs(mu - pal.boson_grid))
freq = pal.get_freqs()[mu_index]
mu = pal.boson_grid[mu_index]
bins, counts, bin_size = fr.hist_by_row(freq, binsize=binsize, normalize=False)
cond = counts[0] > 10
bins = bins [0, :-1]
counts = counts [0]
bin_size = bin_size[0]
cond = counts > 10
sm = np.sum(counts[cond])
peakidx = np.argmax(counts)  # index of the most populated bin (argmin of a scalar was always 0)
plt.plot(bins[cond], counts[cond], label = f"Bhs at peak = {counts.max()/Nbh * 100:.2f} % ({int(counts.max())} BHs)\nTfft: {int(Tfft)} $s$\nPeak freq: {bins[peakidx] + binsize/2:.2f} $Hz$\nSomm 1s: {sm}")
plt.yscale('log')
plt.xscale('log')
plt.title(f"Distribuzione delle frequenze per {int(Nbh)} Buchi Neri\n Massa del bosone: {mu * 1e12:.2f} $10^{-12}eV$")
plt.xlabel("Frequenza misurata $[Hz]$")
plt.legend()
plt.axhline(y = 1)
plt.show()
| 2.1875
| 2
|
mcenter_client/parallelm/mcenter_client/client.py
|
lisapm/mlpiper
| 7
|
12784124
|
<reponame>lisapm/mlpiper
#! /usr/bin/python3
from __future__ import print_function
import time
import collections.abc
import copy
import sys
import traceback
from . import rest_api
from .rest_api.rest import ApiException
class _REST(object):
def __init__(self, client):
self._client = client
self.token = None
self.agent = rest_api.AgentsApi(client)
self.default = rest_api.DefaultApi(client)
self.group = rest_api.GroupsApi(client)
self.ppatterns = rest_api.PipelinePatternsApi(client)
self.pprofiles = rest_api.PipelineProfilesApi(client)
self.role = rest_api.RolesApi(client)
self.user = rest_api.UsersApi(client)
def set_token(self, token):
self.token = token
self._client.cookie = 'token=' + token
def clear_token(self):
self.token = None
self._client.cookie = None
class _Base(object):
__slots__ = ("_factory", "_data", "id", "_nd", "_nest")
def __init__(self, factory, data):
object.__init__(self)
self.id = data.id
self._data = data
self._factory = factory
self._nd = None
self._nest = 0
def _getter(self, name):
return getattr(self._data, name)
def _setter(self, name, value):
if self._nd is not None:
setattr(self._nd, name, value)
else:
self.update({name: value})
def _deleter(self, name):
if self._nd is not None:
delattr(self._nd, name)
else:
raise AttributeError("Cannot delete this attribute")
def __enter__(self):
if self._nest == 0:
self._nd = self._copydata()
self._nest += 1
return self._nd
def __exit__(self, exc_type, exc_val, exc_tb):
if self._nest == 0:
raise RuntimeError("__exit__ without __enter__")
self._nest -= 1
if self._nest == 0:
try:
self._data = self._update(self._nd)._data
finally:
self._nd = None
return False
def update(self, *args, **kwargs):
with self as new_data:
for d in args:
for k, v in d.items():
setattr(new_data, k, v)
for k, v in kwargs.items():
setattr(new_data, k, v)
return self
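    # A hedged illustration of the two equivalent update styles above; the
    # attribute name 'description' is an assumption for this sketch:
    #   obj.update({'description': 'new text'})   # applied as a single PUT via _update()
    #   with obj as draft:
    #       draft.description = 'new text'        # also sent as one PUT on __exit__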
def _update(self, data):
raise NotImplementedError("Update is not supported")
def delete(self):
if self._nest != 0:
raise RuntimeError("Cannot delete object in the middle of update")
self._delete()
self.id = None
def _delete(self):
raise NotImplementedError("Delete is not supported")
def _copydata(self):
return copy.copy(self._data)
def _populate(cls):
for field in cls.RestType.openapi_types:
if field != "id":
f = "_%s" % field
setattr(cls, field,
property(lambda self, f=f: self._getter(f),
lambda self, value, f=f: self._setter(f, value),
lambda self, f=f: self._deleter(f)))
class _BaseWithID(_Base):
__slots__ = ()
def _copydata(self):
data = _Base._copydata(self)
data.created = None
data.created_by = None
data.id = None
return data
def _update(self, data):
return self._factory.update(self.id, data)
def _delete(self):
self._factory.delete(self.id)
class _BaseFactory(object):
__slots__ = ("_rest")
ClientType = None
def __init__(self, rest):
self._rest = rest
def _get_id(self, id):
if isinstance(id, (self.ClientType, self.ClientType.RestType)):
return id.id
if isinstance(id, collections.Mapping):
return id['id']
return id
def _unwrap(self, obj):
if isinstance(obj, self.ClientType):
return obj._data
return obj
def _wrap(self, obj):
return self.ClientType(self, obj)
class _User(_BaseWithID):
__slots__ = ()
RestType = rest_api.User
def __str__(self):
return "<User %s>" % self.id
_populate(_User)
class _UserFactory(_BaseFactory):
__slots__ = ()
ClientType = _User
def __init__(self, rest):
_BaseFactory.__init__(self, rest.user)
def me(self):
return self._wrap(self._rest.me_get())
def list(self):
return [self._wrap(u) for u in self._rest.users_get()]
def create(self, user):
return self._wrap(self._rest.users_post(self._unwrap(user)))
def get(self, user_id):
return self._wrap(self._rest.users_user_id_get(self._get_id(user_id)))
def update(self, user_id, user):
return self._wrap(self._rest.users_user_id_put(self._get_id(user_id),
self._unwrap(user)))
def delete(self, user_id):
return self._rest.users_user_id_delete(self._get_id(user_id))
def validate(self, token):
return self._rest.auth_validate_post("Bearer %s" % token)
class _Group(_BaseWithID):
__slots__ = ()
RestType = rest_api.Group
def __str__(self):
return "<Group %s>" % self.id
_populate(_Group)
class _GroupFactory(_BaseFactory):
__slots__ = ()
ClientType = _Group
def __init__(self, rest):
_BaseFactory.__init__(self, rest.group)
def list(self):
return [self._wrap(u) for u in self._rest.onboarding_groups_get()]
def create(self, group):
return self._wrap(
self._rest.onboarding_groups_post(self._unwrap(group)))
def get(self, group_id):
return self._wrap(
self._rest.onboarding_groups_group_id_get(self._get_id(group_id)))
def update(self, group_id, group):
return self._wrap(
self._rest.onboarding_groups_group_id_put(self._get_id(group_id),
self._unwrap(group)))
def delete(self, group_id):
return self._rest.onboarding_groups_group_id_delete(
self._get_id(group_id))
class _Agent(_BaseWithID):
__slots__ = ()
RestType = rest_api.Agent
def __str__(self):
return "<Agent %s>" % self.id
_populate(_Agent)
class _AgentFactory(_BaseFactory):
__slots__ = ()
ClientType = _Agent
def __init__(self, rest):
_BaseFactory.__init__(self, rest.agent)
def list(self):
return [self._wrap(u) for u in self._rest.agents_get()]
def create(self, agent):
return self._wrap(self._rest.agents_post(self._unwrap(agent)))
def get(self, agent_id):
return self._wrap(
self._rest.agents_agent_id_get(self._get_id(agent_id)))
def update(self, agent_id, agent):
return self._wrap(
self._rest.agents_agent_id_put(self._get_id(agent_id),
self._unwrap(agent)))
def delete(self, agent_id):
self._rest.agents_agent_id_delete(self._get_id(agent_id))
class _Role(_BaseWithID):
__slots__ = ()
RestType = rest_api.Role
def __str__(self):
return "<Role %s>" % self.id
_populate(_Role)
class _RoleFactory(_BaseFactory):
__slots__ = ()
ClientType = _Role
def __init__(self, rest):
_BaseFactory.__init__(self, rest.role)
def list(self):
return [self._wrap(u) for u in self._rest.roles_get()]
def create(self, role):
return self._wrap(self._rest.roles_post(self._unwrap(role)))
def get(self, role_id):
return self._wrap(self._rest.roles_role_id_get(self._get_id(role_id)))
def update(self, role_id, role):
return self._wrap(self._rest.roles_role_id_put(self._get_id(role_id),
self._unwrap(role)))
def delete(self, role_id):
self._rest.roles_role_id_delete(self._get_id(role_id))
class _SettingsFactory(object):
__slots__ = ("_rest")
def __init__(self, rest):
object.__init__(self)
self._rest = rest.default
def global_parameters(self):
return self._rest.settings_global_parameters_get()
def build_manifest(self):
return self._rest.settings_build_manifest_get()
class Client(object):
def __init__(self, server=None, user=None,
configuration=rest_api.Configuration()):
if server is not None:
if '//' in server:
configuration.host = server
elif ']:' in server or (':' in server and ']' not in server):
configuration.host = "http://%s/v2" % server
else:
configuration.host = "http://%s:3456/v2" % server
self._rest = _REST(rest_api.ApiClient(configuration))
self.agent = _AgentFactory(self._rest)
self.group = _GroupFactory(self._rest)
self.role = _RoleFactory(self._rest)
self.settings = _SettingsFactory(self._rest)
self.user = _UserFactory(self._rest)
if user is not None:
self.login(user)
def login(self, user):
res = self._rest.user.auth_login_post(user)
self._rest.set_token(res.token)
def logout(self):
self._rest.clear_token()
def get_token(self):
return self._rest.token
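# Minimal usage sketch (illustrative only: the server address and the shape of
# the credentials dict below are placeholders, not values defined in this module).
def _example_client_usage():
    client = Client(server="localhost", user={"username": "admin", "password": "admin"})
    for agent in client.agent.list():   # list registered agents via the AgentsApi wrapper
        print(agent)
    client.logout()                     # drops the session token set by login()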
| 2.140625
| 2
|
grammar-pyml/__init__.py
|
lschaack/grammar-pyml
| 0
|
12784125
|
<reponame>lschaack/grammar-pyml<gh_stars>0
from __future__ import division
import reader
| 0.851563
| 1
|
model/sketch.nyu/test_something_here.py
|
denyingmxd/Torchssc
| 0
|
12784126
|
<gh_stars>0
import torch
import torch.nn as nn
import os
import numpy as np
from utils.ply_utils import *
colorMap = np.array([[22, 191, 206], # 0 empty, free space
[214, 38, 40], # 1 ceiling red tp
[43, 160, 4], # 2 floor green fp
[158, 216, 229], # 3 wall blue fn
[114, 158, 206], # 4 window
[204, 204, 91], # 5 chair new: 180, 220, 90
[255, 186, 119], # 6 bed
[147, 102, 188], # 7 sofa
[30, 119, 181], # 8 table
[188, 188, 33], # 9 tvs
[255, 127, 12], # 10 furn
[196, 175, 214], # 11 objects
[153, 153, 153], # 12 Accessible area, or label==255, ignore
]).astype(np.int32)
def print_iou(iou):
tp_occ, fp_occ, fn_occ, intersection, union = iou
IOU_sc = intersection / union
precision_sc = tp_occ / (tp_occ + fp_occ)
recall_sc = tp_occ / (tp_occ + fn_occ)
return [IOU_sc,precision_sc,recall_sc]
def cal_prec_recall_iou(left,right,label_weight):
nonefree = np.where((label_weight > 0))
left=left[nonefree]
right=right[nonefree]
tp_occ = ((left > 0) & (right > 0)).astype(np.int8).sum()
fp_occ = ((left == 0) & (right > 0)).astype(np.int8).sum()
fn_occ = ((left > 0) & (right == 0)).astype(np.int8).sum()
union = ((left > 0) | (right > 0)).astype(np.int8).sum()
intersection = ((left > 0) & (right > 0)).astype(np.int8).sum()
# IOU_sc = intersection / union
# precision_sc = tp_occ / (tp_occ + fp_occ)
# recall_sc = tp_occ / (tp_occ + fn_occ)
return np.array([tp_occ,fp_occ,fn_occ,intersection,union])
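# Tiny worked example for cal_prec_recall_iou / print_iou on synthetic occupancy
# vectors (real inputs are flattened 60x36x60 grids; these five voxels are made up):
def _example_occupancy_iou():
    gt = np.array([1, 1, 0, 0, 1])     # ground-truth occupancy
    pred = np.array([1, 0, 0, 1, 1])   # predicted occupancy
    weight = np.ones_like(gt)          # evaluate every voxel
    stats = cal_prec_recall_iou(gt, pred, weight)
    # stats = [tp=2, fp=1, fn=1, intersection=2, union=4]
    print(print_iou(stats))            # -> [IoU=0.5, precision=2/3, recall=2/3]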
class Projection(nn.Module):
def __init__(self,):
super(Projection, self).__init__()
def forward(self,feature2d,depth_mapping_3d):
b, c, h, w = feature2d.shape
feature2d = feature2d.view(b, c, h * w).permute(0, 2, 1) # b x h*w x c
zerosVec = torch.zeros(b, 1,c).cuda() # for voxels that could not be projected from the depth map, we assign them zero vector
# zerosVec = (-1*torch.ones(b, 1,c)).cuda() # for voxels that could not be projected from the depth map, we assign them zero vector
segVec = torch.cat((feature2d, zerosVec), 1)
segres = [torch.index_select(segVec[i], 0, depth_mapping_3d[i]) for i in range(b)]
segres = torch.stack(segres).permute(0, 2, 1).contiguous().view(b, c, 60, 36, 60) # B, (channel), 60, 36, 60
return segres
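# Shape sketch for Projection (follows the NYU sizes used throughout this file):
# feature2d is (B, C, 480, 640) and depth_mapping_3d holds one flat pixel index
# per voxel of the 60x36x60 grid, with 480*640 = 307200 meaning "no projection",
# so the output is (B, C, 60, 36, 60) with zero vectors for unprojected voxels.
def _example_projection():
    proj = Projection().cuda()                      # needs a CUDA device, like the rest of this file
    feat = torch.randn(1, 8, 480, 640).cuda()
    mapping = torch.full((1, 60 * 36 * 60), 307200, dtype=torch.long).cuda()  # all voxels unprojected
    out = proj(feat, mapping)
    print(out.shape)                                # torch.Size([1, 8, 60, 36, 60]), all zeros here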
def test_projection_and_one_hot():
seg_2d_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/2DSSGT_npz/0001.npz'
depth_mapping_3d_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/0001.npz'
seg_2d=torch.from_numpy(np.load(seg_2d_path)['gt2d'].astype(np.int64)).cuda()
seg_2d = seg_2d.view(1,1,seg_2d.shape[0],seg_2d.shape[1])
depth_mapping_3d=torch.from_numpy(np.load(depth_mapping_3d_path)['arr_0'].astype(np.int64)).view(1,-1).cuda()
print('a',depth_mapping_3d.shape)
print('b',seg_2d.shape)
seg_2d+=1
seg_2d[seg_2d == 256] = 0
seg_2d = seg_2d.float()
b = seg_2d.clone()
projection = Projection().cuda()
seg_2d_one_hot_1 = projection(seg_2d, depth_mapping_3d)
print('c',seg_2d_one_hot_1.shape)
print('ff',(b==seg_2d).all())
# exit()
seg_2d_one_hot = nn.functional.one_hot(seg_2d_one_hot_1.long(), num_classes=12)[0].float().permute(0,4,1,2,3)
print('dd',seg_2d.shape)
seg_3d = nn.functional.one_hot(seg_2d.long(),num_classes=12).float()
print('d',seg_3d.shape)
seg_3d=seg_3d[0].permute(0,3,1,2)
print('ddd', seg_3d.shape)
seg_3d_one_hot = projection(seg_3d, depth_mapping_3d)
print('e',seg_3d_one_hot.shape)
ind1 = torch.argmax(seg_2d_one_hot, dim=1)
ind2 = torch.argmax(seg_3d_one_hot, dim=1)
print(ind1.shape)
print(seg_2d_one_hot_1.shape)
print((ind1[0]==seg_2d_one_hot_1[0][0]).all())
exit()
# print(ind1.shape)
# print(ind2.shape)
# print((ind1==ind2).sum())
#
#
# print((seg_2d_one_hot==seg_3d_one_hot).sum())
# voxel_complete_ply(ind1[0].cpu().numpy(), './0001.ply')
# voxel_complete_ply(ind2[0].cpu().numpy(), './00001.ply')
##be careful using predicted result or gt specially 0 or 255
def save_seg_2d_to_3d_one_hot_npz():
out_base='/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_to_3d_30000_npz/{}'
projection = Projection().cuda()
files = os.listdir('/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_30000/')
files.sort()
for file in files:
print(file)
# exit()
seg_2d_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_30000/{}'.format(file)
depth_mapping_3d_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/{}'.format(file)
seg_2d = torch.from_numpy(np.load(seg_2d_path)['arr_0']).float().cuda()
seg_2d = seg_2d.view(1, 1, seg_2d.shape[0], seg_2d.shape[1])
depth_mapping_3d = torch.from_numpy(np.load(depth_mapping_3d_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
# seg_2d += 1
# seg_2d[seg_2d == 256] = 0
# seg_2d = seg_2d.float()
# print(seg_2d.shape)
# print(torch.unique(seg_2d))
# exit()
# print(torch.max(seg_2d))
# continue
seg_2d_one_hot = projection(seg_2d, depth_mapping_3d)
# ind2 = seg_2d_one_hot.clone().int()
# ind2 = ind2[0][0].cpu().numpy()
# print(np.unique(ind2))
# exit()
seg_2d_one_hot = nn.functional.one_hot(seg_2d_one_hot.long(), num_classes=12)[0].float().permute(0, 4, 1, 2, 3)
# print(torch.unique(seg_2d_one_hot))
seg_2d_one_hot=seg_2d_one_hot.cpu().numpy()[0].astype(np.uint8)
# ind =
# voxel_complete_ply(ind2,'0001.ply')
np.savez(out_base.format(file),seg_2d_one_hot)
# exit()
#
def from_3d_one_hot_npz_to_3d_sketch():
out_base ='/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_to_3d_sketch_npz/{}'
from utils.Sobel import Sobel3D
sobel = Sobel3D().cuda()
for file in os.listdir('/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg2d_to_3d_one_hot/'):
print(file)
data = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg2d_to_3d_one_hot/{}'.format(file))['arr_0']
data = torch.from_numpy(data).cuda()
data=torch.argmax(data,dim=0)
# voxel_complete_ply(data.cpu().numpy(), '0001.ply')
data = data.view(1,1,60,36,60).float()
sketch = sobel(data).int()[0][0].cpu().numpy().astype(np.uint8)
# voxel_complete_ply(sketch,'sketch_0001.ply')
np.savez(out_base.format(file),sketch)
# exit()
def test_3d_sketch_from_2d_seg():
sketch_iou = np.array([0, 0, 0, 0, 0], dtype=float)
sketch_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_to_3d_sketch_npz/'
for file in os.listdir(sketch_dir):
print(file)
sketch = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_to_3d_sketch_npz/{}'.format(file))['arr_0'].reshape(-1,)
gt_sketch_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/sketch3D/{}'.format(file.replace('npz','npy'))
gt_sketch = np.load(gt_sketch_path).reshape(-1, )
label_weight_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/{}'.format(file)
label_weight = np.load(label_weight_path)['arr_1'].astype(np.float32)
sketch_iou+=cal_prec_recall_iou(gt_sketch, sketch, label_weight)
print(print_iou(sketch_iou))
def save_2d_seg_to_sketch_to_ply():
out_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_to_3d_sketch_Deeplabv3_ply/{}'
projection = Projection().cuda()
files = os.listdir('/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d/')
files.sort()
for file in files:
print(file)
# exit()
seg_2d_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d/{}'.format(file)
depth_mapping_3d_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/{}'.format(file)
seg_2d = torch.from_numpy(np.load(seg_2d_path)['arr_0']).float().cuda()
seg_2d = seg_2d.view(1, 1, seg_2d.shape[0], seg_2d.shape[1])
depth_mapping_3d = torch.from_numpy(np.load(depth_mapping_3d_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
seg_2d += 1
seg_2d_to_3d = projection(seg_2d, depth_mapping_3d)
seg_2d_to_3d=seg_2d_to_3d[0][0].cpu().numpy().astype(np.uint8)
voxel_complete_ply(seg_2d_to_3d,out_dir.format(file.replace('npz','ply')))
# np.savez(out_base.format(file), seg_2d_one_hot)
# exit()
# def test_iou_on_edges():
def test_tsdf_and_S_0_together_ssc():
def hist_info(n_cl, pred, gt):
assert (pred.shape == gt.shape)
k = (gt >= 0) & (gt < n_cl) # exclude 255
labeled = np.sum(k)
correct = np.sum((pred[k] == gt[k]))
return np.bincount(n_cl * gt[k].astype(int) + pred[k].astype(int),
minlength=n_cl ** 2).reshape(n_cl,
n_cl), correct, labeled
def compute_score(hist, correct, labeled):
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
mean_IU = np.nanmean(iu)
mean_IU_no_back = np.nanmean(iu[1:])
freq = hist.sum(1) / hist.sum()
freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
mean_pixel_acc = correct / labeled
return iu, mean_IU, mean_IU_no_back, mean_pixel_acc
epoch=249
network_tsdf_path='/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_tsdf/results/epoch-{}'.format(epoch)
S_0_Deeplabv3_path = '/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_s0_Deeplabv3/results/epoch-{}'.format(epoch)
files=os.listdir(network_tsdf_path)
files.sort()
hist_ssc = np.zeros((12,12))
correct_ssc = 0
labeled_ssc = 0
for file in files:
print(file)
network_tsdf_res = np.load(os.path.join(network_tsdf_path.format(epoch),file)).reshape(-1,)
S_0_Deeplabv3_res = np.load(os.path.join(S_0_Deeplabv3_path.format(epoch),file)).reshape(-1,)
target = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label/{}'.format(file.replace('npy','npz')))['arr_0']
label_weight = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/{}'.\
format(file.replace('npy','npz')))['arr_1'].astype(np.float32)
pred = np.zeros_like(target)
for i in range(len(pred)):
if network_tsdf_res[i]==target[i]:
pred[i]=target[i]
continue
if S_0_Deeplabv3_res[i]==target[i]:
pred[i]=target[i]
continue
# if network_tsdf_res[i]==S_0_Deeplabv3_res[i]:
# pred[i]=S_0_Deeplabv3_res[i]
# pred[i]=network_tsdf_res[i]
nonefree_pred = pred[label_weight == 1]
nonefree_label = target[label_weight == 1]
h_ssc, c_ssc, l_ssc = hist_info(12, nonefree_pred, nonefree_label)
hist_ssc += h_ssc
correct_ssc += c_ssc
labeled_ssc += l_ssc
# break
score_ssc = compute_score(hist_ssc, correct_ssc, labeled_ssc)
print(score_ssc)
def test_occupied_voxels_tsdf_and_S_0():
epoch = 249
network_tsdf_path = '/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_tsdf/results/epoch-{}'.format(epoch)
S_0_Deeplabv3_path = '/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_s0_Deeplabv3/results/epoch-{}'.format(
epoch)
files = os.listdir(network_tsdf_path)
files.sort()
nonempty_tsdf=0
nonempty_s0=0
for file in files:
print(file)
network_tsdf_res = np.load(os.path.join(network_tsdf_path.format(epoch), file)).reshape(-1, )
S_0_Deeplabv3_res = np.load(os.path.join(S_0_Deeplabv3_path.format(epoch), file)).reshape(-1, )
target = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label/{}'.format(file.replace('npy', 'npz')))['arr_0']
label_weight = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/{}'. \
format(file.replace('npy', 'npz')))['arr_1'].astype(np.float32)
network_tsdf_res=network_tsdf_res[(label_weight == 1) & (target < 255)]
S_0_Deeplabv3_res = S_0_Deeplabv3_res[(label_weight == 1) & (target < 255)]
nonempty_s0+=np.sum(S_0_Deeplabv3_res>0)
nonempty_tsdf+=np.sum(network_tsdf_res>0)
print(nonempty_tsdf,nonempty_s0)
def rgb_complete_ply(vox_rgb, ply_filename,my_color_map,depth_mapping_3d):
def _get_xyz(size):
####attention
        ####sometimes you need to change the z direction by multiplying by -1
        """x: horizontal (left-right), y: vertical (up-down), z: depth (front-back)"""
_x = np.zeros(size, dtype=np.int32)
_y = np.zeros(size, dtype=np.int32)
_z = np.zeros(size, dtype=np.int32)
for i_h in range(size[0]): # x, y, z
_x[i_h, :, :] = i_h # x, left-right flip
for i_w in range(size[1]):
_y[:, i_w, :] = i_w # y, up-down flip
for i_d in range(size[2]):
_z[:, :, i_d] = i_d # z, front-back flip
return _x, _y, _z
#this receive voxel containging value ranging from 0 to 255*255*255 of size 60x36x60
new_vertices = []
num_vertices = 0
new_faces = []
num_faces = 0
size = vox_rgb.shape
vox_labeled = vox_rgb.flatten()
_x, _y, _z = _get_xyz(size)
_x = _x.flatten()
_y = _y.flatten()
_z = _z.flatten()
# vox_labeled[vox_labeled == 255] = 0
_rgb = my_color_map[vox_labeled[:]]
xyz_rgb = np.stack((_x, _y, _z, _rgb[:, 0], _rgb[:, 1], _rgb[:, 2]), axis=-1)
xyz_rgb = np.array(xyz_rgb)
ply_data = xyz_rgb[np.where(vox_labeled >= 0)]
# bb=time.time()
nnn = torch.sum(depth_mapping_3d<307200).item()
if len(ply_data)!=nnn:
print('some wrong here')
exit()
for i in range(len(ply_data)):
num_of_line = i
z = float(ply_data[i][0])
y = float(ply_data[i][1])
x = float(ply_data[i][2])
r = ply_data[i][3]
g = ply_data[i][4]
b = ply_data[i][5]
vertice_one = [x, y, z, r, g, b]
vertice_two = [x, y, z + 1, r, g, b]
vertice_three = [x, y + 1, z + 1, r, g, b]
vertice_four = [x, y + 1, z, r, g, b]
vertice_five = [x + 1, y, z, r, g, b]
vertice_six = [x + 1, y, z + 1, r, g, b]
vertice_seven = [x + 1, y + 1, z + 1, r, g, b]
vertice_eight = [x + 1, y + 1, z, r, g, b]
vertices = [vertice_one, vertice_two, vertice_three, vertice_four,
vertice_five, vertice_six, vertice_seven, vertice_eight]
new_vertices.append(vertices)
num_vertices += len(vertices)
base = 8 * num_of_line
face_one = [4, 0 + base, 1 + base, 2 + base, 3 + base]
face_two = [4, 7 + base, 6 + base, 5 + base, 4 + base]
face_three = [4, 0 + base, 4 + base, 5 + base, 1 + base]
face_four = [4, 1 + base, 5 + base, 6 + base, 2 + base]
face_five = [4, 2 + base, 6 + base, 7 + base, 3 + base]
face_six = [4, 3 + base, 7 + base, 4 + base, 0 + base]
faces = [face_one, face_two, face_three, face_four,
face_five, face_six]
new_faces.append(faces)
num_faces += len(faces)
# print(time.time()-bb)
# cc = time.time()
new_vertices = np.array(new_vertices).reshape(-1, 6)
new_faces = np.array(new_faces).reshape(-1, 5)
if len(ply_data) == 0:
raise Exception("Oops! That was no valid ply data.")
ply_head = 'ply\n' \
'format ascii 1.0\n' \
'element vertex {}\n' \
'property float x\n' \
'property float y\n' \
'property float z\n' \
'property uchar red\n' \
'property uchar green\n' \
'property uchar blue\n' \
'element face {}\n' \
'property list uint8 int32 vertex_indices\n' \
'end_header'.format(num_vertices, num_faces)
# ---- Save ply data to disk
# if not os.path.exists('/'.join(ply_filename.split('/')[:-1])):
# os.mkdir('/'.join(ply_filename.split('/')[:-1]))
np.savetxt(ply_filename, new_vertices, fmt="%d %d %d %d %d %d", header=ply_head, comments='') # It takes 20s
with open(ply_filename, 'ab') as f:
np.savetxt(f, new_faces, fmt="%d %d %d %d %d", comments='') # It takes 20s
del vox_labeled, _x, _y, _z, _rgb, xyz_rgb, ply_data, ply_head
def test_rgb_proj_to_3d():
rgb_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/RGB/'
label_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label/'
tsdf_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/'
mapping_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/'
files = os.listdir(rgb_dir)
files.sort()
import cv2
projection = Projection().cuda()
for file in files:
num = file[3:7]
rgb = torch.from_numpy(np.array(cv2.imread(os.path.join(rgb_dir,file))[:, :, ::-1])).float()
h,w,c = rgb.shape
rgb = rgb.view(1,c,h,w).cuda()
label_path=os.path.join(label_dir,'{}.npz'.format(num))
tsdf_path =os.path.join(tsdf_dir,'{}.npz'.format(num))
mapping_path = os.path.join(mapping_dir,'{}.npz'.format(num))
label = np.load(label_path)['arr_0'].astype(np.int64)
label_weight = np.load(tsdf_path)['arr_1'].astype(np.float32)
depth_mapping_3d = torch.from_numpy(np.load(mapping_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
rrgb = torch.zeros((480,640)).cuda()
for i in range(480):
for j in range(640):
r,g,b = rgb[0,:,i,j]
rrgb[i,j] = r*1000*1000+g*1000+b
rrgb = rrgb.view(1,1,480,640)
rgb = rrgb
#
# rrgb_3d = projection(rrgb,depth_mapping_3d)
# rrgb_3d = rrgb_3d[0].cpu().numpy()
# for i in range(60):
# for j in range(36):
# for k in range(60):
# num = rrgb_3d[0,0,i,j,k]
# r,g,b=num//1000//1000,(num//1000)%1000,num%1000
rgb_3d = projection(rgb, depth_mapping_3d)
rgb_3d = rgb_3d[0].cpu().numpy()
rgb_complete_ply(rgb_3d[0],'rgb3d.ply')
break
def test_2d_label_proj_to_3d():
# label2d_path = '/ssd/lyding/SSC/repositories/DeepLabV3Plus-Pytorch-master/results-latest_deeplabv3_resnet50_nyu_os16-13300/0001_pred_rgb.png'
label2d_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/RGB/NYU0001_colors.png"
mapping_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/0001.npz'
import cv2
projection = Projection().cuda()
label2d = np.array(cv2.imread(label2d_path)[:, :, ::-1])
color_label_2d = np.zeros((480, 640))
for i in range(480):
for j in range(640):
r,g,b = label2d[i,j]
color_label_2d[i,j]=r*256*256+g*256+b
color_label_2d = torch.from_numpy(color_label_2d).float()
color_label_2d = color_label_2d.view(1,1,480,640).cuda()
depth_mapping_3d = torch.from_numpy(np.load(mapping_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
label2d_3d = projection(color_label_2d,depth_mapping_3d)
my_color_map = np.array([[i//256//256,(i//256)%256,i%256] for i in range(256**3)])
label2d_3d = label2d_3d[0][0].cpu().numpy().astype(np.int64)
rgb_complete_ply(label2d_3d,'rgb2d_3d.ply',my_color_map,depth_mapping_3d)
def save_2d_rgb_to_3d_ply():
import cv2
projection = Projection().cuda()
my_color_map = np.array([[i // 256 // 256, (i // 256) % 256, i % 256] for i in range(256 ** 3)])
out_base = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/RGB3D_ply/{}.ply"
for file in range(1,1450):
num = '{:0>4d}'.format(file)
print(num)
label2d_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/RGB/NYU{}_colors.png".format(num)
mapping_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/{}.npz'.format(num)
label2d = np.array(cv2.imread(label2d_path)[:, :, ::-1])
color_label_2d = np.zeros((480, 640))
for i in range(480):
for j in range(640):
r, g, b = label2d[i, j]
color_label_2d[i, j] = r * 256 * 256 + g * 256 + b
color_label_2d = torch.from_numpy(color_label_2d).float()
color_label_2d = color_label_2d.view(1, 1, 480, 640).cuda()
depth_mapping_3d = torch.from_numpy(np.load(mapping_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
label2d_3d = projection(color_label_2d, depth_mapping_3d)
label2d_3d = label2d_3d[0][0].cpu().numpy().astype(np.int64)
rgb_complete_ply(label2d_3d, out_base.format(num), my_color_map, depth_mapping_3d)
# break
def test_0_in_rgb():
import cv2
label2d_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/RGB/NYU0001_colors.png"
label2d = np.array(cv2.imread(label2d_path)[:, :, ::-1])
mapping_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/0001.npz'
depth_mapping_3d = torch.from_numpy(np.load(mapping_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
print(depth_mapping_3d)
def test_save_2d_rgb_normed():
label2d_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/RGB/NYU0001_colors.png"
mapping_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Mapping/0001.npz'
import cv2
projection = Projection().cuda()
label2d = np.array(cv2.imread(label2d_path)[:, :, ::-1])
color_label_2d = np.zeros((480, 640))
for i in range(480):
for j in range(640):
r, g, b = label2d[i, j]
color_label_2d[i, j] = r * 256 * 256 + g * 256 + b
color_label_2d = torch.from_numpy(color_label_2d).float()
color_label_2d = color_label_2d.view(1, 1, 480, 640).cuda()
depth_mapping_3d = torch.from_numpy(np.load(mapping_path)['arr_0'].astype(np.int64)).view(1, -1).cuda()
label2d_3d = projection(color_label_2d, depth_mapping_3d)
def test_some_small_thing():
from utils.ply_utils import voxel_complete_ply_torchssc
one_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d/0001.npz"
two_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg2d_to_3d_one_hot/0001.npz"
three_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/seg_2d_to_3d_sketch_Deeplabv3_npz/0001.npz"
label_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label/0001.npz"
tsdf_path = "/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/0001.npz"
label = np.load(label_path)['arr_0'].astype(np.int64)
label_weight = np.load(tsdf_path)['arr_1'].astype(np.float32)
a = np.load(one_path)['arr_0']
b = np.load(two_path)['arr_0']
c = np.load(three_path)['arr_0']
b = np.argmax(b,axis=0)
c = np.argmax(c,axis=0)
voxel_complete_ply_torchssc(b,'b.ply',label_weight,label)
voxel_complete_ply_torchssc(c,'c.ply',label_weight,label)
print()
def test_0_in_depth():
base='/ssd/lyding/datasets/SSC/{}'
case='NYUtrain_npz'
files = os.listdir(base.format(case))
files.sort()
zero_num = 0
for file in files:
print(file)
data = np.load(os.path.join(base.format(case),file))
depth = data['depth'][0]
zero_num+=(depth==0.0).sum()
ratio = zero_num/(len(files)*depth.shape[0]*depth.shape[1])
print(ratio)
def visualize_tsdf():
file='0001'
label_weight_path = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/{}.npz'.format(file)
label_weight = np.load(label_weight_path)['arr_1'].astype(np.float32)
tsdf = np.load(label_weight_path)['arr_0'].astype(np.float32)
from utils.ply_utils import voxel_complete_ply
tsdf_minus1 = tsdf==-1
tsdf_zero = tsdf==0
tsdf_1 = tsdf==1
tsdf_neg = (tsdf<0) & (tsdf>-0.2)
tsdf_pos =(tsdf>0) & (tsdf<1)
tsdf_minus1 = tsdf_minus1.astype(int).reshape(60,36,60)
tsdf_zero = tsdf_zero.astype(int).reshape(60,36,60)
tsdf_1 = tsdf_1.astype(int).reshape(60,36,60)
tsdf_neg = tsdf_neg.astype(int).reshape(60,36,60)
tsdf_pos = tsdf_pos.astype(int).reshape(60,36,60)
voxel_complete_ply(tsdf_minus1,'tsdf_minus_one.ply')
voxel_complete_ply(tsdf_zero,'tsdf_zero.ply')
voxel_complete_ply(tsdf_1,'tsdf_one.ply')
voxel_complete_ply(tsdf_neg,'tsdf_neg.ply')
voxel_complete_ply(tsdf_pos,'tsdf_pos.ply')
return
def _downsample_label_my(label, voxel_size=(240, 144, 240), downscale=4):
r"""downsample the labeled data,
code taken from https://github.com/waterljwant/SSC/blob/master/dataloaders/dataloader.py#L262
Shape:
label, (240, 144, 240)
label_downscale, if downsample==4, then (60, 36, 60)
"""
if downscale == 1:
return label
ds = downscale
small_size = (
voxel_size[0] // ds,
voxel_size[1] // ds,
voxel_size[2] // ds,
) # small size
    label_downscale = np.zeros((
        voxel_size[0] // ds,
        voxel_size[1] // ds,
        voxel_size[2] // ds,
        12,
    ), dtype=float)  # np.float alias was removed in NumPy 1.24; plain float keeps the same behaviour
empty_t = 0.95 * ds * ds * ds # threshold
s01 = small_size[0] * small_size[1]
label_i = np.zeros((ds, ds, ds), dtype=np.int32)
for i in range(small_size[0] * small_size[1] * small_size[2]):
z = int(i / s01)
y = int((i - z * s01) / small_size[0])
x = int(i - z * s01 - y * small_size[0])
label_i[:, :, :] = label[
x * ds : (x + 1) * ds, y * ds : (y + 1) * ds, z * ds : (z + 1) * ds
]
label_bin = label_i.flatten()
zero_count_0 = np.array(np.where(label_bin == 0)).size
zero_count_255 = np.array(np.where(label_bin == 255)).size
zero_count = zero_count_0 + zero_count_255
if zero_count > empty_t:
label_downscale[x, y, z] = 0 if zero_count_0 > zero_count_255 else 255
else:
label_i_s = label_bin[
np.where(np.logical_and(label_bin > 0, label_bin < 255))
]
classes, cnts = np.unique(label_i_s, return_counts=True)
class_counts = np.zeros(12)
class_counts[classes] = cnts
target_classes = class_counts / np.sum(class_counts)
label_downscale[x, y, z] = target_classes
return label_downscale
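# Quick sanity sketch for _downsample_label_my: a 240x144x240 label volume whose
# first 4x4x4 block is class 3 downsamples to a (60, 36, 60, 12) array whose
# corner voxel carries all of its probability mass on class 3.
def _example_downsample():
    hr = np.zeros((240, 144, 240), dtype=np.int32)
    hr[:4, :4, :4] = 3
    lr = _downsample_label_my(hr)
    print(lr.shape)        # (60, 36, 60, 12)
    print(lr[0, 0, 0])     # one-hot on class 3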
def downsample_high_res_gt():
data_dir = '/ssd/lyding/datasets/SSC/NYUCADtest_npz/'
out_dir = '/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label_multi/'
files = os.listdir(data_dir)
files.sort()
from utils.ply_utils import voxel_complete_ply
for file in files:
print(file)
data = np.load(os.path.join(data_dir,file))
target_lr = data['target_lr']
target_hr = data['target_hr']
print(np.sum((target_lr>0)&(target_lr<255)))
# voxel_complete_ply(target_lr,'low_1.ply')
# voxel_complete_ply(target_hr,'high_1.ply')
target_hr = torch.from_numpy(target_hr)
down_from_high = _downsample_label_my(target_hr)
# down_from_high = down_from_high.transpose(3,0,1,2)
# np.savez(os.path.join(out_dir,num+'.npz'),down_from_high)
abc = np.zeros((60,36,60),dtype=np.uint8)
abc[(down_from_high.sum(axis=-1)==1)&(down_from_high.max(axis=-1)<1.)]=1
# abc[(down_from_high.sum(axis=-1)==1)]=1
print(np.sum(abc))
# xxx=0
# abc=np.zeros((60,36,60),dtype=np.uint8)
# for x in range(60):
# for y in range(36):
# for z in range(60):
# probs = down_from_high[:,x,y,z]
# if np.sum(probs)==1 and np.max(probs)<1:
# xxx+=1
# abc[x,y,z]=1
voxel_complete_ply(abc, 'abc.ply')
print(12)
# down_from_high = np.argmax(down_from_high,axis=0)
# voxel_complete_ply(down_from_high,'down_from_high_1.ply')
break
def test_tsdf_divided_acc():
def hist_info(n_cl, pred, gt):
assert (pred.shape == gt.shape)
k = (gt >= 0) & (gt < n_cl) # exclude 255
labeled = np.sum(k)
correct = np.sum((pred[k] == gt[k]))
return np.bincount(n_cl * gt[k].astype(int) + pred[k].astype(int),
minlength=n_cl ** 2).reshape(n_cl,
n_cl), correct, labeled
def compute_score(hist, correct, labeled):
iu = np.diag(hist) / (hist.sum(1) + hist.sum(0) - np.diag(hist))
mean_IU = np.nanmean(iu)
mean_IU_no_back = np.nanmean(iu[1:])
freq = hist.sum(1) / hist.sum()
freq_IU = (iu[freq > 0] * freq[freq > 0]).sum()
mean_pixel_acc = correct / labeled
return iu, mean_IU, mean_IU_no_back, mean_pixel_acc
epoch=249
network_tsdf_path='/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_tsdf/results/epoch-{}'.format(epoch)
# S_0_Deeplabv3_path = '/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_seg_2d/results/epoch-{}'.format(epoch)
S_0_Deeplabv3_path = '/ssd/lyding/SSC/TorchSSC/model/sketch.nyu/results/network_s0_multi_label_loss1/results/epoch-{}'.format(epoch)
files=os.listdir(network_tsdf_path)
files.sort()
hist_ssc = np.zeros((12,12))
correct_ssc = 0
labeled_ssc = 0
a, b =-0.2,0
for file in files:
print(file)
network_tsdf_res = np.load(os.path.join(network_tsdf_path.format(epoch),file)).reshape(-1,)
S_0_Deeplabv3_res = np.load(os.path.join(S_0_Deeplabv3_path.format(epoch),file)).reshape(-1,)
label_multi = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label_multi/{}'.format(file.replace('.npy','.npz')))['arr_0']
label_multi = ((label_multi.sum(axis=0)==1)&(label_multi.max(axis=0)<1)).flatten()
target = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/Label/{}'.format(file.replace('npy','npz')))['arr_0']
label_weight = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/{}'.\
format(file.replace('npy','npz')))['arr_1'].astype(np.float32)
tsdf = np.load('/ssd/lyding/SSC/TorchSSC/DATA/NYU/TSDF/{}'.\
format(file.replace('npy','npz')))['arr_0'].astype(np.float32)
# pred = np.zeros_like(target)
pred = S_0_Deeplabv3_res
# for i in range(len(pred)):
# if tsdf[i]>a and tsdf[i]<b:
# pred[i] = network_tsdf_res[i]
# continue
# pred[(tsdf>a) & (tsdf<b)]=network_tsdf_res[(tsdf>a) & (tsdf<b)]
cond = (label_weight == 1) & (label_multi == 1)
# cond = (label_weight == 1)
nonefree_pred = pred[cond]
nonefree_label = target[cond]
h_ssc, c_ssc, l_ssc = hist_info(12, nonefree_pred, nonefree_label)
hist_ssc += h_ssc
correct_ssc += c_ssc
labeled_ssc += l_ssc
# break
score_ssc = compute_score(hist_ssc, correct_ssc, labeled_ssc)
print(score_ssc)
# test_projection_and_one_hot()
# save_seg_2d_to_3d_one_hot_npz()
# from_3d_one_hot_npz_to_3d_sketch()
# test_3d_sketch_from_2d_seg()
# save_2d_seg_to_sketch_to_ply()
# test_rgb_proj_to_3d()
# test_2d_label_proj_to_3d()
# test_0_in_rgb()
# save_2d_rgb_to_3d_ply()
# test_tsdf_and_S_0_together_ssc()
# test_occupied_voxels_tsdf_and_S_0()
# test_some_small_thing()
# test_0_in_depth()
# visualize_tsdf()
# downsample_high_res_gt()
test_tsdf_divided_acc()
| 1.757813
| 2
|
ashtadhyayi_data/writer/__init__.py
|
ashtadhyayi/data_curation
| 0
|
12784127
|
<filename>ashtadhyayi_data/writer/__init__.py
import logging
import os
import ashtadhyayi_data
from doc_curation.md import library
from doc_curation.md.file import MdFile
for handler in logging.root.handlers[:]:
logging.root.removeHandler(handler)
logging.basicConfig(
level=logging.DEBUG,
format="%(levelname)s:%(asctime)s:%(module)s:%(lineno)d %(message)s"
)
shared_repo_path = "/home/vvasuki/sanskrit/ashtadhyayi"
def get_output_path(base_dir, vritti_id, suutra_id):
if vritti_id in ["padachcheda", "full_sutra", "anuvritti", "adhikara", "sumit_garg_english", 'topic']:
extension = "txt"
else:
extension = "md"
outpath = os.path.join(base_dir, vritti_id, "pada-" + ashtadhyayi_data.get_adhyaya_pada_id(suutra_id),
suutra_id + "." + extension)
return outpath
def dump_tsv_vritti(vritti_id):
from ashtadhyayi_data.reader import vritti_tsv
vritti_tsv.setup_vritti(vritti_id=vritti_id)
for suutra_id in ashtadhyayi_data.suutra_df.index:
vritti = vritti_tsv.get_vritti(vritti_id=vritti_id, suutra_id=suutra_id)
if vritti is not None:
outpath = get_output_path(base_dir=shared_repo_path, vritti_id=vritti_id, suutra_id=suutra_id)
os.makedirs(os.path.dirname(outpath), exist_ok=True)
with open(outpath, 'w', encoding="utf8") as outfile:
outfile.write(vritti)
def dump_per_suutra_mds(outpath, dry_run=False):
md_file = MdFile(file_path="/home/vvasuki/ashtadhyayi/ashtadhyayi.github.io/content/sutra-details.md")
(_, template_content) = md_file.read()
suutra_df = ashtadhyayi_data.get_suutra_df()
for suutra_id in suutra_df.index:
dest_path = os.path.join(outpath, ashtadhyayi_data.get_adhyaya_pada_id(suutra_id), "%s.md" % suutra_id)
md_file = MdFile(file_path=dest_path)
title = suutra_df.loc[suutra_id, "sutra"]
title = "%s %s" % (suutra_id, title)
[adhyaaya, paada, suutra] = suutra_id.split(".")
content = template_content.replace("ADHYAAYA", adhyaaya).replace("PAADA", paada).replace("SUUTRA", suutra)
md_file.dump_to_file(metadata={"title": title}, content=content, dry_run=dry_run)
library.fix_index_files(dir_path=outpath)
if __name__ == '__main__':
pass
dump_per_suutra_mds(outpath="/home/vvasuki/ashtadhyayi/ashtadhyayi.github.io/content/suutra/")
# dump_tsv_vritti("topic")
# delete_empty_vritti_files("padamanjari")
| 2.234375
| 2
|
Desafios/Desafio 049.py
|
tiagosm1/Pyhton_CEV
| 0
|
12784128
|
num = int(input('Enter a number to see its multiplication table: '))
tab = 0
for c in range(1, 11):
print('{} x {:2} = {:2}'.format(num, c, num*c))
| 3.90625
| 4
|
stock_notifier/__init__.py
|
saswatraj/stock_notifier
| 0
|
12784129
|
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Package file for Stock Notifier package.
@author: rajsaswa
"""
| 0.894531
| 1
|
util/apis.py
|
lucasxlu/MMNet
| 3
|
12784130
|
"""
Performance Comparison with Commercial APIs like Face++, Google, MS and Amazon
"""
import sys
import os
import requests
import numpy as np
import pandas as pd
from sklearn.metrics import confusion_matrix
from sklearn.metrics import accuracy_score
sys.path.append('../')
from config.cfg import cfg
def prepare_test_imgs(type='basic'):
face_with_emt = {}
    manual_annotation_dir = r'E:\DataSet\CV\TreeCNN\RAF-Face/basic\Annotation\manual'  # raw string so the backslashes stay literal
emotion_label_txt_path = os.path.join(cfg['root'], 'RAF-Face', "%s/EmoLabel/list_patition_label.txt" % type)
emotion_dict = dict(np.loadtxt(emotion_label_txt_path, dtype=np.str))
for _ in os.listdir(manual_annotation_dir):
if _.startswith('test_'):
face_fname = _.replace('_manu_attri', '_aligned').replace('.txt', '.jpg')
face_with_emt[os.path.join(cfg['root'], 'RAF-Face', '%s/Image/aligned' % type, face_fname)] = int(
emotion_dict[face_fname.replace('_aligned', '')].strip()) - 1
return face_with_emt
def facepp(img_path):
"""
Recognition with Face++ Emotion Recognition API
:param img_path:
:return:
"""
apikey = ''
apisecret = ''
url = 'https://api-cn.faceplusplus.com/facepp/v3/detect'
files = {'image_file': open(img_path, 'rb')}
payload = {'api_key': apikey, 'api_secret': apisecret,
# 'return_landmark': 0, 'face_tokens': 'none',
'return_attributes': 'emotion'}
response = requests.post(url, data=payload, files=files)
if response.status_code == 200:
res_json = response.json()
max_k = ''
max_v = 0
for k, v in res_json['faces'][0]['attributes']['emotion'].items():
if v > max_v:
max_v = v
max_k = k
return max_k
else:
print(response)
return None
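# Illustrative call (needs valid Face++ credentials filled into apikey/apisecret
# above; the image path here is a placeholder, not part of the dataset layout):
def _example_facepp_call(img_path='face.jpg'):
    emotion = facepp(img_path)
    print(emotion)   # e.g. 'happiness' -- the key with the highest emotion score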
if __name__ == '__main__':
img_files = prepare_test_imgs()
print(img_files)
basic_emt_map = {
'surprise': 0,
'fear': 1,
'disgust': 2,
'happiness': 3,
'sadness': 4,
'anger': 5,
'neutral': 6
}
gt = []
pred = []
for imgf, e in img_files.items():
try:
emt = facepp(imgf)
print(emt)
gt.append(e)
pred.append(basic_emt_map[emt])
except:
pass
print('Accuracy of Emotion Recognition: %s' % str(accuracy_score(gt, pred)))
print('Confusion Matrix on FER: ')
print(confusion_matrix(np.array(gt).ravel().tolist(), np.array(pred).ravel().tolist()))
| 2.546875
| 3
|
bitcoin_networkapis.py
|
vizeet/bitcoin_core_python_client
| 4
|
12784131
|
import pycurl
from io import BytesIO
import json
import datetime
import pandas as pd
myaddress = input('Enter Bitcoin Address: ')
btcval = 100000000.0 # satoshis per bitcoin
block_time_in_min = 10
block_time_in_sec = block_time_in_min*60
def getBalance(address: str):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/unspent?active=%s" % (address))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
balance = 0.0
# print("getreq = %s" % (getreq.getvalue()))
allunspenttx = json.loads(strbuf.getvalue())['unspent_outputs']
for eachtx in allunspenttx:
balance += eachtx['value']
return balance
def getTxnHistoryOfAddress(address: str):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/address/%s?format=json" % (address))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
new_txn_list = []
alltxns = json.loads(strbuf.getvalue())['txs']
for eachtxn in alltxns:
new_txn = {}
input_list = eachtxn['inputs']
input_value = 0
address_input_value = 0
for each_input in input_list:
input_value += each_input['prev_out']['value']
if each_input['prev_out']['addr'] == address:
address_input_value += each_input['prev_out']['value']
output_list = eachtxn['out']
output_value = 0
address_output_value = 0
for each_output in output_list:
output_value += each_output['value']
if each_output['addr'] == address:
address_output_value += each_output['value']
if address_input_value > address_output_value:
new_txn['credit_in_btc'] = (address_input_value - address_output_value) / btcval
else:
new_txn['debit_in_btc'] = (address_output_value - address_input_value) / btcval
network_fees = input_value - output_value
new_txn['network_fees'] = network_fees / btcval
new_txn['network_fees_in_inr'] = new_txn['network_fees'] * getCurrentSellPriceInInr()
dt = datetime.datetime.fromtimestamp(eachtxn['time'])
new_txn['date_time'] = dt.strftime("%d-%B-%Y %H:%M:%S")
new_txn_list.append(new_txn)
return new_txn_list
def getCurrentBlockHeight():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/blocks?format=json")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_block_height = json.loads(strbuf.getvalue())['blocks'][0]['height']
return current_block_height
def getTxCountInBlock(block_height: int):
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/block-height/%d?format=json" % (block_height))
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
txlist = json.loads(strbuf.getvalue())['blocks'][0]['tx']
return len(txlist)
#def getListOfTxnsOnAddress(address: str):
#
#def getInputBitcoinInTx(txn: str):
#
#def getOutputBitcoinInTx(txn: str):
#
#def getChangeInTx(txn: str):
#
#def getNetworkFeesInTxn(txn: str):
def getTxRate(tx_count_in_block: int):
return tx_count_in_block/block_time_in_sec
# return block_time_in_sec/tx_count_in_block
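# Worked example: a block containing 2000 transactions mined over the nominal
# 10-minute (600 s) block time corresponds to 2000 / 600 ≈ 3.33 tx/s.
def _example_tx_rate():
    print(getTxRate(2000))   # -> 3.333...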
def getAverageTxRateInLast24Hrs():
current_block_height = getCurrentBlockHeight()
min_in_a_day = 60*24
blocks_in_a_day = int(min_in_a_day/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_day, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_day
return average_tx_rate
def getAverageTxRateInLastWeek():
current_block_height = getCurrentBlockHeight()
min_in_a_week = 60*24*7
blocks_in_a_week = int(min_in_a_week/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_week, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_week
return average_tx_rate
def getAverageTxRateInLastMonth():
current_block_height = getCurrentBlockHeight()
    min_in_a_month = 60*24*30
blocks_in_a_month = int(min_in_a_month/block_time_in_min)
sum_tx_rate = 0
print("Transaction rate::")
for block_height in range(current_block_height - blocks_in_a_month, current_block_height):
tx_count = getTxCountInBlock(block_height)
tx_rate = getTxRate(tx_count)
sum_tx_rate += tx_rate
print("block height %d ===> Tx Rate %.6f" % (block_height, tx_rate))
average_tx_rate = sum_tx_rate / blocks_in_a_month
return average_tx_rate
def getCurrentNetworkHashRate():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/hashrate")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: text/plain'])
getreq.perform()
getreq.close()
current_network_hash_rate = int(strbuf.getvalue()) * 10**9
return current_network_hash_rate
def getCurrentBlockReward():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/bcperblock")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: text/plain'])
getreq.perform()
getreq.close()
block_reward_abs = int(strbuf.getvalue())
block_reward = block_reward_abs / btcval
return block_reward
def getCurrentBuyPriceInInr():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://www.zebapi.com/api/v1/market/ticker-new/btc/inr")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_buy_rate_in_inr = int(json.loads(strbuf.getvalue())['buy'])
return current_buy_rate_in_inr
def getCurrentSellPriceInInr():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://www.zebapi.com/api/v1/market/ticker-new/btc/inr")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
current_buy_rate_in_inr = int(json.loads(strbuf.getvalue())['sell'])
return current_buy_rate_in_inr
def getCurrentValueOfBitcoinInAddressInInr(address: str):
btc = getBalance(address) / btcval
price_in_inr = getCurrentSellPriceInInr()
value = btc * price_in_inr
return value
def getUnconfirmedTransactionCount():
strbuf = BytesIO()
getreq = pycurl.Curl()
getreq.setopt(getreq.URL, "https://blockchain.info/q/unconfirmedcount")
getreq.setopt(getreq.WRITEDATA, strbuf)
getreq.setopt(getreq.HTTPHEADER, ['Accept: application/json'])
getreq.perform()
getreq.close()
unconfirmed_transaction_count = int(strbuf.getvalue())
return unconfirmed_transaction_count
def convertToRupeeFormat(num: float):
numstr = "%.2f" % (num)
# print("numstr = %s" % (numstr))
# print("numstr len = %s" % (len(numstr)))
commaloc = 6
while commaloc < len(numstr):
numstr = numstr[:-commaloc] + ',' + numstr[-commaloc:]
commaloc += 3
rupees = "\u20B9%s" % (numstr)
return rupees
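# Worked example of the Indian digit grouping produced above:
def _example_rupee_format():
    print(convertToRupeeFormat(1234567))   # -> ₹12,34,567.00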
electricity_rates = {
    "rate_slabs": [
        {"min": 1, "max": 30, "unit_price": 3.25},
        {"min": 31, "max": 100, "unit_price": 4.7},
        {"min": 101, "max": 200, "unit_price": 6.25},
        {"min": 201, "unit_price": 7.3},
    ]
}
def getPriceFromUnit(unit: float):
rate_slabs = electricity_rates['rate_slabs']
price = 0
for slab in rate_slabs:
if slab['min'] > unit:
            continue
        elif ('max' in slab and slab['max'] > unit) or 'max' not in slab:
# if 'max' in slab:
# print("min = %.2f, max = %.2f, unit = %.2f" % (slab['min'], slab['max'], unit))
# else:
# print("min = %.2f, unit = %.2f" % (slab['min'], unit))
price += (unit - slab['min']) * slab['unit_price']
else:
price += (slab['max'] - slab['min']) * slab['unit_price']
return price
def getUnitFromPower(power: float):
unit = power * 24 * 30 / 1000
return unit
def getBlockMiningRatePer10Min(hashrate: int):
network_hashrate = getCurrentNetworkHashRate()
block_mining_rate = hashrate/network_hashrate
return block_mining_rate
def getBitcoinMiningRate(hashrate: int):
block_mining_rate = getBlockMiningRatePer10Min(hashrate)
mining_reward = getCurrentBlockReward()
bitcoin_mining_rate = block_mining_rate * mining_reward
return bitcoin_mining_rate
def getMiningPowerExpense(power: float):
unit = getUnitFromPower(power)
expense = getPriceFromUnit(unit)
return expense
def getBitcoinMinedPerMonth(hashrate: int):
bitcoin_mined_per_month = getBitcoinMiningRate(hashrate) * 6 * 24 * 30
return bitcoin_mined_per_month
def miningReturn(power: float, hashrate: int):
expense = getMiningPowerExpense(power)
bitcoin_mined_per_month = getBitcoinMinedPerMonth(hashrate)
revenue = bitcoin_mined_per_month * getCurrentSellPriceInInr()
profit = revenue - expense
return profit
def costOfMiningBitcoin(power: float, hashrate: int):
unit = getUnitFromPower(power)
price_per_month = getPriceFromUnit(unit)
bitcoin_mined_per_month = getBitcoinMiningRate(hashrate) * 6 * 24 * 30
cost_of_mining_bitcoin = price_per_month/bitcoin_mined_per_month
return cost_of_mining_bitcoin
if __name__ == "__main__":
balance = getBalance(myaddress) / btcval
print("Current Bitcoin balance = %.8f at Address = %s" % (balance, myaddress))
value = getCurrentValueOfBitcoinInAddressInInr(myaddress)
print("Current Value of Bitcoin = %.2f for Address = %s" % (value, myaddress))
current_block_height = getCurrentBlockHeight()
print("current block height = %d" % (current_block_height))
tx_count_in_last_block = getTxCountInBlock(current_block_height)
print("Number of transactions in last block = %d" % (tx_count_in_last_block))
tx_rate = getTxRate(tx_count_in_last_block)
print("Current transaction rate = %.6f" % (tx_rate))
# average_tx_rate = getAverageTxRateInLast24Hrs()
# print("Average Transaction Rate in last 24 Hrs = %.6f" % (average_tx_rate))
current_network_hash_rate = getCurrentNetworkHashRate()
print("Current Network Hash Rate = %d" % (current_network_hash_rate))
block_reward = getCurrentBlockReward()
print("Current Block Reward = %.8f" % (block_reward))
current_buy_rate_in_inr = getCurrentBuyPriceInInr()
current_buy_rate_in_rupees = convertToRupeeFormat(current_buy_rate_in_inr)
print("Current Buy Rate in Indian Rupees = %s" % (current_buy_rate_in_rupees))
miner_hashrate = 13.5 * 10**12
print("Miner hashrate = %d" % (miner_hashrate))
miner_power = 1323
print ("Miner Power in Watt = %f" % (miner_power))
expense = getMiningPowerExpense(miner_power)
print ("Miner Power Expense Per Month = %.2f" % (expense))
bitcoin_mined_per_month = getBitcoinMinedPerMonth(miner_hashrate)
print("Bitcoin Mined Per Month = %.8f from Miner with hashrate = %d" % (bitcoin_mined_per_month, miner_hashrate))
mining_return = miningReturn(miner_power, miner_hashrate)
print("Mining Return Per Month = %s" % (mining_return))
cost_of_mining_bitcoin = costOfMiningBitcoin(miner_power, miner_hashrate)
print("Cost of Mining Bitcoin = %.2f" % (cost_of_mining_bitcoin))
unconfirmed_transaction_count = getUnconfirmedTransactionCount()
print("Total Unconfirmed Transaction Count = %d" % (unconfirmed_transaction_count))
txn_history = getTxnHistoryOfAddress(myaddress)
txn_history_table = pd.DataFrame(txn_history)
print("Transaction History::\n%s" % (txn_history_table))
| 2.625
| 3
|
sandbox/check-mcmc.py
|
ligo-asimov/asimov
| 2
|
12784132
|
<reponame>ligo-asimov/asimov<gh_stars>1-10
import ast
import subprocess
from shutil import copy
from asimov import gitlab, mattermost
from asimov import config
from asimov import condor, git
from asimov.ini import RunConfiguration
import os, glob, datetime
from subprocess import check_output as bash
import otter
server = gitlab.gitlab.Gitlab('https://git.ligo.org', private_token=config.get("gitlab", "token"))
repository = server.projects.get(config.get("olivaw", "tracking_repository"))
uber_repository = git.MetaRepository(config.get("olivaw", "metarepository"))
events = gitlab.find_events(repository, milestone=config.get("olivaw", "milestone"))
mattermost = mattermost.Mattermost()
from pesummary.gw.file.read import read
def get_all_jobs():
all_jobs = bash('condor_q -af:j Iwd'.split(' '), universal_newlines=True)
#print(all_jobs)
all_jobs = str(all_jobs).split('\n')
#print(all_jobs)
all_jobs = filter( lambda x: x, all_jobs)
wds = []
ids = []
for z in all_jobs:
wds.append(z.split()[1])
ids.append(z.split()[0])
return ids, wds
def main():
report = otter.Otter(filename="/home/daniel.williams/public_html/LVC/projects/O3/C01/audit_mcmc.html",
author="<NAME>",
title="Asimov/Olivaw : Preferred run audit report"
)
with report:
report + "This report contains the latest updates on the run status of the various PE runs ongoing at CIT."
report + "Supervisor last checked these events at " + str(datetime.datetime.now())
all_ids, all_wds = get_all_jobs()
for event in events:
print(event.title)
        if event.state is None:
continue
if "Special" in event.labels:
continue
try:
repo = uber_repository.get_repo(event.title)
except:
print(f"{event.title} missing from the uberrepo")
continue
repo.update(stash=True)
with report:
report + f"#{event.title}"
try:
event_prods = repo.find_prods(config.get("olivaw", "run_category"))
except:
print(f"No runs in this repository")
continue
if event.state in ["Generating PSDs", "Productions running", "Stuck"]:
psds_dict = {}
prod_keys = [key for key in event.data.keys() if "Prod" in key[0:5]]
n_productions = len(event_prods)
event_psds_l = []
pref_prod = []
for prod in event_prods:
prod = prod.split("_")[0]
if prod in event.data:
if "blocked" in event.data[prod].lower(): continue
cluster = event.data[prod]
prod_rundir = event.data[f"{prod}_rundir"]
run_ini = os.path.join(prod_rundir, "config.ini")
actual_config = RunConfiguration(run_ini)
try:
engine_data = actual_config.get_engine()
except:
continue
if not "daniel.williams" in prod_rundir:
continue
if actual_config.ini.get("analysis", "engine") == "lalinferencemcmc":
# Keep only lists that correspond to the working directory
job_ids = [ number for number, directory in zip(all_ids, all_wds) if (prod in directory) and (event.title in directory) ]
print(job_ids)
if len(job_ids) > 0:
report += job_ids
tmp = "tmp"
try:
os.makedirs(tmp)
except:
pass
try:
os.makedirs(f"{prod_rundir}/{tmp}/html")
#os.popen(f"rm -r /home/john.veitch/projects/O3/SEOBNRv4P_rota_runs/{event.title}/{prod}")
os.makedirs(f"/home/john.veitch/projects/O3/SEOBNRv4P_rota_runs/{event.title}/{prod}-robot")
except:
pass
raw_pp_str = os.popen(f'grep cbcBayesPostProc {prod_rundir}/lalinference*.sh').read()
pspath0 = raw_pp_str.split('hdf5_snr.txt ')[-1].split(' ')[0]
for job in job_ids:
os.system(f'condor_ssh_to_job -ssh scp {job} remote:./*.hdf* {prod_rundir}/{tmp}')
for h5file in glob.glob(f"{prod_rundir}/{tmp}/*.hdf5"):
pspath1 = h5file # os.path.join(prod_rundir, tmp,'*.hdf5')
# print(pspath1)
file = h5file.split("/")[-1]
copy(h5file, f"/home/john.veitch/projects/O3/SEOBNRv4P_rota_runs/{event.title}/{prod}-robot/{file}")
pspath = raw_pp_str.replace(pspath0,pspath1)
webpath = pspath.split("--outpath")[1].split()[0]
new_webpath = f"{prod_rundir}/{tmp}/html"
print(pspath.replace(webpath, new_webpath))
# os.popen(pspath.replace(webpath, new_webpath))
main()
| 2.09375
| 2
|
pyweno/__init__.py
|
alexfikl/PyWENO
| 1
|
12784133
|
<reponame>alexfikl/PyWENO
"""The PyWENO module."""
import points
import kernels
import nonuniform
import symbolic
import symbols
import version
import weno
import cweno
| 0.992188
| 1
|
complaints/forms.py
|
Three-Dev-Musketeers/Mumbai_Police
| 5
|
12784134
|
<filename>complaints/forms.py
from django.forms import ModelForm
| 1.125
| 1
|
recaman_test.py
|
sfrebel/recamansvg
| 0
|
12784135
|
<filename>recaman_test.py<gh_stars>0
import pytest
from recaman import calculate_racaman
def test_uniqueness():
sequence = calculate_racaman(24)
for elem in sequence:
assert sequence.count(elem) == 1
# sequences longer than 24 can contain duplicates (42 for example)
def test_sequencelength():
sequence = calculate_racaman(200)
assert len(sequence) == 200
sequence = calculate_racaman(42)
assert len(sequence) == 42
def test_hardcoded():
referencesequence = [0, 1, 3, 6, 2, 7, 13, 20, 12, 21, 11, 22, 10, 23, 9, 24, 8, 25, 43, 62, 42, 63, 41, 18, 42, 17, 43, 16, 44, 15, 45, 14, 46, 79, 113, 78, 114, 77, 39, 78, 38, 79, 37, 80, 36, 81, 35, 82, 34, 83, 33, 84, 32, 85, 31, 86, 30, 87, 29, 88, 28, 89, 27, 90, 26, 91, 157, 224, 156, 225, 155] # taken from wikipedia
sequence = calculate_racaman(len(referencesequence))
assert referencesequence == sequence
| 2.96875
| 3
|
api/classification/feat_peak.py
|
xiafanzeng/Raman-Spectroscopy
| 4
|
12784136
|
from scipy.signal import find_peaks
import numpy as np
import math
def search_peaks(x_data, y_data, height=0.1, distance=10):
prominence = np.mean(y_data)
peak_list = find_peaks(y_data, height=height, prominence=prominence, distance=distance)
peaks = []
for i in peak_list[0]:
peak = (x_data[i], y_data[i])
peaks.append(peak)
return peaks
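# Small sketch of search_peaks on a synthetic spectrum with a single Gaussian
# peak (the axis range and peak position are made up for illustration):
def _example_search_peaks():
    x = np.linspace(200, 2000, 1000)
    y = np.exp(-((x - 1000) ** 2) / (2 * 20 ** 2))   # unit-height peak near 1000
    print(search_peaks(x, y))                        # -> [(~1000.0, ~1.0)]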
def search_database_peaks(all_spectrum, height=0.1, distance=10):
peaks_database = {}
for key in list(all_spectrum.keys()):
x_data = all_spectrum[key][0]
y_data = all_spectrum[key][1]
peaks = search_peaks(x_data, y_data, height=height, distance=distance)
peaks_database.update({key: peaks})
return peaks_database
def compare_peaks(peaks_database, peaks, abs_tol=5):
coincide_information = {}
for key in list(peaks_database.keys()):
coincide_list = []
for peak_d in peaks_database[key]:
for peak in peaks:
if math.isclose(peak[0], peak_d[0], abs_tol=abs_tol):
coincide_list.append([peak_d[0], peak[0]])
coincide_information.update(
{key: {'coincide_list': coincide_list, 'coincide_number': [len(peaks_database[key]), len(coincide_list)]}})
return coincide_information
def judge_matter(coincide_information, criterion=0.99):
contain_dict = {}
for key in list(coincide_information.keys()):
coincide_number = coincide_information[key]['coincide_number']
key_criterion = coincide_number[1] / coincide_number[0]
if key_criterion >= criterion:
contain_dict.update({key: key_criterion})
return contain_dict
def classify(x_data, y_data, all_spectrum):
peaks = search_peaks(x_data,y_data)
database_peaks = search_database_peaks(all_spectrum)
print(database_peaks)
    compare_result = compare_peaks(database_peaks, peaks)
    # print(compare_result)
    # callers can narrow this down further with judge_matter(compare_result)
    return compare_result
| 2.453125
| 2
|
pytorch_ares/third_party/free_adv_train/free_train.py
|
thu-ml/realsafe
| 107
|
12784137
|
<reponame>thu-ml/realsafe
"""Trains a model, saving checkpoints and tensorboard summaries along
the way."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from datetime import datetime
import os
import shutil
from timeit import default_timer as timer
import tensorflow as tf
import numpy as np
import sys
from free_model import Model
import cifar10_input
import cifar100_input
import pdb
import config
def get_path_dir(data_dir, dataset, **_):
path = os.path.join(data_dir, dataset)
if os.path.islink(path):
path = os.readlink(path)
return path
def train(tf_seed, np_seed, train_steps, out_steps, summary_steps, checkpoint_steps, step_size_schedule,
weight_decay, momentum, train_batch_size, epsilon, replay_m, model_dir, dataset, **kwargs):
tf.set_random_seed(tf_seed)
np.random.seed(np_seed)
model_dir = model_dir + '%s_m%d_eps%.1f_b%d' % (dataset, replay_m, epsilon, train_batch_size) # TODO Replace with not defaults
# Setting up the data and the model
data_path = get_path_dir(dataset=dataset, **kwargs)
if dataset == 'cifar10':
raw_data = cifar10_input.CIFAR10Data(data_path)
else:
raw_data = cifar100_input.CIFAR100Data(data_path)
global_step = tf.contrib.framework.get_or_create_global_step()
model = Model(mode='train', dataset=dataset, train_batch_size=train_batch_size)
# Setting up the optimizer
boundaries = [int(sss[0]) for sss in step_size_schedule][1:]
values = [sss[1] for sss in step_size_schedule]
learning_rate = tf.train.piecewise_constant(tf.cast(global_step, tf.int32), boundaries, values)
optimizer = tf.train.MomentumOptimizer(learning_rate, momentum)
# Optimizing computation
total_loss = model.mean_xent + weight_decay * model.weight_decay_loss
grads = optimizer.compute_gradients(total_loss)
# Compute new image
pert_grad = [g for g, v in grads if 'perturbation' in v.name]
sign_pert_grad = tf.sign(pert_grad[0])
new_pert = model.pert + epsilon * sign_pert_grad
clip_new_pert = tf.clip_by_value(new_pert, -epsilon, epsilon)
assigned = tf.assign(model.pert, clip_new_pert)
# Train
no_pert_grad = [(tf.zeros_like(v), v) if 'perturbation' in v.name else (g, v) for g, v in grads]
with tf.control_dependencies([assigned]):
min_step = optimizer.apply_gradients(no_pert_grad, global_step=global_step)
    tf.variables_initializer([model.pert])  # tf.initialize_variables was removed from TF; note this initializer op is created but never run here
# Setting up the Tensorboard and checkpoint outputs
if not os.path.exists(model_dir):
os.makedirs(model_dir)
saver = tf.train.Saver(max_to_keep=1)
tf.summary.scalar('accuracy', model.accuracy)
tf.summary.scalar('xent', model.xent / train_batch_size)
tf.summary.scalar('total loss', total_loss / train_batch_size)
merged_summaries = tf.summary.merge_all()
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1.0)
with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
print('\n\n********** free training for epsilon=%.1f using m_replay=%d **********\n\n' % (epsilon, replay_m))
print('important params >>> \n model dir: %s \n dataset: %s \n training batch size: %d \n' % (model_dir, dataset, train_batch_size))
if dataset == 'cifar100':
            print('the ride for CIFAR100 is bumpy -- fasten your seatbelts! \n \
                you will probably see the training and validation accuracy fluctuating a lot early in training \n \
                this is natural especially for large replay_m values because we see that mini-batch so many times.')
# initialize data augmentation
if dataset == 'cifar10':
data = cifar10_input.AugmentedCIFAR10Data(raw_data, sess, model)
else:
data = cifar100_input.AugmentedCIFAR100Data(raw_data, sess, model)
# Initialize the summary writer, global variables, and our time counter.
summary_writer = tf.summary.FileWriter(model_dir + '/train', sess.graph)
eval_summary_writer = tf.summary.FileWriter(model_dir + '/eval')
sess.run(tf.global_variables_initializer())
# Main training loop
for ii in range(train_steps):
if ii % replay_m == 0:
x_batch, y_batch = data.train_data.get_next_batch(train_batch_size, multiple_passes=True)
nat_dict = {model.x_input: x_batch, model.y_input: y_batch}
x_eval_batch, y_eval_batch = data.eval_data.get_next_batch(train_batch_size, multiple_passes=True)
eval_dict = {model.x_input: x_eval_batch, model.y_input: y_eval_batch}
# Output to stdout
if ii % summary_steps == 0:
train_acc, summary = sess.run([model.accuracy, merged_summaries], feed_dict=nat_dict)
summary_writer.add_summary(summary, global_step.eval(sess))
val_acc, summary = sess.run([model.accuracy, merged_summaries], feed_dict=eval_dict)
eval_summary_writer.add_summary(summary, global_step.eval(sess))
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}% -- validation nat accuracy {:.4}%'.format(train_acc * 100,
val_acc * 100))
sys.stdout.flush()
# Tensorboard summaries
elif ii % out_steps == 0:
nat_acc = sess.run(model.accuracy, feed_dict=nat_dict)
print('Step {}: ({})'.format(ii, datetime.now()))
print(' training nat accuracy {:.4}%'.format(nat_acc * 100))
# Write a checkpoint
if (ii+1) % checkpoint_steps == 0:
saver.save(sess, os.path.join(model_dir, 'checkpoint'), global_step=global_step)
# Actual training step
sess.run(min_step, feed_dict=nat_dict)
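def _pert_update_sketch(pert, grad_wrt_pert, eps):
    """Plain-NumPy sketch of the perturbation step assembled above with TF ops
    (one sign-gradient ascent step followed by projection onto the L-infinity
    epsilon ball). Illustrative only; not called by this script."""
    return np.clip(pert + eps * np.sign(grad_wrt_pert), -eps, eps)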
if __name__ == '__main__':
args = config.get_args()
train(**vars(args))
| 2.453125
| 2
|
DataGenSalientIU_DUC_allAlignments_CDLM.py
|
oriern/ClusterProp
| 2
|
12784138
|
import pandas as pd
from transformers import BertTokenizer, RobertaTokenizer, AutoTokenizer
import os
import numpy as np
import re
import glob
from nltk import sent_tokenize
from utils import num_tokens
import math
def read_generic_file(filepath):
""" reads any generic text file into
list containing one line as element
"""
text = []
with open(filepath, 'r') as f:
for line in f.read().splitlines():
text.append(line.strip())
return text
def offset_str2list(offset):
return [[int(start_end) for start_end in offset.split(',')] for offset in offset.split(';')]
def offset_decreaseSentOffset(sentOffset, scu_offsets):
return [[start_end[0] - sentOffset, start_end[1] - sentOffset] for start_end in scu_offsets]
def insert_string(string, index, value):
return string[:index] + value + string[index:]
# the next *four* functions are taken from PreSumm implementation
def _get_ngrams(n, text):
"""Calcualtes n-grams.
Args:
n: which n-grams to calculate
text: An array of tokens
Returns:
A set of n-grams
"""
ngram_set = set()
text_length = len(text)
max_index_ngram_start = text_length - n
for i in range(max_index_ngram_start + 1):
ngram_set.add(tuple(text[i:i + n]))
return ngram_set
def _get_word_ngrams(n, sentences):
"""Calculates word n-grams for multiple sentences.
"""
assert len(sentences) > 0
assert n > 0
# words = _split_into_words(sentences)
words = sum(sentences, [])
# words = [w for w in words if w not in stopwords]
return _get_ngrams(n, words)
def cal_rouge(evaluated_ngrams, reference_ngrams):
reference_count = len(reference_ngrams)
evaluated_count = len(evaluated_ngrams)
overlapping_ngrams = evaluated_ngrams.intersection(reference_ngrams)
overlapping_count = len(overlapping_ngrams)
if evaluated_count == 0:
precision = 0.0
else:
precision = overlapping_count / evaluated_count
if reference_count == 0:
recall = 0.0
else:
recall = overlapping_count / reference_count
f1_score = 2.0 * ((precision * recall) / (precision + recall + 1e-8))
return {"f": f1_score, "p": precision, "r": recall}
def greedy_selection(doc_sent_list, abstract_sent_list, summary_size=1000):
def _rouge_clean(s):
return re.sub(r'[^a-zA-Z0-9 ]', '', s)
max_rouge = 0.0
abstract = sum(abstract_sent_list, [])
abstract = _rouge_clean(' '.join(abstract)).split()
sents = [_rouge_clean(' '.join(s)).split() for s in doc_sent_list]
evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
reference_1grams = _get_word_ngrams(1, [abstract])
evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
reference_2grams = _get_word_ngrams(2, [abstract])
selected = []
for s in range(summary_size):
cur_max_rouge = max_rouge
cur_id = -1
for i in range(len(sents)):
if (i in selected):
continue
c = selected + [i]
candidates_1 = [evaluated_1grams[idx] for idx in c]
candidates_1 = set.union(*map(set, candidates_1))
candidates_2 = [evaluated_2grams[idx] for idx in c]
candidates_2 = set.union(*map(set, candidates_2))
rouge_1 = cal_rouge(candidates_1, reference_1grams)['f']
rouge_2 = cal_rouge(candidates_2, reference_2grams)['f']
rouge_score = rouge_1 + rouge_2
if rouge_score > cur_max_rouge:
cur_max_rouge = rouge_score
cur_id = i
if (cur_id == -1):
return selected
selected.append(cur_id)
max_rouge = cur_max_rouge
return sorted(selected)
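def _greedy_selection_example():
    """Illustrative sketch (not called anywhere): the greedy oracle picks the sentence
    whose unigrams/bigrams best cover the abstract."""
    doc_sents = [['the', 'cat', 'sat', 'on', 'the', 'mat'], ['dogs', 'bark', 'loudly']]
    abstract = [['the', 'cat', 'sat']]
    return greedy_selection(doc_sents, abstract, summary_size=1)  # -> [0]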
def greedy_selection_MDS(doc_sent_list, abstracts, summary_size=1000):
def _rouge_clean(s):
return re.sub(r'[^a-zA-Z0-9 ]', '', s)
max_rouge = 0.0
# abstract = sum(abstract_sent_list, [])
abstracts = [_rouge_clean(abstract.lower().replace('...',' ... ')).split() for abstract in abstracts]
# abstract = _rouge_clean(' '.join(abstract)).split()
sents = [_rouge_clean(s.lower().replace('...',' ... ')).split() for s in doc_sent_list]
evaluated_1grams = [_get_word_ngrams(1, [sent]) for sent in sents]
evaluated_2grams = [_get_word_ngrams(2, [sent]) for sent in sents]
references_1grams = []
references_2grams = []
for abstract in abstracts:
references_1grams.append(_get_word_ngrams(1, [abstract]))
references_2grams.append(_get_word_ngrams(2, [abstract]))
selected = []
for s in range(summary_size):
cur_max_rouge = max_rouge
cur_id = -1
for i in range(len(sents)):
if (i in selected):
continue
c = selected + [i]
candidates_1 = [evaluated_1grams[idx] for idx in c]
candidates_1 = set.union(*map(set, candidates_1))
candidates_2 = [evaluated_2grams[idx] for idx in c]
candidates_2 = set.union(*map(set, candidates_2))
rouge_1 = np.mean([cal_rouge(candidates_1, reference_1grams)['f'] for reference_1grams in references_1grams])
rouge_2 = np.mean([cal_rouge(candidates_2, reference_2grams)['f'] for reference_2grams in references_2grams])
rouge_score = rouge_1 + rouge_2
if rouge_score > cur_max_rouge:
cur_max_rouge = rouge_score
cur_id = i
if (cur_id == -1):
return selected
selected.append(cur_id)
max_rouge = cur_max_rouge
return sorted(selected)
def add_sent_special_tok(document, OIE_row = None):
doc_sents = sent_tokenize(document)#[:20]
if OIE_row is not None: #if main document
doc_sents = doc_sents[:MAX_SENT_MAIN_DOC]
sent_found_flag = False
for sent_idx, sent in enumerate(doc_sents):
if sent == OIE_row['docSentText']:
sent_found_flag = True
doc_sents[sent_idx] = add_OIE_special_tok(OIE_row['docSpanOffsets'], OIE_row['docSentCharIdx'], sent)
if num_tokens('<doc-s> ' + '<s> ' + ' </s> <s> '.join(doc_sents[:sent_idx+1]) + ' </s>' + ' </doc-s>', tokenizer,
add_special_tokens=True)> MAX_TOKENS:
return None
break
if not sent_found_flag:
return None
else: #if context document
doc_sents = doc_sents[:MAX_SENT_CONTEXT_DOC]
document = '<s> ' + ' </s> <s> '.join(doc_sents) + ' </s>'
return document
def adding_files_context(file_context_combination, data_path, topic_dir):
documents = []
for file_context in file_context_combination:
text = read_generic_file(os.path.join(data_path, topic_dir, file_context))
document = " ".join(text)
document = add_sent_special_tok(document)
document = add_doc_special_tok(document)
documents.append(document)
context = ' '.join(documents)
return context
def add_special_tok(row, document):
document_tmp = document[:]#add_OIE_special_tok(docSpanOffsets, document)
document_tmp = add_sent_special_tok(document_tmp, row)
if document_tmp is not None:
document_tmp = add_doc_special_tok(document_tmp)
return document_tmp
def add_doc_special_tok(document_tmp):
return '<doc-s> ' + document_tmp + ' </doc-s>'
def add_OIE_special_tok(docSpanOffsets, docSentCharIdx, sent, special_tokens_for_global_attn = True):
# document_tmp = document[:]
span_offsets = offset_str2list(docSpanOffsets)
offsets = offset_decreaseSentOffset(docSentCharIdx, span_offsets)
# assume we have max 2 parts
if special_tokens_for_global_attn:
for offset in offsets[::-1]: #[::-1] start from the end so the remain offsets won't be shifted
sent = insert_string(sent, offset[1], ' <OIE1_END> ')
sent = insert_string(sent, offset[0], ' <OIE1_START> ')
else:
for offset in offsets[::-1]: #[::-1] start from the end so the remain offsets won't be shifted
sent = insert_string(sent, offset[1], ' > ')
sent = insert_string(sent, offset[0], ' < ')
return sent
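def _oie_markup_example():
    """Illustrative sketch (not called anywhere): wrap a span with the OIE marker tokens.
    The sentence and the offset string '10,25' below are hypothetical; the offsets use
    the same 'start,end;start,end' format consumed by add_OIE_special_tok."""
    sent = 'The court ruled on Monday.'
    return add_OIE_special_tok('10,25', 0, sent)
    # -> 'The court  <OIE1_START> ruled on Monday <OIE1_END> .'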
def read_abstracts(DATASET, data_path, topic_dir):
abstracts = []
if DATASET.startswith('TAC'):
# for summary_path in glob.iglob(
# data_path + '/summaries/' + topic_dir[:-3].upper() + topic_dir[-2:].upper() + '.*'):
for summary_path in glob.iglob(
data_path + '/summaries/' + topic_dir[:-3].upper() + '*'):
summary = ' '.join(read_generic_file(summary_path))
abstracts.append(summary)
else:
for summary_path in glob.iglob(data_path + '/summaries/' + topic_dir[:-1].upper() + '.*'):
summary = ' '.join(read_generic_file(summary_path))
abstracts.append(summary)
return abstracts
def add_instance(full_instance, tokenizer, row, highlights_list, highlights_metadata_list, file_context_combination, alignment_label='alignment_label'):
full_instance, global_attention_idx = extract_global_attention_idx(full_instance, tokenizer)
print('num tokens:', num_tokens(full_instance, tokenizer, add_special_tokens=False))
highlights_list.append([full_instance, row[alignment_label], global_attention_idx, row['greedyMaxRouge']])
highlights_metadata_list.append(row.tolist()+ [file_context_combination])
def replace_special_token(text, special_token_char_idxes, old_special_token, new_special_token):
text = text[:special_token_char_idxes[-1]] + new_special_token + text[special_token_char_idxes[-1] + len(
old_special_token):] # replace '<OIE1_START>' with '<'
special_token_char_idxes[-1] += 1 # include new special token '<'
return text, special_token_char_idxes
def extract_global_attention_idx(text, tokenizer, model_max_tokens = None):
if model_max_tokens is None:
model_max_tokens = MAX_TOKENS
    # Replace the new special tokens with '<' and '>' so the model won't have to learn new tokens.
special_tokens_idx_list = []
special_token_char_idxes = []
mark_start_idx = text.find('<OIE1_START>')
while mark_start_idx > -1:
# find special_token_char_idxes
special_token_char_idxes.append(mark_start_idx)
text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_START>', '<')
special_token_char_idxes.append(text.find('<OIE1_END>'))
text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_END>', '>')
mark_start_idx = text.find('<OIE1_START>')
# #find special_token_char_idxes
# special_token_char_idxes = []
# special_token_char_idxes.append(text.find('<OIE1_START>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_START>', '<')
# special_token_char_idxes.append(text.find('<OIE1_END>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE1_END>', '>')
# start_idx2 = text.find('<OIE2_START>')
# if start_idx2 > -1: #if exists
# special_token_char_idxes.append(start_idx2)
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE2_START>', '<')
# special_token_char_idxes.append(text.find('<OIE2_END>'))
# text, special_token_char_idxes = replace_special_token(text, special_token_char_idxes, '<OIE2_END>', '>')
# find special token idxes
for special_token_char_idx in special_token_char_idxes:
special_token_prev_text = text[:special_token_char_idx]
        special_token_idx = num_tokens(special_token_prev_text, tokenizer)  # the special start-of-sentence token is included, so the token count of the prefix equals the marker's index
        assert(('<' in tokenizer.tokenize(text)[special_token_idx-1]) or ('>' in tokenizer.tokenize(text)[special_token_idx-1]))  # check that it finds the special token; special_token_idx-1 because tokenize() does not include the special start-of-sentence token
        assert(special_token_idx < model_max_tokens)  # it shouldn't exceed the model limit (indices 0..model_max_tokens-1), and the last token is the special end-of-sentence token
special_tokens_idx_list.append(special_token_idx)
return text, special_tokens_idx_list
def createGT_labels(OIEs_topic, data_path, topic_dir, DATASET):
labels_column_name = 'greedyMaxRouge'
OIEs_topic['original_idx'] = range(len(OIEs_topic))
abstracts = read_abstracts(DATASET, data_path, topic_dir)
docFile_summSpan_cands = list(OIEs_topic['docSpanText'].values)
positive_summSpan_idx = greedy_selection_MDS(docFile_summSpan_cands, abstracts)
positive_summSpan_original_idx = [OIEs_topic['original_idx'].values[cand_idx] for cand_idx in positive_summSpan_idx]
scnd_filter_label = np.zeros(len(OIEs_topic), dtype=int)
scnd_filter_label[positive_summSpan_original_idx] = 1
if labels_column_name in OIEs_topic.columns:
scnd_filter_label = np.array(OIEs_topic[labels_column_name].to_list()) + scnd_filter_label
OIEs_topic[labels_column_name] = scnd_filter_label
##validation for correct indexes
positive_labeled_spans = OIEs_topic[OIEs_topic[labels_column_name] == 1]['docSpanText'].to_list()
positive_labeled_spans_validation = [docFile_summSpan_cands[cand_idx] in positive_labeled_spans for cand_idx in positive_summSpan_idx]
assert(all(positive_labeled_spans_validation))
return OIEs_topic
def add_sent_in_file_idx(OIEs_topic, data_path, topic_dir):
doc_sent_idx = np.zeros(len(OIEs_topic), dtype=int)
OIEs_topic['original_idx'] = range(len(OIEs_topic))
topic_files = os.listdir(os.path.join(data_path, topic_dir))
for file_idx, file in enumerate(topic_files):
OIEs_topic_file = OIEs_topic[OIEs_topic['documentFile']==file]
text = read_generic_file(os.path.join(data_path, topic_dir, file))
document = " ".join(text)
doc_sents = sent_tokenize(document)
for sent_idx, doc_sent in enumerate(doc_sents):
OIEs_topic_file_sent_original_idx = (OIEs_topic_file['original_idx'][OIEs_topic_file['docSentText'] == doc_sent]).values
doc_sent_idx[OIEs_topic_file_sent_original_idx] = sent_idx
OIEs_topic['inFile_sentIdx'] = doc_sent_idx
return OIEs_topic
def positive_augmentation(num_negative, num_positive, highlights_df, highlights_metadata_df, label_tag = 'label', SAFE_BUFFER = 100):
original_len_highlights_df = len(highlights_df)
augmentation_factor = (num_negative- num_positive - SAFE_BUFFER)/num_positive
if label_tag != 'label':
augmentation_factor = (num_negative - num_positive - SAFE_BUFFER) / len(highlights_df[highlights_df[label_tag]==1])
#threshold = 0.75
augmentation_factor = math.floor(augmentation_factor) #if augmentation_factor < (math.floor(augmentation_factor) + threshold) else math.ceil(augmentation_factor)
positive_highlights_df = highlights_df[highlights_df[label_tag] == 1]
positive_highlights_metadata_df = highlights_metadata_df.loc[positive_highlights_df.index, :]
if augmentation_factor >= 1:
for i in range(augmentation_factor):
highlights_df = highlights_df.append(positive_highlights_df)
highlights_metadata_df = highlights_metadata_df.append(positive_highlights_metadata_df)
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
# augmentation_factor = (num_negative - num_positive) / num_positive # if still not equal- add part of positive samples.
# if augmentation_factor > 0.5:
if num_negative - num_positive > SAFE_BUFFER:
selected_index = np.random.choice(positive_highlights_df.index.to_list(),num_negative - num_positive -SAFE_BUFFER,replace=False)
selected_positive_highlights_df = highlights_df[:original_len_highlights_df].loc[selected_index, :] #copy from original highlights_df (before augmentation) so rows won't be double augmented by their index
selected_positive_highlights_metadata_df = highlights_metadata_df[:original_len_highlights_df].loc[selected_index, :]
highlights_df = highlights_df.append(selected_positive_highlights_df)
highlights_metadata_df = highlights_metadata_df.append(selected_positive_highlights_metadata_df)
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
return highlights_df, highlights_metadata_df
##################################
###### main ##############
##################################
if __name__ == "__main__":
np.random.seed(42)
SET = 'train'
DATASETS = ['TAC2008','TAC2009','TAC2010']
NUM_CONTEXT_FILES = 9
MAX_TOKENS = 4096
filter_negative = False
FILTER_RATE = 0.4
over_sample_positive = False
MAX_SENT_MAIN_DOC = 20
MAX_SENT_CONTEXT_DOC = 9
sentences_level = False
if SET == 'train':
filter_negative = True
over_sample_positive = True
positive_label = 'greedyMaxRouge'
if filter_negative:
filter_negative_label = '_filter_negative'
else:
filter_negative_label = ''
if over_sample_positive:
over_sample_positive_label = '_over_sample_positive'
else:
over_sample_positive_label = ''
if sentences_level:
sentences_level_label = '_sentence_based'
else:
sentences_level_label = ''
OUTPUT_PATH = 'OIE_highlights/{}_{}_CDLM{}{}{}_fixed_truncated.csv'.format("_".join(DATASETS), SET,
filter_negative_label,
over_sample_positive_label,
sentences_level_label)
highlights_list = []
highlights_metadata_list = []
# tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
tokenizer = AutoTokenizer.from_pretrained('./CDLM/')
tokenizer.add_special_tokens(
{'additional_special_tokens': ['<OIE1_START>', '<OIE1_END>', '<OIE2_START>', '<OIE2_END>']})
for DATASET in DATASETS:
data_path = 'data/{}/'.format(DATASET)
OIEs = pd.read_csv('OIE_cands/OIE_cands_{}.csv'.format(DATASET))
if sentences_level:
OIEs['docSpanText'] = OIEs['docSentText']
OIEs['docSpanOffsets'] = OIEs['docSentCharIdx'].apply(str) + ', ' + (
OIEs['docSentCharIdx'] + OIEs['docSentText'].apply(len)).apply(str)
used_positive_spans = 0
for topic_dir in os.listdir(data_path):
print(topic_dir)
if topic_dir == 'summaries':
continue
OIEs_topic = OIEs[OIEs['topic'] == topic_dir]
if DATASET.startswith('TAC'):
topic_dir_tac2011 = topic_dir[:-3].upper() + topic_dir[-2:].upper()
OIEs_topic = OIEs[OIEs['topic'] == topic_dir_tac2011]
OIEs_topic = add_sent_in_file_idx(OIEs_topic, data_path, topic_dir)
OIEs_topic = OIEs_topic[OIEs_topic['inFile_sentIdx'] < MAX_SENT_MAIN_DOC]
OIEs_topic = createGT_labels(OIEs_topic, data_path, topic_dir, DATASET)
topic_files = os.listdir(os.path.join(data_path, topic_dir))
topic_dates = [topic[re.search(r"\d", topic).start():] for topic in topic_files]#[topic_file[3:] for topic_file in topic_files]
topic_files = [x for _, x in sorted(zip(topic_dates, topic_files))]
for file_idx, file in enumerate(topic_files):
text = read_generic_file(os.path.join(data_path, topic_dir, file))
document = " ".join(text)
post_context_files = topic_files[file_idx + 1:file_idx + 1 + NUM_CONTEXT_FILES]
pre_context_files = []
if len(post_context_files) < NUM_CONTEXT_FILES:
diff_len = NUM_CONTEXT_FILES - len(post_context_files)
pre_context_files = topic_files[max(0, file_idx - diff_len):file_idx] # + context_files
assert (len(post_context_files + pre_context_files) == min(NUM_CONTEXT_FILES, len(topic_files) - 1))
# trunced_document = truncated_text_for_openie(document, tokenizer)
OIEs_topic_docFile = OIEs_topic[
OIEs_topic['documentFile'] == file]
for index, row in OIEs_topic_docFile.iterrows():
main_document = add_special_tok(row, document)
if main_document is None:
continue
if row[positive_label]:
used_positive_spans += 1
else:
if filter_negative:
if np.random.choice([0, 1], p=[FILTER_RATE,
1 - FILTER_RATE]): # 'continue' in random (1 - FILTER_RATE) of negative cases.
continue
# for file_context_combination in [context_files]:# combinations(topic_files_tmp,NUM_CONTEXT_FILES): # all context combinations of 2 files
pre_documents_context = adding_files_context(pre_context_files, data_path, topic_dir)
post_documents_context = adding_files_context(post_context_files, data_path, topic_dir)
file_context_combination = pre_context_files + post_context_files
full_instance = pre_documents_context + ' ' + main_document + ' ' + post_documents_context
add_instance(full_instance, tokenizer, row, highlights_list,
highlights_metadata_list, file_context_combination, alignment_label=positive_label)
print(len(highlights_list))
highlights_df = pd.DataFrame(highlights_list, columns=['', 'label', 'global_attention_idx', 'greedyMaxRouge'])
highlights_metadata_df = pd.DataFrame(highlights_metadata_list,
columns=OIEs_topic.columns.tolist() + ['doc_context'])
num_negative = len(highlights_df[highlights_df['label'] == 0])
num_positive = len(highlights_df[highlights_df['label'] == 1])
print('negative samples:', num_negative)
print('positive samples:', num_positive)
if over_sample_positive:
highlights_df, highlights_metadata_df = positive_augmentation(num_negative, num_positive, highlights_df,
highlights_metadata_df)
highlights_df = highlights_df[['', 'label', 'global_attention_idx']]
highlights_df.to_csv(OUTPUT_PATH, index=False)
highlights_metadata_df.to_csv(OUTPUT_PATH[:-4] + '_metadata.csv', index=False)
| 2.921875
| 3
|
Code/grace/source.py
|
AndreasMadsen/grace
| 1
|
12784139
|
<gh_stars>1-10
__all__ = ["filelist", "getgrid", "getdata", "getposition"]
import os
import os.path as path
import csv
import datetime
import numpy as np
# Resolve the absolute directory path the grids directory
grids = path.join(path.dirname(path.realpath(__file__)), 'grids')
# Generate a list of all names for the datafiles in the grids directory;
# neither the directory path nor the file extension is part of the final
# name
filelist = []
for name in os.listdir(grids):
filepath = path.join(grids, name)
if path.isdir(filepath) or (name[0] == '.') or ('\r' in filepath): continue
filelist.append(os.path.splitext(name)[0])
def julian2date(julian):
"""
Return a python date object from a julian date given as an int.
"""
dt = datetime.timedelta(julian - 18262)
    return datetime.date(2000, 1, 1) + dt
def getdate(name):
"""
Return two python dates as a tuple from the given datafile name,
first is the start day, last is the end day.
"""
start = int(name[-11:-6])
end = int(name[-5:])
return (julian2date(start), julian2date(end))
def getgrid(name):
"""
Return a numpy array with the shape (180, 360). The index is then
mat[latitude, longitude]
Latitude is from +89.5 to -89.5
Longitude is from -179.5 to +179.5
"""
filepath = path.join(grids, name + '.txt')
reader = csv.reader(open(filepath), skipinitialspace=True, delimiter=" ")
rows = [map(float, row) for row in reader]
return np.asarray(rows).reshape((180, 360))
def getposition(index):
"""
    Return (latitude, longitude) as a tuple from a tuple containing
    the (row, column) index associated with the numpy matrix, meaning
    (latitudeIndex, longitudeIndex)
"""
if (index[0] < 0 or index[0] >= 180): raise LookupError('latitude index is out of range')
if (index[1] < 0 or index[1] >= 360): raise LookupError('longitude index is out of range')
return (89.5 - index[0], index[1] - 179.5)
if __name__ == "__main__":
# test filelist
assert len(filelist) == 341
# test getdate
assert map(str, getdate(filelist[0])) == ['2002-07-29', '2002-08-07']
    # test getgrid
    assert getgrid(filelist[0]).shape == (180, 360)
# test getposition
assert getposition((0 ,0 )) == (+89.5, -179.5)
assert getposition((179,359)) == (-89.5, +179.5)
print "all tests completed"
| 3.140625
| 3
|
setpasswd/src/__init__.py
|
Haehnchen/enigma2-plugins
| 1
|
12784140
|
<reponame>Haehnchen/enigma2-plugins
# -*- coding: ISO-8859-1 -*-
from Components.Language import language
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, SCOPE_LANGUAGE
import os,gettext
PluginLanguageDomain = "SetPasswd"
PluginLanguagePath = "SystemPlugins/SetPasswd/locale"
def localeInit():
lang = language.getLanguage()[:2] # getLanguage returns e.g. "fi_FI" for "language_country"
os.environ["LANGUAGE"] = lang # Enigma doesn't set this (or LC_ALL, LC_MESSAGES, LANG). gettext needs it!
print "[SetPasswd] set language to ", lang
gettext.bindtextdomain(PluginLanguageDomain, resolveFilename(SCOPE_PLUGINS, PluginLanguagePath))
def _(txt):
t = gettext.dgettext(PluginLanguageDomain, txt)
if t == txt:
print "[SetPasswd] fallback to default translation for", txt
t = gettext.gettext(txt)
return t
localeInit()
language.addCallback(localeInit)
| 2.3125
| 2
|
boot_django.py
|
Volkova-Natalia/django_rest_auth_email_confirm_reset
| 0
|
12784141
|
<reponame>Volkova-Natalia/django_rest_auth_email_confirm_reset
# This file sets up and configures Django. It's used by scripts that need to
# execute as if running in a Django server.
import os
import django
from django.conf import settings
BASE_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), "django_rest_auth_email_confirm_reset"))
FRONTEND_SCHEME = 'http'
FRONTEND_HOST = 'localhost:3000'
def boot_django():
if not settings.configured:
settings.configure(
BASE_DIR=BASE_DIR,
DEBUG=True,
ALLOWED_HOSTS=[
'127.0.0.1',
'localhost',
],
INSTALLED_APPS=[
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'django_rest_auth_email_confirm_reset',
],
MIDDLEWARE=[
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
],
ROOT_URLCONF='django_rest_auth_email_confirm_reset.urls',
TEMPLATES=[
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [
os.path.join(BASE_DIR, 'templates'),
]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
],
DATABASES={
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
},
AUTH_USER_MODEL='django_rest_auth_email_confirm_reset.User',
AUTH_PASSWORD_VALIDATORS=[
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
],
LANGUAGE_CODE='en-us',
TIME_ZONE="UTC",
USE_I18N=True,
USE_L10N=True,
USE_TZ=True,
STATIC_URL='/static/',
STATICFILES_DIRS=[
os.path.join(BASE_DIR, 'static'),
os.path.join(BASE_DIR), # for swagger.json
os.path.join(os.path.dirname(BASE_DIR)), # for swagger.json
],
EMAIL_USE_TLS=True,
EMAIL_HOST='smtp.gmail.com',
EMAIL_HOST_USER='<EMAIL>',
EMAIL_HOST_PASSWORD='<PASSWORD>',
EMAIL_PORT=587,
EMAIL_BACKEND='django.core.mail.backends.console.EmailBackend',
FRONTEND_SCHEME=FRONTEND_SCHEME,
FRONTEND_HOST=FRONTEND_HOST,
CORS_ALLOW_ALL_ORIGINS=False,
CORS_ALLOWED_ORIGINS=[FRONTEND_SCHEME + r'://' + FRONTEND_HOST],
CORS_ALLOW_CREDENTIALS=True,
CSRF_TRUSTED_ORIGINS=[FRONTEND_HOST],
)
django.setup()
| 2.03125
| 2
|
tests/testLock.py
|
geek-plus/vlcp
| 1
|
12784142
|
'''
Created on 2015/12/14
:author: hubo
'''
from __future__ import print_function
import unittest
from vlcp.server.server import Server
from vlcp.event.runnable import RoutineContainer
from vlcp.event.lock import Lock, Semaphore
from vlcp.config.config import manager
class Test(unittest.TestCase):
def setUp(self):
self.server = Server()
def tearDown(self):
pass
def testLock(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
def testWith(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
with l:
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
def testLock2(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj2'))
self.server.serve()
self.assertEqual(obj[0], 1)
def testTrylock(self):
rc = RoutineContainer(self.server.scheduler)
result = []
def routineTrylock(key):
l = Lock(key, rc.scheduler)
locked = l.trylock()
result.append(locked)
for m in rc.waitWithTimeout(0.5):
yield m
l.unlock()
rc.subroutine(routineTrylock('testobj'))
rc.subroutine(routineTrylock('testobj'))
self.server.serve()
self.assertEqual(result, [True, False])
def testBeginlock(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
locked = l.beginlock(rc)
if not locked:
for m in rc.waitWithTimeout(1.0):
yield m
locked = l.trylock()
if not locked:
raise ValueError('Not locked')
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
for m in rc.doEvents():
yield m
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(1.0):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 4)
def testBeginlock2(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
def routineLock(key):
l = Lock(key, rc.scheduler)
locked = l.beginlock(rc)
if not locked:
for m in rc.waitWithTimeout(0.5):
yield m
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(1.0):
yield m
obj[0] = t + 1
l.unlock()
for m in rc.doEvents():
yield m
for m in l.lock(rc):
yield m
t = obj[0]
if t != 2:
obj[0] = t - 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
def testSemaphore(self):
rc = RoutineContainer(self.server.scheduler)
obj = [0]
smp = Semaphore('testobj', 2, rc.scheduler)
smp.create()
def routineLock(key):
l = Lock(key, rc.scheduler)
for m in l.lock(rc):
yield m
t = obj[0]
for m in rc.waitWithTimeout(0.5):
yield m
obj[0] = t + 1
l.unlock()
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
rc.subroutine(routineLock('testobj'))
self.server.serve()
self.assertEqual(obj[0], 2)
if __name__ == "__main__":
#import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| 2.328125
| 2
|
src/tests/__init__.py
|
miguelarbesu/asd-asd
| 0
|
12784143
|
"""Test suite for asd asd"""
| 0.894531
| 1
|
src/pokeapp/views.py
|
luisgdev/djangopoke
| 0
|
12784144
|
<filename>src/pokeapp/views.py
from django.http import JsonResponse
from django.core.serializers import serialize
from django.forms.models import model_to_dict
from .models import Pokemon, Stat, Evolution
# Create your views here.
def pokesearch(request, name):
pokemon: Pokemon = Pokemon.objects.get(name=name)
stats = []
for item in Stat.objects.filter(pokemon=pokemon):
stats.append(model_to_dict(item))
evolutions = []
for item in Evolution.objects.filter(evo_id=pokemon.evo_chain):
evolutions.append(model_to_dict(item))
response: dict = model_to_dict(pokemon)
response["stats"] = stats
response["evolutions"] = evolutions
return JsonResponse(response)
| 2.234375
| 2
|
Prep/ProgrammingPrac/temp.py
|
talk2sunil83/UpgradLearning
| 0
|
12784145
|
<reponame>talk2sunil83/UpgradLearning<filename>Prep/ProgrammingPrac/temp.py
# %%
from typing import Sequence
from numba import njit
# %%
def fib(n: int):
if(n <= 2):
return 1
return fib(n-1) + fib(n-2)
# print(fib(3))
# print(fib(5))
# print(fib(8))
def fib_memo(n: int, memo={1: 1, 2: 1}):
if n >= 1:
if (n in memo):
return memo[n]
memo[n] = fib_memo(n-1, memo) + fib_memo(n-2, memo)
return memo[n]
else:
print("Provide Positive int")
# %%
# print(fib_memo(100))
# @njit(fastmath=True, cache=True)
def grid_traveller(x: int, y: int, memo={}) -> int:
if (x, y) in memo.keys():
return memo[(x, y)]
if x*y == 0:
return 0
if x*y == 1:
return 1
res = grid_traveller(x-1, y, memo) + grid_traveller(x, y-1, memo)
memo[(x, y)] = res
return res
print(grid_traveller(1, 1))
print(grid_traveller(3, 3))
print(grid_traveller(10, 10))
# %%
def can_sum(target: int, arr: Sequence[int]) -> bool:
pass
# %%
def how_sum(target: int, arr: Sequence[int]) -> Sequence[int]:
pass
# %%
def can_sum(target: int, arr: Sequence[int]):  # note: this redefinition shadows the can_sum stub above
pass
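# Hedged sketch of a memoized can_sum (one possible solution to the stub above, not necessarily the intended one):
def _can_sum_memo_sketch(target: int, arr: Sequence[int], memo=None) -> bool:
    if memo is None:
        memo = {}
    if target in memo:
        return memo[target]
    if target == 0:
        return True
    if target < 0:
        return False
    memo[target] = any(_can_sum_memo_sketch(target - n, arr, memo) for n in arr)
    return memo[target]
# print(_can_sum_memo_sketch(7, [2, 3]))   # True
# print(_can_sum_memo_sketch(7, [2, 4]))   # False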
| 3.625
| 4
|
pandoc-convert/convert-directory.py
|
toffer/docker-data-only-container-demo
| 17
|
12784146
|
#!/usr/bin/env python
"""Convert Directory.
Usage: convert_directory.py <src_dir> <dest_dir>
-h --help show this
"""
import errno
import os
import subprocess
from docopt import docopt
def convert_directory(src, dest):
# Convert the files in place
for root, dirs, files in os.walk(src):
for filename in files:
name, ext = os.path.splitext(filename)
if ext in ['.md', '.markdown']:
html_filename = '.'.join([name, 'html'])
md_path = os.path.join(root, filename)
html_path = os.path.join(root, html_filename)
subprocess.call(['pandoc', md_path, '-s', '-o', html_path])
# Incredibly hacky way to move all files, except markdown files
# (Making sure image files get transferred to dest directory.)
subprocess.call(['rsync', '-a', src + '/', dest])
subprocess.call(['find', dest, '-name', '*.md', '-exec', 'rm', '{}', ';'])
subprocess.call(['find', dest, '-name', '*.markdown', '-exec', 'rm', '{}', ';'])
# Clean out generated html files in src directory.
subprocess.call(['find', src, '-name', '*.html', '-exec', 'rm', '{}', ';'])
if __name__ == '__main__':
args = docopt(__doc__, version='Convert Directory 0.1')
src = args['<src_dir>']
dest = args['<dest_dir>']
convert_directory(src, dest)
| 3.234375
| 3
|
build_site_col.py
|
griffij/eq_hazmap_tests
| 1
|
12784147
|
"""Test building site collection at interpolated points and
writing to nrml format.
Can run in parallel in order to deal with large site collections.
<NAME>
Geoscience Australia
"""
import os, sys
import numpy as np
from time import localtime, strftime, gmtime
import string
import pypar
from get_site_model import get_site_collection
from openquake.hazardlib.geo.point import Point
#from estimate_magnitude import build_site_col
def chunks(l, n):
"""Yield successive n-sized chunks from l."""
for i in range(0, len(l), n):
yield l[i:i + n]
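# e.g. list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]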
def build_site_col(sites_data, site_model_file, filename=None):
"""Interpolate vs30 values to sites of interest
"""
# sites_data = np.genfromtxt(site_file)
site_points = []
for i in range(len(sites_data[:,0])):
site_pt = Point(sites_data[i,0], sites_data[i,1])
site_points.append(site_pt)
sitecol = get_site_collection(site_model_file, site_points, None, filename)
return sitecol
# Set up paralell
proc = pypar.size() # Number of processors as specified by mpirun
myid = pypar.rank() # Id of of this process (myid in [0, proc-1])
node = pypar.get_processor_name() # Host name on which current process is running
print 'I am proc %d of %d on node %s' % (myid, proc, node)
#nruns = 320 # currently hard coded - need to improve this
t0 = pypar.time()
locations_file = 'data/jawa_bali_nt_sulawesi_sites_clean.csv'#jawa_sites.csv'
sites_file = 'data/jawa_bali_nt_sulawesi_site_model.xml'#jawa_site_model.xml'
outfile = 'data/jawa_bali_nt_sulawesi_site_model_full.xml'
#locations_file = 'data/jawa_sites_test.csv'
#sites_file = 'data/jawa_site_model.xml'
#outfile = 'data/jawa_site_model_test.xml'
#if myid == 0:
sites_data = np.genfromtxt(locations_file, delimiter=',')
#print sites_data
# Split sources
list_length = len(sites_data) / (proc)
print list_length
if (len(sites_data) % proc) > 0:
list_length +=1
pt_list = list(chunks(sites_data, list_length))
#print pt_list
for i in range(0, len(pt_list), 1):
if i % proc == myid:
run = "%03d" % i
# Apply geometrical filtering
print 'Building site model for run %s' % run
tmp_outfile = outfile[:-4] + '_' + str(run) + '.xml'
build_site_col(pt_list[i], sites_file, filename=tmp_outfile)
pypar.barrier()
if myid == 0:
outlines = []
tmp_pt_source_filename_list = []
tmp_pt_source_list = []
# Now combine into one file
for j in range(0, len(pt_list), 1):
tmp_pt_filename = outfile[:-4] + '_%03d.xml' % j
tmp_pt_source_filename_list.append(tmp_pt_filename)
for tmp_pt_source_file in tmp_pt_source_filename_list:
f_in = open(tmp_pt_source_file, 'r')
for line in f_in.readlines():
if line.lstrip().startswith('<site '):
outlines.append(line)
f_in.close()
f_out = open(outfile, 'w')
# write header
f_out.write('<?xml version="1.0" encoding="utf-8"?>\n')
f_out.write('<nrml\n')
f_out.write('xmlns="http://openquake.org/xmlns/nrml/0.4"\n')
    f_out.write('xmlns:gml="http://www.opengis.net/gml">\n')
f_out.write(' <siteModel>\n')
#outlines = [oxml + '\n' for oxml in outlines]
f_out.writelines(outlines)
f_out.write(' </siteModel>\n')
f_out.write('</nrml>')
ss = int(pypar.time() - t0)
h = ss / 3600
m = (ss % 3600) / 60
s = (ss % 3600) % 60
print "--------------------------------------------------------"
print 'P0: Total time (%i seconds): %s:%s:%s (hh:mm:ss)' % (ss,
string.zfill(h, 2),
string.zfill(m, 2),
string.zfill(s,2))
print "--------------------------------------------------------"
pypar.finalize()
| 2.234375
| 2
|
hknweb/candidate/tests/models/requirements/committee_project/test_committee_project.py
|
jyxzhang/hknweb
| 20
|
12784148
|
from django.test import TestCase
from hknweb.candidate.tests.models.utils import ModelFactory
class CommitteeProjectRequirementModelTests(TestCase):
def setUp(self):
semester = ModelFactory.create_semester(
semester="Spring",
year=0,
)
committeeproject = ModelFactory.create_committeeproject_requirement(
candidateSemesterActive=semester,
)
self.semester = semester
self.committeeproject = committeeproject
def test_str(self):
expected = "{} - {}".format(self.committeeproject.name, self.semester)
actual = str(self.committeeproject)
self.assertEqual(expected, actual)
| 2.609375
| 3
|
ogb_lsc/pcq/conformer_utils.py
|
kawa-work/deepmind-research
| 10,110
|
12784149
|
# Copyright 2021 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Conformer utilities."""
import copy
from typing import List, Optional
from absl import logging
import numpy as np
import rdkit
from rdkit import Chem
from rdkit.Chem import AllChem
import tensorflow.compat.v2 as tf
def generate_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
*,
random_seed: int = -1,
prune_rms_thresh: float = -1.0,
max_iter: int = -1,
fallback_to_random: bool = False,
) -> Chem.rdchem.Mol:
"""Generates conformers for a given molecule.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
    prune_rms_thresh: RMSD threshold used to prune conformers that are
too similar.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
Returns:
Copy of a `molecule` with added hydrogens. The returned molecule contains
force field-optimised conformers. The number of conformers is guaranteed to
be <= max_num_conformers.
"""
mol = copy.deepcopy(molecule)
mol = Chem.AddHs(mol)
mol = _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=False)
if max_iter > 0:
mol_with_conformers = _minimize_by_mmff(mol, max_iter)
if mol_with_conformers is None:
mol_with_conformers = _minimize_by_uff(mol, max_iter)
else:
mol_with_conformers = mol
# Aligns conformations in a molecule to each other using the first
# conformation as the reference.
AllChem.AlignMolConformers(mol_with_conformers)
# We remove hydrogens to keep the number of atoms consistent with the graph
# nodes.
mol_with_conformers = Chem.RemoveHs(mol_with_conformers)
return mol_with_conformers
def atom_to_feature_vector(
atom: rdkit.Chem.rdchem.Atom,
conformer: Optional[np.ndarray] = None,
) -> List[float]:
"""Converts rdkit atom object to feature list of indices.
Args:
atom: rdkit atom object.
    conformer: Generated conformer. Returns NaN values if set to None.
  Returns:
    List containing the (x, y, z) position of the atom in the conformer.
"""
if conformer:
pos = conformer.GetAtomPosition(atom.GetIdx())
return [pos.x, pos.y, pos.z]
return [np.nan, np.nan, np.nan]
def compute_conformer(smile: str, max_iter: int = -1) -> np.ndarray:
"""Computes conformer.
Args:
smile: Smile string.
max_iter: Maximum number of iterations to perform when optimising MMFF force
field. If set to <= 0, energy optimisation is not performed.
Returns:
    A numpy array containing the (x, y, z) conformer position for each atom.
Raises:
RuntimeError: If unable to convert smile string to RDKit mol.
"""
mol = rdkit.Chem.MolFromSmiles(smile)
if not mol:
raise RuntimeError('Unable to convert smile to molecule: %s' % smile)
conformer_failed = False
try:
mol = generate_conformers(
mol,
max_num_conformers=1,
random_seed=45,
prune_rms_thresh=0.01,
max_iter=max_iter)
except IOError as e:
logging.exception('Failed to generate conformers for %s . IOError %s.',
smile, e)
conformer_failed = True
except ValueError:
logging.error('Failed to generate conformers for %s . ValueError', smile)
conformer_failed = True
except: # pylint: disable=bare-except
logging.error('Failed to generate conformers for %s.', smile)
conformer_failed = True
atom_features_list = []
conformer = None if conformer_failed else list(mol.GetConformers())[0]
for atom in mol.GetAtoms():
atom_features_list.append(atom_to_feature_vector(atom, conformer))
conformer_features = np.array(atom_features_list, dtype=np.float32)
return conformer_features
def get_random_rotation_matrix(include_mirror_symmetry: bool) -> tf.Tensor:
"""Returns a single random rotation matrix."""
rotation_matrix = _get_random_rotation_3d()
if include_mirror_symmetry:
random_mirror_symmetry = _get_random_mirror_symmetry()
rotation_matrix = tf.matmul(rotation_matrix, random_mirror_symmetry)
return rotation_matrix
def rotate(vectors: tf.Tensor, rotation_matrix: tf.Tensor) -> tf.Tensor:
"""Batch of vectors on a single rotation matrix."""
return tf.matmul(vectors, rotation_matrix)
def _embed_conformers(
molecule: Chem.rdchem.Mol,
max_num_conformers: int,
random_seed: int,
prune_rms_thresh: float,
fallback_to_random: bool,
*,
use_random: bool = False,
) -> Chem.rdchem.Mol:
"""Embeds conformers into a copy of a molecule.
  If random coordinates are allowed, the embedding is first attempted without them,
  and random coordinates are used only if that attempt fails.
Args:
molecule: molecular representation of the compound.
max_num_conformers: maximum number of conformers to generate. If pruning is
done, the returned number of conformers is not guaranteed to match
max_num_conformers.
random_seed: random seed to use for conformer generation.
    prune_rms_thresh: RMSD threshold used to prune conformers that are
too similar.
fallback_to_random: if conformers cannot be obtained, use random coordinates
to initialise.
    use_random: Use random coordinates. Shouldn't be set by any caller except
      this function itself.
Returns:
A copy of a molecule with embedded conformers.
Raises:
ValueError: if conformers cannot be obtained for a given molecule.
"""
mol = copy.deepcopy(molecule)
# Obtains parameters for conformer generation.
# In particular, ETKDG is experimental-torsion basic knowledge distance
# geometry, which allows to randomly generate an initial conformation that
# satisfies various geometric constraints such as lower and upper bounds on
# the distances between atoms.
params = AllChem.ETKDGv3()
params.randomSeed = random_seed
params.pruneRmsThresh = prune_rms_thresh
params.numThreads = -1
params.useRandomCoords = use_random
conf_ids = AllChem.EmbedMultipleConfs(mol, max_num_conformers, params)
if not conf_ids:
if not fallback_to_random or use_random:
raise ValueError('Cant get conformers')
return _embed_conformers(
mol,
max_num_conformers,
random_seed,
prune_rms_thresh,
fallback_to_random,
use_random=True)
return mol
def _minimize_by_mmff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Optional[Chem.rdchem.Mol]:
"""Minimizes forcefield for conformers using MMFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers; or None if MMFF
cannot be performed.
"""
molecule_props = AllChem.MMFFGetMoleculeProperties(molecule)
if molecule_props is None:
return None
mol = copy.deepcopy(molecule)
for conf_id in range(mol.GetNumConformers()):
ff = AllChem.MMFFGetMoleculeForceField(
mol, molecule_props, confId=conf_id, ignoreInterfragInteractions=False)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _minimize_by_uff(
molecule: Chem.rdchem.Mol,
max_iter: int,
) -> Chem.rdchem.Mol:
"""Minimizes forcefield for conformers using UFF algorithm.
Args:
molecule: a datastructure containing conformers.
max_iter: number of maximum iterations to use when optimising force field.
Returns:
A copy of a `molecule` containing optimised conformers.
"""
mol = copy.deepcopy(molecule)
conf_ids = range(mol.GetNumConformers())
for conf_id in conf_ids:
ff = AllChem.UFFGetMoleculeForceField(mol, confId=conf_id)
ff.Initialize()
# minimises a conformer within a mol in place.
ff.Minimize(max_iter)
return mol
def _get_symmetry_rotation_matrix(sign: tf.Tensor) -> tf.Tensor:
"""Returns the 2d/3d matrix for mirror symmetry."""
zero = tf.zeros_like(sign)
one = tf.ones_like(sign)
# pylint: disable=bad-whitespace,bad-continuation
rot = [sign, zero, zero,
zero, one, zero,
zero, zero, one]
# pylint: enable=bad-whitespace,bad-continuation
shape = (3, 3)
rot = tf.stack(rot, axis=-1)
rot = tf.reshape(rot, shape)
return rot
def _quaternion_to_rotation_matrix(quaternion: tf.Tensor) -> tf.Tensor:
"""Converts a batch of quaternions to a batch of rotation matrices."""
q0 = quaternion[0]
q1 = quaternion[1]
q2 = quaternion[2]
q3 = quaternion[3]
r00 = 2 * (q0 * q0 + q1 * q1) - 1
r01 = 2 * (q1 * q2 - q0 * q3)
r02 = 2 * (q1 * q3 + q0 * q2)
r10 = 2 * (q1 * q2 + q0 * q3)
r11 = 2 * (q0 * q0 + q2 * q2) - 1
r12 = 2 * (q2 * q3 - q0 * q1)
r20 = 2 * (q1 * q3 - q0 * q2)
r21 = 2 * (q2 * q3 + q0 * q1)
r22 = 2 * (q0 * q0 + q3 * q3) - 1
matrix = tf.stack([r00, r01, r02,
r10, r11, r12,
r20, r21, r22], axis=-1)
return tf.reshape(matrix, [3, 3])
def _get_random_rotation_3d() -> tf.Tensor:
random_quaternions = tf.random.normal(
shape=[4], dtype=tf.float32)
random_quaternions /= tf.linalg.norm(
random_quaternions, axis=-1, keepdims=True)
return _quaternion_to_rotation_matrix(random_quaternions)
def _get_random_mirror_symmetry() -> tf.Tensor:
random_0_1 = tf.random.uniform(
shape=(), minval=0, maxval=2, dtype=tf.int32)
random_signs = tf.cast((2 * random_0_1) - 1, tf.float32)
return _get_symmetry_rotation_matrix(random_signs)
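def _rotation_sanity_sketch():
  """Illustrative sketch (not used by this module): a random rotation matrix should be
  orthogonal with determinant +1; composing with the random mirror symmetry can flip
  the determinant to -1."""
  rot = get_random_rotation_matrix(include_mirror_symmetry=False)
  orthogonality = tf.matmul(rot, rot, transpose_b=True)  # approximately the 3x3 identity
  return orthogonality, tf.linalg.det(rot)  # determinant is approximately +1.0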
| 2.109375
| 2
|
remu/plotting.py
|
ast0815/likelihood-machine
| 0
|
12784150
|
"""Module for handling plotting functions
This module contains plotting classes to plot :class:`.Binning` objects.
Examples
--------
::
plt = plotting.get_plotter(binning)
plt.plot_values()
plt.savefig('output.png')
"""
from itertools import cycle
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import ticker
from . import binning
def get_plotter(obj, *args, **kwargs):
"""Return a suitable plotting class instance for the object.
Parameters
----------
obj : object
The object for which a plotter should be returned.
*args : optional
**kwargs : optional
Additional arguments are passed to the init method of the plotter.
"""
if isinstance(obj, binning.RectilinearBinning):
return RectilinearBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.LinearBinning):
return LinearBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.CartesianProductBinning):
return CartesianProductBinningPlotter(obj, *args, **kwargs)
if isinstance(obj, binning.Binning):
return BinningPlotter(obj, *args, **kwargs)
if isinstance(obj, np.ndarray):
return ArrayPlotter(obj, *args, **kwargs)
raise TypeError(f"No known Plotter class for type {type(obj)}")
class Plotter:
"""Plotting base class.
Parameters
----------
figax : (Figure, Axes), optional
The figure and axis to plot in.
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
"""
def __init__(self, figax=None):
self.figax = figax
self.color = cycle("C%d" % (i,) for i in range(0, 10))
self.hatch = cycle([r"//", r"\\", r"O", "*"])
def __del__(self):
"""Clean up figures."""
if self.figax is not None:
plt.close(self.figax[0])
def subplots(self, *args, **kwargs):
"""Return the ``(Figure, Axes)`` tuple of the binning.
Creates one using Matplotlib's ``subplots``, if necessary.
"""
if self.figax is None:
self.figax = plt.subplots(*args, **kwargs)
return self.figax
def savefig(self, *args, **kwargs):
"""Save the figure."""
kwargs2 = {"bbox_inches": "tight"}
kwargs2.update(kwargs)
self.figax[0].savefig(*args, **kwargs2)
class ArrayPlotter(Plotter):
"""Plotting class for numpy arrays.
Parameters
----------
array : ndarray
The ndarray to be plotted.
bins_per_row : int, optional
How many bins are going to be plotted per row.
**kwargs : optional
        Additional keyword arguments are passed to :class:`Plotter`.
See also
--------
Plotter
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
array : ndarray
The ndarray to be plotted.
bins_per_row : int, optional
How many bins are going to be plotted per row.
"""
def __init__(self, array, bins_per_row=25, **kwargs):
self.array = array
self.bins_per_row = bins_per_row
Plotter.__init__(self, **kwargs)
def _get_array(self, array):
if array is None:
array = self.array
else:
array = np.asarray(array)
if array.shape != self.array.shape:
raise TypeError("Array must be of equal shape as the initial one.")
return array
def _get_arrays(self, arrays):
try:
ret = [self._get_array(a) for a in arrays]
except (TypeError, IndexError):
ret = [self._get_array(arrays)]
return np.array(ret)
def get_bin_edges(self, i_min, i_max):
"""Get the bin edges corresponding to bins i_min to i_max."""
x = np.arange(i_min, i_max)
return np.append(x - 0.5, x[-1] + 0.5) # Bins centred on integers
def get_axis_label(self):
"""Return the default label for the axis."""
return "Bin #"
@staticmethod
def _get_stack_functions(stack_function):
try:
# A number?
np.isfinite(stack_function)
except TypeError:
# Nope
pass
else:
# A number.
lobound = (1.0 - stack_function) / 2.0
hibound = 1.0 - lobound
def lower(x, axis=0, bound=lobound):
return np.quantile(x, bound, axis=axis)
def upper(x, axis=0, bound=hibound):
return np.quantile(x, bound, axis=axis)
return lower, upper
# No number
try:
# Tuple of functions?
lower, upper = stack_function
except TypeError:
# Nope
def lower(x, axis=0):
return np.sum(np.zeros_like(x), axis=axis)
upper = stack_function
return lower, upper
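    # Example of the convention above: stack_function=0.68 plots the central 68%
    # equal-tailed band (quantiles 0.16 and 0.84) across the stacked arrays, while a
    # single function such as np.mean collapses the stack with the lower bound fixed at 0.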
def plot_array(
self,
array=None,
density=False,
stack_function=np.mean,
margin_function=None,
**kwargs,
):
"""Plot an array.
Parameters
----------
array : ndarray
The thing to plot.
density : bool, optional
Divide the data by the relative bin width: ``width / total_plot_range``.
stack_function : float or function or (lower_function, function)
How to deal with multiple arrays.
When `float`, plot the respective quantile as equal-tailed interval.
When `function`, apply this function to the stack after marginalisation.
When `(function, function)`, use these functions to calculate lower and
upper bounds of the area to be plotted respectively.
Functions must accept ``axis`` keyword argument.
"""
# The `margin_function` parameter is only here so it can be
# safely used with all plotting methods
arrays = self._get_arrays(array)
lower, upper = self._get_stack_functions(stack_function)
bins_per_row = self.bins_per_row
if bins_per_row >= 1:
n_rows = int(np.ceil(arrays.shape[-1] / bins_per_row))
else:
n_rows = 1
bins_per_row = arrays.shape[-1]
figax = self.subplots(
nrows=n_rows,
sharey=True,
figsize=(6.4, max(2.4 * n_rows, 4.8)),
squeeze=False,
)
color = kwargs.get("color", next(self.color))
hatch = kwargs.get("hatch", next(self.hatch))
for i, ax in enumerate(figax[1][:, 0]):
i_min = i * bins_per_row
i_max = min((i + 1) * bins_per_row, arrays.shape[-1])
y_hi = np.asfarray(upper(arrays[:, i_min:i_max], axis=0))
y_lo = np.asfarray(lower(arrays[:, i_min:i_max], axis=0))
bins = np.asfarray(self.get_bin_edges(i_min, i_max))
# Divide by relative bin widths
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
y_hi /= np.asfarray(rel_widths)
y_lo /= np.asfarray(rel_widths)
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
y_lo = np.append(y_lo, y_lo[-1])
y_hi = np.append(y_hi, y_hi[-1])
poly = ax.fill_between(bins, y_hi, y_lo, **args)
# Add sticky y edge so histograms get plotted more beautifully
poly.sticky_edges.y.append(np.min(y_lo))
ax.autoscale_view()
ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
ax.set_xlabel(self.get_axis_label())
def legend(self, **kwargs):
"""Draw a legend in the first axis."""
args = {
"loc": "best",
}
args.update(kwargs)
self.figax[1][0, 0].legend(**args)
class BinningPlotter(ArrayPlotter):
"""Plotting class for the simplest :class:`.Binning` class.
Parameters
----------
binning : Binning
The binning to be plotted.
marginalize_subbinnings : bool, optional
Plot the contents of subbinnings as a single bin.
**kwargs : optional
        Additional keyword arguments are passed to :class:`ArrayPlotter`.
See also
--------
ArrayPlotter
.Binning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : Binning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
"""
def __init__(self, binning, marginalize_subbinnings=False, **kwargs):
self.binning = binning
self.marginalize_subbinnings = marginalize_subbinnings
array = self.binning.value_array
if marginalize_subbinnings:
array = self.binning.marginalize_subbinnings_on_ndarray(array)
ArrayPlotter.__init__(self, array, **kwargs)
def _get_array(self, array):
if array is None:
array = self.array
else:
array = np.asarray(array)
# Marginalize subbinnings if necessary
if self.marginalize_subbinnings and array.shape != self.array.shape:
array = self.binning.marginalize_subbinnings_on_ndarray(array)
if array.shape != self.array.shape:
            raise TypeError("Array must have the same shape as the initial one.")
return array
def _get_binning(self, binning):
if binning is None:
binning = self.binning
return binning
def plot_values(self, binning=None, **kwargs):
"""Plot the values of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.value_array, **kwargs)
def plot_entries(self, binning=None, **kwargs):
"""Plot the entries of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.entries_array, **kwargs)
def plot_sumw2(self, binning=None, **kwargs):
"""Plot the sumw2 of a Binning."""
binning = self._get_binning(binning)
return self.plot_array(binning.sumw2_array, **kwargs)
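# --- Hedged usage sketch (illustration only, not part of the library API) ---
# ``BinningPlotter`` adds shape checks, optional marginalisation of
# subbinnings, and the ``plot_values``/``plot_entries``/``plot_sumw2``
# shortcuts.  Constructing and filling a ``Binning`` is outside this excerpt,
# so the sketch takes an already-filled binning as its argument.
def _example_binning_plotter(filled_binning):
    # Collapse any subbinnings into single bins before plotting
    plotter = BinningPlotter(filled_binning, marginalize_subbinnings=True)
    plotter.plot_values(label="values")      # sum of weights per bin
    plotter.plot_entries(label="entries")    # raw number of entries per bin
    plotter.plot_sumw2(label="sum of w^2")   # sum of squared weights per bin
    plotter.legend()
    return plotter.figax[0]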
class CartesianProductBinningPlotter(BinningPlotter):
"""Plotting class for :class:`.CartesianProductBinning`
Parameters
----------
binning : CartesianProductBinning
        The binning to be plotted.
x_axis_binnings : list of int, optional
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int, optional
The indices of binnings to be plotted on the y-axis.
**kwargs : optional
Additional keyword arguments are passed to :class:`BinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
BinningPlotter
.CartesianProductBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : CartesianProductBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
x_axis_binnings : list of int
The indices of binnings to be plotted on the x-axis.
y_axis_binnings : list of int
The indices of binnings to be plotted on the y-axis.
"""
def __init__(self, binning, x_axis_binnings=None, y_axis_binnings=None, **kwargs):
if x_axis_binnings is None:
x_axis_binnings = list(range(int(np.ceil(len(binning.binnings) / 2.0))))
self.x_axis_binnings = x_axis_binnings
if y_axis_binnings is None:
y_axis_binnings = list(
range(int(np.ceil(len(binning.binnings) / 2.0)), len(binning.binnings))
)
self.y_axis_binnings = y_axis_binnings
kwargs["marginalize_subbinnings"] = True
kwargs["bins_per_row"] = -1
BinningPlotter.__init__(self, binning, **kwargs)
def get_bin_edges(self, i_min, i_max, j_binning):
"""Get the bin edges corresponding to bins i_min to i_max."""
x = np.arange(i_min, i_max)
return np.append(x - 0.5, x[-1] + 0.5) # Bins centred on integers
def get_axis_label(self, j_binning):
"""Return the default label for the axis."""
return "Binning %d Bin #" % (j_binning,)
def plot_array(
self,
array=None,
density=True,
stack_function=np.mean,
margin_function=np.sum,
scatter=-1,
**kwargs,
):
"""Plot an array.
Parameters
----------
array : ndarray, optional
The data to be plotted.
density : bool, optional
Divide the data by the relative bin width: ``width / total_plot_range``.
Dividing by the relative bin width, rather than the bin width directly,
ensures that the maximum values in all 1D projections are comparable.
stack_function : float or function or (lower_function, function)
How to deal with multiple arrays.
            When `float`, plot the respective quantile as an equal-tailed interval.
When `function`, apply this function to the stack after marginalisation.
When `(function, function)`, use these functions to calculate lower and
upper bounds of the area to be plotted respectively.
Functions must accept ``axis`` keyword argument.
margin_function : function, optional
The function used to marginalize the data.
scatter : int, optional
Use a pseudo scatter plot with `scatter` number of points instead
of a 2D histogram. Allows to draw multiple sets of 2D data in the
same plot. The number of points in each cell is proportional to
the value being plotted. Using the `scatter` option is thus
implicitly replicating the behaviour of the `density` option for
the 2D plots. The `density` argument has no effect on the scatter
plots.
"""
arrays = self._get_arrays(array)
lower, upper = self._get_stack_functions(stack_function)
shape = self.binning.bins_shape
arrays = arrays.reshape(arrays.shape[:1] + shape)
n_col = len(self.x_axis_binnings) + 1 # "+1" for the 1D projections
n_row = len(self.y_axis_binnings) + 1
# Widths and heights according to number of bins,
# 10 px (= 0.1") per bin
widths = [
0.1 * self.binning.binnings[i].data_size for i in self.x_axis_binnings
]
heights = [
0.1 * self.binning.binnings[i].data_size for i in self.y_axis_binnings
]
# Axes are counted top to bottom, but we want binnings bottom to top
heights.reverse()
# Total figure size
total_width = np.sum(widths)
total_height = np.sum(heights)
scale = 4.0 / min(max(total_width, total_height), 4.0)
# Room for the 1D histograms
if total_width == 0.0:
widths.append(6 / scale)
else:
widths.append(1.5 / scale)
if total_height == 0.0:
heights.insert(0, 4 / scale)
else:
heights.insert(0, 1.5 / scale)
# Update total sizes
total_width = np.sum(widths)
total_height = np.sum(heights)
fig_x = total_width * scale
fig_y = total_height * scale
# Subplot spacing is specified as multiple of average axis size
# We want it to be relative to the 1D projections
wspace = 0.1 * widths[-1] / (total_width / len(widths))
hspace = 0.1 * heights[0] / (total_height / len(heights))
figax = self.subplots(
nrows=n_row,
ncols=n_col,
sharex="col",
sharey="row",
figsize=(fig_x, fig_y),
gridspec_kw={
"width_ratios": widths,
"height_ratios": heights,
"wspace": wspace,
"hspace": hspace,
},
squeeze=False,
)
color = kwargs.get("color", next(self.color))
hatch = kwargs.get("hatch", next(self.hatch))
# 2D histograms
for x, i in enumerate(self.x_axis_binnings):
for y, j in enumerate(self.y_axis_binnings):
# Get axis to plot in
ax = figax[1][-y - 1, x] # rows are counted top to bottom
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
for k in sorted((i, j), reverse=True):
del axis[k]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# 2D plots only show upper limit of stack
data = upper(data, axis=0)
# Flip axes if necessary
if i < j:
data = data.T
# Bin edges
x_edg = self.get_bin_edges(0, data.shape[1], i)
y_edg = self.get_bin_edges(0, data.shape[0], j)
# Plot the data
if scatter >= 0:
# Draw a set of random points and plot these
# Get bin numbers
csum = np.asfarray(data.cumsum())
csum /= np.max(csum)
indices = np.digitize(np.random.uniform(size=scatter), csum)
# Get x and y bin numbers
x_indices = indices % data.shape[1]
y_indices = indices // data.shape[1]
# Throw X and Y for each event
x = []
y = []
for ix, iy in zip(x_indices, y_indices):
x_min = x_edg[ix]
x_max = x_edg[ix + 1]
y_min = y_edg[iy]
y_max = y_edg[iy + 1]
x.append(np.random.uniform(x_min, x_max))
y.append(np.random.uniform(y_min, y_max))
# Plot the points
if data.sum() > 0:
# Only actually draw something if we have some events
ax.scatter(x, y, 1, color=color, marker=",")
else:
# Plot a regular 2D histogram
# Bin centres
x = np.convolve(x_edg, np.ones(2) / 2, mode="valid")
y = np.convolve(y_edg, np.ones(2) / 2, mode="valid")
xx = np.broadcast_to(x, (len(y), len(x))).flatten()
yy = np.repeat(y, len(x))
# Plot it
if data.sum() == 0:
# Empty data messes with the normalisation
data.fill(0.001)
ax.hist2d(
xx, yy, weights=data.flat, bins=(x_edg, y_edg), density=density
)
# 1D vertical histograms
for x, i in enumerate(self.x_axis_binnings):
# Get axis to plot in
ax = figax[1][0, x]
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
del axis[i]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# Upper and lower limit of area
data_hi = upper(data, axis=0)
data_lo = lower(data, axis=0)
# Divide by relative bin widths
bins = np.asfarray(self.get_bin_edges(0, data.shape[1], i))
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
data_hi /= rel_widths
data_lo /= rel_widths
# Plot the data
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
data_lo = np.append(data_lo, data_lo[-1])
data_hi = np.append(data_hi, data_hi[-1])
poly = ax.fill_between(bins, data_hi, data_lo, **args)
# Add sticky y edge so histograms get plotted more beautifully
poly.sticky_edges.y.append(np.min(data_lo))
ax.autoscale_view()
# Only int tick label
ax.get_xaxis().set_major_locator(ticker.MaxNLocator(integer=True))
# Add labels at the appropriate axes
ax = figax[1][-1, x]
ax.set_xlabel(self.get_axis_label(i))
# 1D horizontal histograms
for y, i in enumerate(self.y_axis_binnings):
# Get axis to plot in
ax = figax[1][-y - 1, -1] # Rows are counted top to bottom
# Project array
axis = list(range(arrays.ndim - 1)) # -1 because of stack axis 0
del axis[i]
axis = tuple(x + 1 for x in axis) # +1 because of stack axis 0
data = np.asfarray(margin_function(arrays, axis=axis))
# Upper and lower limit of area
data_hi = upper(data, axis=0)
data_lo = lower(data, axis=0)
# Divide by relative bin widths
bins = np.asfarray(self.get_bin_edges(0, data.shape[1], i))
if density:
total_width = bins[-1] - bins[0]
rel_widths = (bins[1:] - bins[:-1]) / total_width
data_hi /= rel_widths
data_lo /= rel_widths
# Plot the data
args = {
"step": "post",
"edgecolor": color,
"hatch": hatch,
"facecolor": "none",
}
args.update(kwargs)
data_lo = np.append(data_lo, data_lo[-1])
data_hi = np.append(data_hi, data_hi[-1])
poly = ax.fill_betweenx(bins, data_hi, data_lo, **args)
# Add sticky x edge so histograms get plotted more beautifully
poly.sticky_edges.x.append(np.min(data_lo))
ax.autoscale_view()
# Only int tick label
ax.get_yaxis().set_major_locator(ticker.MaxNLocator(integer=True))
# Add labels at the appropriate axes
ax = figax[1][-y - 1, 0] # Rows are counted top to bottom
ax.set_ylabel(self.get_axis_label(i))
# Hide empty axes
figax[1][0, -1].set_axis_off()
def legend(self, **kwargs):
"""Draw a legend in the upper right corner of the plot."""
handles, labels = self.figax[1][0, 0].get_legend_handles_labels()
args = {
"loc": "center",
"borderaxespad": 0.0,
"frameon": False,
}
args.update(kwargs)
self.figax[1][0, -1].legend(handles, labels, **args)
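# --- Hedged usage sketch (illustration only, not part of the library API) ---
# ``CartesianProductBinningPlotter`` arranges pairwise 2D projections plus 1D
# margins in a single grid.  The ``scatter`` option replaces each 2D histogram
# with a pseudo scatter plot, which makes it possible to overlay several data
# sets.  Constructing the ``CartesianProductBinning`` and the data arrays is
# outside this excerpt, so both are taken as arguments here.
def _example_cartesian_plotter(cartesian_binning, data_a, data_b):
    # Put the first constituent binning on the x-axis, the second on the y-axis
    plotter = CartesianProductBinningPlotter(
        cartesian_binning, x_axis_binnings=[0], y_axis_binnings=[1]
    )
    # Overlay two arrays as pseudo scatter plots with ~1000 points each
    plotter.plot_array(data_a, scatter=1000, label="sample A")
    plotter.plot_array(data_b, scatter=1000, label="sample B")
    plotter.legend()
    return plotter.figax[0]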
class LinearBinningPlotter(BinningPlotter):
"""Plotting class for :class:`.LinearBinning`
Parameters
----------
binning : LinearBinning
        The binning to be plotted.
**kwargs : optional
Additional keyword arguments are passed to :class:`BinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
BinningPlotter
.LinearBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : LinearBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
"""
def __init__(self, binning, **kwargs):
kwargs["marginalize_subbinnings"] = True
args = {
"bins_per_row": -1,
}
args.update(kwargs)
BinningPlotter.__init__(self, binning, **args)
def plot_array(self, *args, **kwargs):
"""Plot an array.
See :meth:`ArrayPlotter.plot_array`.
"""
# Change default behaviour of `density`
kwargs["density"] = kwargs.get("density", True)
return ArrayPlotter.plot_array(self, *args, **kwargs)
def get_bin_edges(self, i_min, i_max):
"""Get the finite bin edges."""
bins = self.binning.bin_edges[i_min : i_max + 1]
ret = list(bins)
if not np.isfinite(ret[0]):
if len(ret) >= 3 and np.isfinite(ret[2]):
ret[0] = ret[1] - (ret[2] - ret[1])
elif np.isfinite(ret[1]):
ret[0] = ret[1] - 1
else:
ret[0] = -0.5
if not np.isfinite(ret[-1]):
if len(ret) >= 3 and np.isfinite(ret[-3]):
ret[-1] = ret[-2] + (ret[-2] - ret[-3])
else:
ret[-1] = ret[-2] + 1
return np.array(ret)
def get_axis_label(self):
"""Return variable name."""
return self.binning.variable
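# --- Standalone illustration (not part of the library API) ---
# ``LinearBinningPlotter.get_bin_edges`` (and its rectilinear counterpart
# below) replaces infinite under-/overflow edges with finite ones: it mirrors
# the width of the neighbouring finite bin, falls back to a width of 1, or to
# -0.5 if nothing finite is available.  The helper re-implements that rule so
# the effect can be inspected without constructing a ``LinearBinning``.
def _example_clamp_infinite_edges(edges):
    import numpy as np
    ret = list(np.asarray(edges, dtype=float))
    if not np.isfinite(ret[0]):
        if len(ret) >= 3 and np.isfinite(ret[2]):
            ret[0] = ret[1] - (ret[2] - ret[1])  # mirror the first finite width
        elif np.isfinite(ret[1]):
            ret[0] = ret[1] - 1                  # fall back to a width of 1
        else:
            ret[0] = -0.5
    if not np.isfinite(ret[-1]):
        if len(ret) >= 3 and np.isfinite(ret[-3]):
            ret[-1] = ret[-2] + (ret[-2] - ret[-3])
        else:
            ret[-1] = ret[-2] + 1
    return np.array(ret)
# For example, [-inf, 0.0, 1.0, 2.0, inf] becomes [-1.0, 0.0, 1.0, 2.0, 3.0].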
class RectilinearBinningPlotter(CartesianProductBinningPlotter):
"""Plotting class for :class:`.RectilinearBinning`
Parameters
----------
binning : RectilinearBinning
        The binning to be plotted.
    x_axis_binnings : list of int or str, optional
        The indices or variable names of the binnings to be plotted on the x-axis.
    y_axis_binnings : list of int or str, optional
        The indices or variable names of the binnings to be plotted on the y-axis.
**kwargs : optional
Additional keyword arguments are passed to :class:`CartesianProductBinningPlotter`.
Notes
-----
    This plotter always marginalizes the subbinnings.
See also
--------
CartesianProductBinningPlotter
.RectilinearBinning
Attributes
----------
figax : (Figure, [[Axes, ...], ...])
The figure and axes that are used for the plotting.
color : cycle of str
Cycler that determines the color of plotting commands.
hatch : cycle of str
Cycler that determines the hatching style of plotting commands.
binning : RectilinearBinning
The binning defining what will be plotted.
marginalize_subbinnings : bool
Whether or not subbinnings will be marginalized before plotting.
    x_axis_binnings : list of int or str
        The indices or variable names of the binnings to be plotted on the x-axis.
    y_axis_binnings : list of int or str
        The indices or variable names of the binnings to be plotted on the y-axis.
"""
def __init__(self, binning, x_axis_binnings=None, y_axis_binnings=None, **kwargs):
if x_axis_binnings is None:
x_axis_binnings = list(range(int(np.ceil(len(binning.binnings) / 2.0))))
else:
            x_axis_binnings = list(map(binning.get_variable_index, x_axis_binnings))
if y_axis_binnings is None:
y_axis_binnings = list(
range(int(np.ceil(len(binning.binnings) / 2.0)), len(binning.binnings))
)
else:
            y_axis_binnings = list(map(binning.get_variable_index, y_axis_binnings))
kwargs["x_axis_binnings"] = x_axis_binnings
kwargs["y_axis_binnings"] = y_axis_binnings
kwargs["marginalize_subbinnings"] = True
kwargs["bins_per_row"] = -1
CartesianProductBinningPlotter.__init__(self, binning, **kwargs)
def get_bin_edges(self, i_min, i_max, j_binning):
"""Get the finite bin edges."""
bins = self.binning.binnings[j_binning].bin_edges[i_min : i_max + 1]
ret = list(bins)
if not np.isfinite(ret[0]):
if len(ret) >= 3 and np.isfinite(ret[2]):
ret[0] = ret[1] - (ret[2] - ret[1])
elif np.isfinite(ret[1]):
ret[0] = ret[1] - 1
else:
ret[0] = -0.5
if not np.isfinite(ret[-1]):
if len(ret) >= 3 and np.isfinite(ret[-3]):
ret[-1] = ret[-2] + (ret[-2] - ret[-3])
else:
ret[-1] = ret[-2] + 1
return np.array(ret)
def get_axis_label(self, j_binning):
"""Return variable name."""
return self.binning.binnings[j_binning].variable
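# --- Hedged usage sketch (illustration only, not part of the library API) ---
# ``RectilinearBinningPlotter`` accepts variable names instead of binning
# indices and uses the real (finite-clamped) bin edges and variable names for
# the axes.  The names "x" and "y" below are placeholders for whatever
# variables the ``RectilinearBinning`` was actually built with.
def _example_rectilinear_plotter(rectilinear_binning):
    plotter = RectilinearBinningPlotter(
        rectilinear_binning, x_axis_binnings=["x"], y_axis_binnings=["y"]
    )
    # Plot the bin values of the binning, divided by the relative bin widths
    plotter.plot_values(density=True, label="values")
    plotter.legend()
    return plotter.figax[0]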
| 3.515625
| 4
|