# python/ray/dataframe/dataframe.py (from the cnheider/ray repository)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pandas as pd
from pandas.api.types import is_scalar
from pandas.util._validators import validate_bool_kwarg
from pandas.core.index import _ensure_index_from_sequences
from pandas._libs import lib
from pandas.core.dtypes.cast import maybe_upcast_putmask
from pandas.compat import lzip
from pandas.core.dtypes.common import (
is_bool_dtype,
is_numeric_dtype,
is_timedelta64_dtype)
import warnings
import numpy as np
import ray
import itertools
class DataFrame(object):
def __init__(self, df, columns, index=None):
"""Distributed DataFrame object backed by Pandas dataframes.
Args:
df ([ObjectID]): The list of ObjectIDs that contain the dataframe
partitions.
columns (pandas.Index): The column names for this dataframe, in
pandas Index object.
index (pandas.Index or list): The row index for this dataframe.
"""
assert(len(df) > 0)
self._df = df
self.columns = columns
# this _index object is a pd.DataFrame
# and we use that DataFrame's Index to index the rows.
self._lengths, self._index = _compute_length_and_index.remote(self._df)
if index is not None:
self.index = index
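# Usage sketch (illustrative comment, not executed): a distributed DataFrame
# is normally built through module-level helpers rather than this constructor.
# Assuming a `from_pandas(pandas_df, npartitions)` helper alongside the
# `to_pandas` function used elsewhere in this file:
#
#   >>> import pandas as pd
#   >>> pdf = pd.DataFrame({'a': range(10), 'b': range(10)})
#   >>> rdf = from_pandas(pdf, npartitions=2)   # hypothetical helper
#   >>> to_pandas(rdf).equals(pdf)
#   True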
def __str__(self):
return repr(self)
def __repr__(self):
if sum(self._lengths) < 40:
result = repr(to_pandas(self))
return result
head = repr(to_pandas(self.head(20)))
tail = repr(to_pandas(self.tail(20)))
result = head + "\n...\n" + tail
return result
def _get_index(self):
"""Get the index for this DataFrame.
Returns:
The union of all indexes across the partitions.
"""
return self._index.index
def _set_index(self, new_index):
"""Set the index for this DataFrame.
Args:
new_index: The new index to set on this DataFrame.
"""
self._index.index = new_index
index = property(_get_index, _set_index)
def _get__index(self):
"""Get the _index for this DataFrame.
Returns:
The default index.
"""
if isinstance(self._index_cache, ray.local_scheduler.ObjectID):
self._index_cache = ray.get(self._index_cache)
return self._index_cache
def _set__index(self, new__index):
"""Set the _index for this DataFrame.
Args:
new__index: The new default index to set.
"""
self._index_cache = new__index
_index = property(_get__index, _set__index)
def _compute_lengths(self):
"""Updates the stored lengths of DataFrame partions
"""
self._lengths = [_deploy_func.remote(_get_lengths, d)
for d in self._df]
def _get_lengths(self):
"""Gets the lengths for each partition and caches it if it wasn't.
Returns:
A list of integers representing the length of each partition.
"""
if isinstance(self._length_cache, ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
elif isinstance(self._length_cache, list) and \
isinstance(self._length_cache[0],
ray.local_scheduler.ObjectID):
self._length_cache = ray.get(self._length_cache)
return self._length_cache
def _set_lengths(self, lengths):
"""Sets the lengths of each partition for this DataFrame.
We use this because we can compute it when creating the DataFrame.
Args:
lengths ([ObjectID or Int]): A list of lengths for each
partition, in order.
"""
self._length_cache = lengths
_lengths = property(_get_lengths, _set_lengths)
@property
def size(self):
"""Get the number of elements in the DataFrame.
Returns:
The number of elements in the DataFrame.
"""
return len(self.index) * len(self.columns)
@property
def ndim(self):
"""Get the number of dimensions for this DataFrame.
Returns:
The number of dimensions for this DataFrame.
"""
# The number of dimensions is common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ndim, self._df[0]))
@property
def ftypes(self):
"""Get the ftypes for this DataFrame.
Returns:
The ftypes for this DataFrame.
"""
# The ftypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.ftypes, self._df[0]))
@property
def dtypes(self):
"""Get the dtypes for this DataFrame.
Returns:
The dtypes for this DataFrame.
"""
# The dtypes are common across all partitions.
# The first partition will be enough.
return ray.get(_deploy_func.remote(lambda df: df.dtypes, self._df[0]))
@property
def empty(self):
"""Determines if the DataFrame is empty.
Returns:
True if the DataFrame is empty.
False otherwise.
"""
all_empty = ray.get(self._map_partitions(lambda df: df.empty)._df)
return False not in all_empty
@property
def values(self):
"""Create a numpy array with the values from this DataFrame.
Returns:
The numpy representation of this DataFrame.
"""
return np.concatenate(
ray.get(self._map_partitions(lambda df: df.values)._df))
@property
def axes(self):
"""Get the axes for the DataFrame.
Returns:
The axes for the DataFrame.
"""
return [self.index, self.columns]
@property
def shape(self):
"""Get the size of each of the dimensions in the DataFrame.
Returns:
A tuple with the size of each dimension as they appear in axes().
"""
return (len(self.index), len(self.columns))
def _map_partitions(self, func, index=None):
"""Apply a function on each partition.
Args:
func (callable): The function to apply.
Returns:
A new DataFrame containing the result of the function.
"""
assert(callable(func))
new_df = [_deploy_func.remote(func, part) for part in self._df]
if index is None:
index = self.index
return DataFrame(new_df, self.columns, index=index)
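# Example sketch: `_map_partitions` ships the callable to every partition with
# `_deploy_func.remote` and wraps the returned ObjectIDs in a new DataFrame.
# Assuming `rdf` is a distributed DataFrame over a numeric column 'a' (as in
# the constructor sketch above):
#
#   >>> doubled = rdf._map_partitions(lambda df: df * 2)
#   >>> to_pandas(doubled)['a'].tolist()[:3]
#   [0, 2, 4]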
def _update_inplace(self, df=None, columns=None, index=None):
"""Updates the current DataFrame inplace
"""
assert df is None or len(df) > 0
if df:
self._df = df
if columns:
self.columns = columns
if index:
self.index = index
self._lengths, self._index = _compute_length_and_index.remote(self._df)
def add_prefix(self, prefix):
"""Add a prefix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(prefix) + str(x))
return DataFrame(self._df, new_cols, index=self.index)
def add_suffix(self, suffix):
"""Add a suffix to each of the column names.
Returns:
A new DataFrame containing the new column names.
"""
new_cols = self.columns.map(lambda x: str(x) + str(suffix))
return DataFrame(self._df, new_cols, index=self.index)
def applymap(self, func):
"""Apply a function to a DataFrame elementwise.
Args:
func (callable): The function to apply.
"""
assert(callable(func))
return self._map_partitions(lambda df: df.applymap(lambda x: func(x)))
def copy(self, deep=True):
"""Creates a shallow copy of the DataFrame.
Returns:
A new DataFrame pointing to the same partitions as this one.
"""
return DataFrame(self._df, self.columns, index=self.index)
def groupby(self, by=None, axis=0, level=None, as_index=True, sort=True,
group_keys=True, squeeze=False, **kwargs):
"""Apply a groupby to this DataFrame. See _groupby() remote task.
Args:
by: The value to groupby.
axis: The axis to groupby.
level: The level of the groupby.
as_index: Whether or not to store result as index.
group_keys: Whether or not to group the keys.
squeeze: Whether or not to squeeze.
Returns:
A new DataFrame resulting from the groupby.
"""
indices = self.index.unique()
chunksize = int(len(indices) / len(self._df))
partitions = [_shuffle.remote(df, indices, chunksize)
for df in self._df]
partitions = ray.get(partitions)
# Transpose the list of dataframes
# TODO find a better way
shuffle = []
for i in range(len(partitions[0])):
shuffle.append([])
for j in range(len(partitions)):
shuffle[i].append(partitions[j][i])
new_dfs = [_local_groupby.remote(part, axis=axis) for part in shuffle]
return DataFrame(new_dfs, self.columns, index=indices)
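# Note on the shuffle above (sketch): `_shuffle` splits every partition by the
# unique index values and `_local_groupby` regroups the pieces, so the nested
# loop is just a list-of-lists transpose, equivalent to:
#
#   >>> parts = [['a0', 'b0'], ['a1', 'b1']]   # [partition][group]
#   >>> [list(group) for group in zip(*parts)]
#   [['a0', 'a1'], ['b0', 'b1']]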
def reduce_by_index(self, func, axis=0):
"""Perform a reduction based on the row index.
Args:
func (callable): The function to call on the partition
after the groupby.
Returns:
A new DataFrame with the result of the reduction.
"""
return self.groupby(axis=axis)._map_partitions(
func, index=pd.unique(self.index))
def sum(self, axis=None, skipna=True, level=None, numeric_only=None):
"""Perform a sum across the DataFrame.
Args:
axis (int): The axis to sum on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The sum of the DataFrame.
"""
intermediate_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
sum_of_partitions = self._map_partitions(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only),
index=intermediate_index)
return sum_of_partitions.reduce_by_index(
lambda df: df.sum(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only))
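# Note (sketch): the sum runs in two phases; each partition computes a local
# df.sum(), then `reduce_by_index` groups the partial results by label and
# collapses them. Conceptually, with `pandas_parts` a hypothetical list of the
# local pandas partitions:
#
#   >>> partials = [part.sum() for part in pandas_parts]        # phase 1
#   >>> total = pd.concat(partials).groupby(level=0).sum()      # phase 2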
def abs(self):
"""Apply an absolute value function to all numberic columns.
Returns:
A new DataFrame with the applied absolute value.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO Give a more accurate error to Pandas
raise TypeError("bad operand type for abs():", "str")
return self._map_partitions(lambda df: df.abs())
def isin(self, values):
"""Fill a DataFrame with booleans for cells contained in values.
Args:
values (iterable, DataFrame, Series, or dict): The values to find.
Returns:
A new DataFrame with booleans representing whether or not a cell
is in values.
True: cell is contained in values.
False: otherwise
"""
return self._map_partitions(lambda df: df.isin(values))
def isna(self):
"""Fill a DataFrame with booleans for cells containing NA.
Returns:
A new DataFrame with booleans representing whether or not a cell
is NA.
True: cell contains NA.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isna())
def isnull(self):
"""Fill a DataFrame with booleans for cells containing a null value.
Returns:
A new DataFrame with booleans representing whether or not a cell
is null.
True: cell contains null.
False: otherwise.
"""
return self._map_partitions(lambda df: df.isnull())
def keys(self):
"""Get the info axis for the DataFrame.
Returns:
A pandas Index for this DataFrame.
"""
# Each partition shares the same columns, so the cached columns are sufficient.
return self.columns
def transpose(self, *args, **kwargs):
"""Transpose columns and rows for the DataFrame.
Note: Triggers a shuffle.
Returns:
A new DataFrame transposed from this DataFrame.
"""
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
temp_columns = self.index
local_transpose = self._map_partitions(
lambda df: df.transpose(*args, **kwargs), index=temp_index)
local_transpose.columns = temp_columns
# Sum will collapse the NAs from the groupby
df = local_transpose.reduce_by_index(
lambda df: df.apply(lambda x: x), axis=1)
# Reassign the columns within partition to self.index.
# We have to use _deploy_func instead of _map_partitions due to
# the new_labels argument
def _reassign_columns(df, new_labels):
df.columns = new_labels
return df
df._df = [
_deploy_func.remote(
_reassign_columns,
part,
self.index) for part in df._df]
return df
T = property(transpose)
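# Note (sketch): `transpose` reshuffles so that each output partition holds
# full rows of the transposed frame; up to partitioning it is intended to
# behave like pandas, e.g.:
#
#   >>> to_pandas(rdf.T)    # intended to correspond to to_pandas(rdf).T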
def dropna(self, axis, how, thresh=None, subset=None, inplace=False):
"""Create a new DataFrame from the removed NA values from this one.
Args:
axis (int, tuple, or list): The axis to apply the drop.
how (str): How to drop the NA values.
'all': drop the label if all values are NA.
'any': drop the label if any values are NA.
thresh (int): The minimum number of NAs to require.
subset ([label]): Labels to consider from other axis.
inplace (bool): Change this DataFrame or return a new DataFrame.
True: Modify the data for this DataFrame, return None.
False: Create a new DataFrame and return it.
Returns:
If inplace is set to True, returns None, otherwise returns a new
DataFrame with the dropna applied.
"""
raise NotImplementedError("Not yet")
if how != 'any' and how != 'all':
raise ValueError("<how> not correctly set.")
def add(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def agg(self, func, axis=0, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def aggregate(self, func, axis=0, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def align(self, other, join='outer', axis=None, level=None, copy=True,
fill_value=None, method=None, limit=None, fill_axis=0,
broadcast_axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def all(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.all(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def any(self, axis=None, bool_only=None, skipna=None, level=None,
**kwargs):
"""Return whether all elements are True over requested axis
Note:
If axis=None or axis=0, this call applies df.all(axis=1)
to the transpose of df.
"""
if axis is None or axis == 0:
df = self.T
axis = 1
else:
df = self
mapped = df._map_partitions(lambda df: df.any(axis,
bool_only,
skipna,
level,
**kwargs))
return to_pandas(mapped)
def append(self, other, ignore_index=False, verify_integrity=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def apply(self, func, axis=0, broadcast=False, raw=False, reduce=None,
args=(), **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_blocks(self, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def as_matrix(self, columns=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asfreq(self, freq, method=None, how=None, normalize=False,
fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def asof(self, where, subset=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def assign(self, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def astype(self, dtype, copy=True, errors='raise', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def at_time(self, time, asof=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def between_time(self, start_time, end_time, include_start=True,
include_end=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bfill(self, axis=None, inplace=False, limit=None, downcast=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def bool(self):
"""Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or if
that element is not boolean.
"""
shape = self.shape
if shape != (1,) and shape != (1, 1):
raise ValueError("""The PandasObject does not have exactly
1 element. Return the bool of a single
element PandasObject. The truth value is
ambiguous. Use a.empty, a.item(), a.any()
or a.all().""")
else:
return to_pandas(self).bool()
def boxplot(self, column=None, by=None, ax=None, fontsize=None, rot=0,
grid=True, figsize=None, layout=None, return_type=None,
**kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip(self, lower=None, upper=None, axis=None, inplace=False, *args,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_lower(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def clip_upper(self, threshold, axis=None, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine(self, other, func, fill_value=None, overwrite=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def combine_first(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def compound(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def consolidate(self, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def convert_objects(self, convert_dates=True, convert_numeric=False,
convert_timedeltas=True, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corr(self, method='pearson', min_periods=1):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def corrwith(self, other, axis=0, drop=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def count(self, axis=0, level=None, numeric_only=False):
if axis == 1:
return self.T.count(axis=0,
level=level,
numeric_only=numeric_only)
else:
temp_index = [idx
for _ in range(len(self._df))
for idx in self.columns]
collapsed_df = sum(
ray.get(
self._map_partitions(
lambda df: df.count(
axis=axis,
level=level,
numeric_only=numeric_only),
index=temp_index)._df))
return collapsed_df
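# Note (sketch): for axis=0 every partition counts its own non-NA cells and
# the per-partition Series are added element-wise, which is why the partial
# results share a repeated column index. With `pandas_parts` a hypothetical
# list of the local pandas partitions:
#
#   >>> partials = [part.count() for part in pandas_parts]
#   >>> total = sum(partials)    # element-wise Series addition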
def cov(self, min_periods=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cummax(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cummin(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cumprod(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def cumsum(self, axis=None, skipna=True, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def describe(self, percentiles=None, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def diff(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def div(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def divide(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def dot(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop(self, labels=None, axis=0, index=None, columns=None, level=None,
inplace=False, errors='raise'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def drop_duplicates(self, subset=None, keep='first', inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def duplicated(self, subset=None, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def eq(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def equals(self, other):
"""
Checks if other DataFrame is elementwise equal to the current one
Returns:
Boolean: True if equal, otherwise False
"""
def helper(df, index, other_series):
return df.iloc[index['index_within_partition']] \
.equals(other_series)
results = []
other_partition = None
other_df = None
for i, idx in other._index.iterrows():
if idx['partition'] != other_partition:
other_df = ray.get(other._df[idx['partition']])
other_partition = idx['partition']
# TODO: group series here into full df partitions to reduce
# the number of remote calls to helper
other_series = other_df.iloc[idx['index_within_partition']]
curr_index = self._index.iloc[i]
curr_df = self._df[int(curr_index['partition'])]
results.append(_deploy_func.remote(helper,
curr_df,
curr_index,
other_series))
for r in results:
if not ray.get(r):
return False
return True
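# Example sketch: `equals` walks other's row-to-partition mapping and compares
# row by row inside the remote partitions, so (continuing the sketch above):
#
#   >>> rdf.equals(rdf)
#   True
#   >>> rdf.equals(rdf._map_partitions(lambda df: df + 1))
#   False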
def eval(self, expr, inplace=False, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ewm(self, com=None, span=None, halflife=None, alpha=None,
min_periods=0, freq=None, adjust=True, ignore_na=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def expanding(self, min_periods=1, freq=None, center=False, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ffill(self, axis=None, inplace=False, limit=None, downcast=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def fillna(self, value=None, method=None, axis=None, inplace=False,
limit=None, downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def filter(self, items=None, like=None, regex=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def first_valid_index(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def floordiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_csv(cls, path, header=0, sep=', ', index_col=0,
parse_dates=True, encoding=None, tupleize_cols=None,
infer_datetime_format=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_dict(cls, data, orient='columns', dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_items(cls, items, columns=None, orient='columns'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@classmethod
def from_records(cls, data, index=None, exclude=None, columns=None,
coerce_float=False, nrows=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ge(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get(self, key, default=None):
"""Get item from object for given key (DataFrame column, Panel
slice, etc.). Returns default value if not found.
Args:
key (DataFrame column, Panel slice) : the key for which value
to get
Returns:
value (type of items contained in object) : A value that is
stored at the key
"""
temp_df = self._map_partitions(lambda df: df.get(key, default=default))
return to_pandas(temp_df)
def get_dtype_counts(self):
"""Get the counts of dtypes in this object.
Returns:
The counts of dtypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_dtype_counts(), self._df[0]
)
)
def get_ftype_counts(self):
"""Get the counts of ftypes in this object.
Returns:
The counts of ftypes in this object.
"""
return ray.get(
_deploy_func.remote(
lambda df: df.get_ftype_counts(), self._df[0]
)
)
def get_value(self, index, col, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def get_values(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def gt(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def head(self, n=5):
"""Get the first n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the first n rows of the dataframe.
"""
sizes = self._lengths
if n >= sum(sizes):
return self
cumulative = np.cumsum(np.array(sizes))
new_dfs = [self._df[i]
for i in range(len(cumulative))
if cumulative[i] < n]
last_index = len(new_dfs)
# this happens when we only need rows from the first partition
if last_index == 0:
num_to_transfer = n
else:
num_to_transfer = n - cumulative[last_index - 1]
new_dfs.append(_deploy_func.remote(lambda df: df.head(num_to_transfer),
self._df[last_index]))
index = self._index.head(n).index
return DataFrame(new_dfs, self.columns, index=index)
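# Example sketch of the partition arithmetic above: whole partitions are kept
# while the cumulative length stays below n, and only the boundary partition
# is trimmed. With partition sizes [4, 4, 2] and n=6:
#
#   >>> cumulative = np.cumsum([4, 4, 2])      # array([ 4,  8, 10])
#   >>> int((cumulative < 6).sum())            # 1 full partition kept
#   1
#   >>> int(6 - cumulative[0])                 # rows taken from the next one
#   2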
def hist(self, data, column=None, by=None, grid=True, xlabelsize=None,
xrot=None, ylabelsize=None, yrot=None, ax=None, sharex=False,
sharey=False, figsize=None, layout=None, bins=10, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def idxmax(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the max value of the axis.
Args:
axis (int): Identify the max over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each maximum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO match the exact error pandas raises here
raise TypeError(
"reduction operation 'idxmax' not allowed for this dtype")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmax(axis=axis, skipna=skipna)))
else:
return self.T.idxmax(axis=1, skipna=skipna)
def idxmin(self, axis=0, skipna=True):
"""Get the index of the first occurrence of the min value of the axis.
Args:
axis (int): Identify the min over the rows (1) or columns (0).
skipna (bool): Whether or not to skip NA values.
Returns:
A Series with the index for each minimum value for the axis
specified.
"""
for t in self.dtypes:
if np.dtype('O') == t:
# TODO match the exact error pandas raises here
raise TypeError(
"reduction operation 'idxmin' not allowed for this dtype")
if axis == 1:
return to_pandas(self._map_partitions(
lambda df: df.idxmin(axis=axis, skipna=skipna)))
else:
return self.T.idxmin(axis=1, skipna=skipna)
def infer_objects(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def info(self, verbose=None, buf=None, max_cols=None, memory_usage=None,
null_counts=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def insert(self, loc, column, value, allow_duplicates=False):
"""Insert column into DataFrame at specified location.
Args:
loc (int): Insertion index. Must verify 0 <= loc <= len(columns).
column (hashable object): Label of the inserted column.
value (int, Series, or array-like): The values to insert.
allow_duplicates (bool): Whether to allow duplicate column names.
"""
try:
len(value)
except TypeError:
value = [value for _ in range(len(self.index))]
if len(value) != len(self.index):
raise ValueError(
"Column length provided does not match DataFrame length.")
if loc < 0 or loc > len(self.columns):
raise ValueError(
"Location provided must be higher than 0 and lower than the "
"number of columns.")
if not allow_duplicates and column in self.columns:
raise ValueError(
"Column {} already exists in DataFrame.".format(column))
cumulative = np.cumsum(self._lengths)
partitions = [value[cumulative[i-1]:cumulative[i]]
for i in range(len(cumulative))
if i != 0]
partitions.insert(0, value[:cumulative[0]])
# Because insert is always inplace, we have to create this temp fn.
def _insert(_df, _loc, _column, _part, _allow_duplicates):
_df.insert(_loc, _column, _part, _allow_duplicates)
return _df
self._df = \
[_deploy_func.remote(_insert,
self._df[i],
loc,
column,
partitions[i],
allow_duplicates)
for i in range(len(self._df))]
self.columns = self.columns.insert(loc, column)
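# Example sketch of the value splitting above: the new column is cut to match
# the existing partition lengths before being inserted remotely. With lengths
# [3, 2]:
#
#   >>> value = [10, 20, 30, 40, 50]
#   >>> cumulative = np.cumsum([3, 2])          # array([3, 5])
#   >>> [value[:3], value[3:5]]
#   [[10, 20, 30], [40, 50]]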
def interpolate(self, method='linear', axis=0, limit=None, inplace=False,
limit_direction='forward', downcast=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def iterrows(self):
"""Iterate over DataFrame rows as (index, Series) pairs.
Note:
Generators can't be pickled, so from the remote function
we expand the generator into a list before getting it.
This is not ideal.
Returns:
A generator that iterates over the rows of the frame.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.iterrows()), part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
series = map(lambda idx_series_tuple: idx_series_tuple[1], iters)
return zip(self.index, series)
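# Example sketch (continuing the sketch above): the per-partition generators
# are materialized remotely, chained locally, and re-keyed with this
# DataFrame's own index:
#
#   >>> for idx, row in rdf.iterrows():
#   ...     print(idx, row['a'])
#   ...     break
#   0 0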
def items(self):
"""Iterator over (column name, Series) pairs.
Note:
Generators can't be pickled, so from the remote function
we expand the generator into a list before getting it.
This is not ideal.
Returns:
A generator that iterates over the columns of the frame.
"""
iters = ray.get([_deploy_func.remote(
lambda df: list(df.items()), part) for part in self._df])
def concat_iters(iterables):
for partitions in zip(*iterables):
series = pd.concat([_series for _, _series in partitions])
series.index = self.index
yield (series.name, series)
return concat_iters(iters)
def iteritems(self):
"""Iterator over (column name, Series) pairs.
Note:
Returns the same thing as .items()
Returns:
A generator that iterates over the columns of the frame.
"""
return self.items()
def itertuples(self, index=True, name='Pandas'):
"""Iterate over DataFrame rows as namedtuples.
Args:
index (boolean, default True): If True, return the index as the
first element of the tuple.
name (string, default "Pandas"): The name of the returned
namedtuples or None to return regular tuples.
Note:
Generators can't be pickled, so from the remote function
we expand the generator into a list before getting it.
This is not ideal.
Returns:
A tuple representing row data. See args for varying tuples.
"""
iters = ray.get([
_deploy_func.remote(
lambda df: list(df.itertuples(index=index, name=name)),
part) for part in self._df])
iters = itertools.chain.from_iterable(iters)
def _replace_index(row_tuple, idx):
# We need to use try-except here because
# isinstance(row_tuple, namedtuple) won't work.
try:
row_tuple = row_tuple._replace(Index=idx)
except AttributeError: # Tuple not namedtuple
row_tuple = (idx,) + row_tuple[1:]
return row_tuple
if index:
iters = itertools.starmap(_replace_index, zip(iters, self.index))
return iters
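# Example sketch: the namedtuples come back carrying each partition's local
# Index field, so `_replace_index` swaps in the global index before yielding
# (continuing the sketch above):
#
#   >>> first = next(iter(rdf.itertuples()))
#   >>> first.Index == rdf.index[0]
#   True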
def join(self, other, on=None, how='left', lsuffix='', rsuffix='',
sort=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurt(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def kurtosis(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last(self, offset):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def last_valid_index(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def le(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lookup(self, row_labels, col_labels):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def lt(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mad(self, axis=None, skipna=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mask(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def max(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform max across the DataFrame.
Args:
axis (int): The axis to take the max on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The max of the DataFrame.
"""
if axis == 1:
return self._map_partitions(
lambda df: df.max(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
return self.T.max(axis=1, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
def mean(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def median(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def melt(self, id_vars=None, value_vars=None, var_name=None,
value_name='value', col_level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def memory_usage(self, index=True, deep=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def merge(self, right, how='inner', on=None, left_on=None, right_on=None,
left_index=False, right_index=False, sort=False,
suffixes=('_x', '_y'), copy=True, indicator=False,
validate=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def min(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
"""Perform min across the DataFrame.
Args:
axis (int): The axis to take the min on.
skipna (bool): True to skip NA values, false otherwise.
Returns:
The min of the DataFrame.
"""
if axis == 1:
return self._map_partitions(
lambda df: df.min(axis=axis, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs))
else:
return self.T.min(axis=1, skipna=skipna, level=level,
numeric_only=numeric_only, **kwargs)
def mod(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mode(self, axis=0, numeric_only=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def mul(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def multiply(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def ne(self, other, axis='columns', level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nlargest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def notna(self):
"""Perform notna across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notna())
def notnull(self):
"""Perform notnull across the DataFrame.
Args:
None
Returns:
Boolean DataFrame where value is False if corresponding
value is NaN, True otherwise
"""
return self._map_partitions(lambda df: df.notnull())
def nsmallest(self, n, columns, keep='first'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def nunique(self, axis=0, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pct_change(self, periods=1, fill_method='pad', limit=None, freq=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pipe(self, func, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot(self, index=None, columns=None, values=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pivot_table(self, values=None, index=None, columns=None,
aggfunc='mean', fill_value=None, margins=False,
dropna=True, margins_name='All'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def plot(self, x=None, y=None, kind='line', ax=None, subplots=False,
sharex=None, sharey=False, layout=None, figsize=None,
use_index=True, title=None, grid=None, legend=True, style=None,
logx=False, logy=False, loglog=False, xticks=None, yticks=None,
xlim=None, ylim=None, rot=None, fontsize=None, colormap=None,
table=False, yerr=None, xerr=None, secondary_y=False,
sort_columns=False, **kwds):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def pop(self, item):
"""Pops an item from this DataFrame and returns it.
Args:
item (str): Column label to be popped
Returns:
A Series containing the popped values. Also modifies this
DataFrame.
"""
popped = to_pandas(self._map_partitions(
lambda df: df.pop(item)))
self._df = self._map_partitions(lambda df: df.drop([item], axis=1))._df
self.columns = self.columns.drop(item)
return popped
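# Example sketch: `pop` collects the popped column as a local pandas Series
# and drops it from every partition (continuing the sketch above):
#
#   >>> col = rdf.pop('b')      # pandas Series holding column 'b'
#   >>> 'b' in rdf.columns
#   False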
def pow(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def prod(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def product(self, axis=None, skipna=None, level=None, numeric_only=None,
min_count=0, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def quantile(self, q=0.5, axis=0, numeric_only=True,
interpolation='linear'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def query(self, expr, inplace=False, **kwargs):
"""Queries the Dataframe with a boolean expression
Returns:
A new DataFrame if inplace=False
"""
new_dfs = [_deploy_func.remote(lambda df: df.query(expr, **kwargs),
part) for part in self._df]
if inplace:
self._update_inplace(new_dfs)
else:
return DataFrame(new_dfs, self.columns)
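# Example sketch: the expression is evaluated independently on each partition,
# which is valid because `query` only filters rows (continuing the sketch
# above):
#
#   >>> filtered = rdf.query('a > 3')
#   >>> to_pandas(filtered)     # rows of rdf where column 'a' exceeds 3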
def radd(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rank(self, axis=0, method='average', numeric_only=None,
na_option='keep', ascending=True, pct=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rdiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex(self, labels=None, index=None, columns=None, axis=None,
method=None, copy=True, level=None, fill_value=np.nan,
limit=None, tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_axis(self, labels, axis=0, method=None, level=None, copy=True,
limit=None, fill_value=np.nan):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reindex_like(self, other, method=None, copy=True, limit=None,
tolerance=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename(self, mapper=None, index=None, columns=None, axis=None,
copy=True, inplace=False, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rename_axis(self, mapper, axis=0, copy=True, inplace=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reorder_levels(self, order, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def replace(self, to_replace=None, value=None, inplace=False, limit=None,
regex=False, method='pad', axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def resample(self, rule, how=None, axis=0, fill_method=None, closed=None,
label=None, convention='start', kind=None, loffset=None,
limit=None, base=0, on=None, level=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def reset_index(self, level=None, drop=False, inplace=False, col_level=0,
col_fill=''):
"""Reset this index to default and create column from current index.
Args:
level: Only remove the given levels from the index. Removes all
levels by default
drop: Do not try to insert index into dataframe columns. This
resets the index to the default integer index.
inplace: Modify the DataFrame in place (do not create a new object)
col_level : If the columns have multiple levels, determines which
level the labels are inserted into. By default it is inserted
into the first level.
col_fill: If the columns have multiple levels, determines how the
other levels are named. If None then the index name is
repeated.
Returns:
A new DataFrame if inplace is False, None otherwise.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if inplace:
new_obj = self
else:
new_obj = self.copy()
def _maybe_casted_values(index, labels=None):
if isinstance(index, pd.PeriodIndex):
values = index.asobject.values
elif isinstance(index, pd.DatetimeIndex) and index.tz is not None:
values = index
else:
values = index.values
if values.dtype == np.object_:
values = lib.maybe_convert_objects(values)
# if we have the labels, extract the values with a mask
if labels is not None:
mask = labels == -1
# we can have situations where the whole mask is -1,
# meaning there is nothing found in labels, so make all nan's
if mask.all():
values = np.empty(len(mask))
values.fill(np.nan)
else:
values = values.take(labels)
if mask.any():
values, changed = maybe_upcast_putmask(
values, mask, np.nan)
return values
_, new_index = _compute_length_and_index.remote(new_obj._df)
new_index = ray.get(new_index).index
if level is not None:
if not isinstance(level, (tuple, list)):
level = [level]
level = [self.index._get_level_number(lev) for lev in level]
if isinstance(self.index, pd.MultiIndex):
if len(level) < self.index.nlevels:
new_index = self.index.droplevel(level)
if not drop:
if isinstance(self.index, pd.MultiIndex):
names = [n if n is not None else ('level_%d' % i)
for (i, n) in enumerate(self.index.names)]
to_insert = lzip(self.index.levels, self.index.labels)
else:
default = 'index' if 'index' not in self else 'level_0'
names = ([default] if self.index.name is None
else [self.index.name])
to_insert = ((self.index, None),)
multi_col = isinstance(self.columns, pd.MultiIndex)
for i, (lev, lab) in reversed(list(enumerate(to_insert))):
if not (level is None or i in level):
continue
name = names[i]
if multi_col:
col_name = (list(name) if isinstance(name, tuple)
else [name])
if col_fill is None:
if len(col_name) not in (1, self.columns.nlevels):
raise ValueError("col_fill=None is incompatible "
"with incomplete column name "
"{}".format(name))
col_fill = col_name[0]
lev_num = self.columns._get_level_number(col_level)
name_lst = [col_fill] * lev_num + col_name
missing = self.columns.nlevels - len(name_lst)
name_lst += [col_fill] * missing
name = tuple(name_lst)
# to ndarray and maybe infer different dtype
level_values = _maybe_casted_values(lev, lab)
new_obj.insert(0, name, level_values)
new_obj.index = new_index
if not inplace:
return new_obj
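# Note (sketch): this mirrors pandas' reset_index, where the old index values
# are converted by `_maybe_casted_values` and inserted as a leading column.
# For comparison, in plain pandas:
#
#   >>> pd.DataFrame({'a': [1, 2]}, index=['x', 'y']).reset_index().columns.tolist()
#   ['index', 'a']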
def rfloordiv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rmod(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rmul(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rolling(self, window, min_periods=None, freq=None, center=False,
win_type=None, on=None, axis=0, closed=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def round(self, decimals=0, *args, **kwargs):
return self._map_partitions(lambda df: df.round(decimals=decimals,
*args,
**kwargs))
def rpow(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rsub(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def rtruediv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sample(self, n=None, frac=None, replace=False, weights=None,
random_state=None, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select(self, crit, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def select_dtypes(self, include=None, exclude=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sem(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def set_axis(self, labels, axis=0, inplace=None):
"""Assign desired index to given axis.
Args:
labels (pd.Index or list-like): The Index to assign.
axis (string or int): The axis to reassign.
inplace (bool): Whether to make these modifications inplace.
Returns:
If inplace is False, returns a new DataFrame, otherwise None.
"""
if is_scalar(labels):
warnings.warn(
'set_axis now takes "labels" as first argument, and '
'"axis" as named parameter. The old form, with "axis" as '
'first parameter and \"labels\" as second, is still supported '
'but will be deprecated in a future version of pandas.',
FutureWarning, stacklevel=2)
labels, axis = axis, labels
if inplace is None:
warnings.warn(
'set_axis currently defaults to operating inplace.\nThis '
'will change in a future version of pandas, use '
'inplace=True to avoid this warning.',
FutureWarning, stacklevel=2)
inplace = True
if inplace:
setattr(self, self._index._get_axis_name(axis), labels)
else:
obj = self.copy()
obj.set_axis(labels, axis=axis, inplace=True)
return obj
def set_index(self, keys, drop=True, append=False, inplace=False,
verify_integrity=False):
"""Set the DataFrame index using one or more existing columns.
Args:
keys: column label or list of column labels / arrays.
drop (boolean): Delete columns to be used as the new index.
append (boolean): Whether to append columns to existing index.
inplace (boolean): Modify the DataFrame in place.
verify_integrity (boolean): Check the new index for duplicates.
Otherwise defer the check until necessary. Setting to False
will improve the performance of this method
Returns:
If inplace is set to false returns a new DataFrame, otherwise None.
"""
inplace = validate_bool_kwarg(inplace, 'inplace')
if not isinstance(keys, list):
keys = [keys]
if inplace:
frame = self
else:
frame = self.copy()
arrays = []
names = []
if append:
names = [x for x in self.index.names]
if isinstance(self.index, pd.MultiIndex):
for i in range(self.index.nlevels):
arrays.append(self.index._get_level_values(i))
else:
arrays.append(self.index)
to_remove = []
for col in keys:
if isinstance(col, pd.MultiIndex):
# append all but the last column so we don't have to modify
# the end of this loop
for n in range(col.nlevels - 1):
arrays.append(col._get_level_values(n))
level = col._get_level_values(col.nlevels - 1)
names.extend(col.names)
elif isinstance(col, pd.Series):
level = col._values
names.append(col.name)
elif isinstance(col, pd.Index):
level = col
names.append(col.name)
elif isinstance(col, (list, np.ndarray, pd.Index)):
level = col
names.append(None)
else:
level = frame[col]._values
names.append(col)
if drop:
to_remove.append(col)
arrays.append(level)
index = _ensure_index_from_sequences(arrays, names)
if verify_integrity and not index.is_unique:
duplicates = index.get_duplicates()
raise ValueError('Index has duplicate keys: %s' % duplicates)
for c in to_remove:
del frame[c]
# clear up memory usage
index._cleanup()
frame.index = index
if not inplace:
return frame
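# Note (sketch): the new index is assembled exactly as in pandas via
# `_ensure_index_from_sequences`; for a single key column, conceptually:
#
#   >>> _ensure_index_from_sequences([pd.Series([1, 2, 3], name='a')._values],
#   ...                              names=['a']).name
#   'a'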
def set_value(self, index, col, value, takeable=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def shift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def skew(self, axis=None, skipna=None, level=None, numeric_only=None,
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def slice_shift(self, periods=1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sort_index(self, axis=0, level=None, ascending=True, inplace=False,
kind='quicksort', na_position='last', sort_remaining=True,
by=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sort_values(self, by, axis=0, ascending=True, inplace=False,
kind='quicksort', na_position='last'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sortlevel(self, level=0, axis=0, ascending=True, inplace=False,
sort_remaining=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def squeeze(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def stack(self, level=-1, dropna=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def std(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def sub(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def subtract(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def swapaxes(self, axis1, axis2, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def swaplevel(self, i=-2, j=-1, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tail(self, n=5):
"""Get the last n rows of the dataframe.
Args:
n (int): The number of rows to return.
Returns:
A new dataframe with the last n rows of this dataframe.
"""
sizes = self._lengths
if n >= sum(sizes):
return self
cumulative = np.cumsum(np.array(sizes[::-1]))
reverse_dfs = self._df[::-1]
new_dfs = [reverse_dfs[i]
for i in range(len(cumulative))
if cumulative[i] < n]
last_index = len(new_dfs)
# this happens when we only need rows from the last partition
if last_index == 0:
num_to_transfer = n
else:
num_to_transfer = n - cumulative[last_index - 1]
new_dfs.append(_deploy_func.remote(lambda df: df.tail(num_to_transfer),
reverse_dfs[last_index]))
new_dfs.reverse()
index = self._index.tail(n).index
return DataFrame(new_dfs, self.columns, index=index)
def take(self, indices, axis=0, convert=None, is_copy=True, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_clipboard(self, excel=None, sep=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_csv(self, path_or_buf=None, sep=', ', na_rep='', float_format=None,
columns=None, header=True, index=True, index_label=None,
mode='w', encoding=None, compression=None, quoting=None,
quotechar='"', line_terminator='\n', chunksize=None,
tupleize_cols=None, date_format=None, doublequote=True,
escapechar=None, decimal='.'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_dense(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_dict(self, orient='dict', into=dict):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_excel(self, excel_writer, sheet_name='Sheet1', na_rep='',
float_format=None, columns=None, header=True, index=True,
index_label=None, startrow=0, startcol=0, engine=None,
merge_cells=True, encoding=None, inf_rep='inf', verbose=True,
freeze_panes=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_feather(self, fname):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_gbq(self, destination_table, project_id, chunksize=10000,
verbose=True, reauth=False, if_exists='fail',
private_key=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_hdf(self, path_or_buf, key, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_html(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='np.NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, bold_rows=True, classes=None, escape=True,
max_rows=None, max_cols=None, show_dimensions=False,
notebook=False, decimal='.', border=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_json(self, path_or_buf=None, orient=None, date_format=None,
double_precision=10, force_ascii=True, date_unit='ms',
default_handler=None, lines=False, compression=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_latex(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='np.NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
bold_rows=False, column_format=None, longtable=None,
escape=None, encoding=None, decimal='.', multicolumn=None,
multicolumn_format=None, multirow=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_msgpack(self, path_or_buf=None, encoding='utf-8', **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_panel(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_parquet(self, fname, engine='auto', compression='snappy',
**kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_period(self, freq=None, axis=0, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_pickle(self, path, compression='infer', protocol=4):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_records(self, index=True, convert_datetime64=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_sparse(self, fill_value=None, kind='block'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_sql(self, name, con, flavor=None, schema=None, if_exists='fail',
index=True, index_label=None, chunksize=None, dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_stata(self, fname, convert_dates=None, write_index=True,
encoding='latin-1', byteorder=None, time_stamp=None,
data_label=None, variable_labels=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_string(self, buf=None, columns=None, col_space=None, header=True,
index=True, na_rep='np.NaN', formatters=None,
float_format=None, sparsify=None, index_names=True,
justify=None, line_width=None, max_rows=None, max_cols=None,
show_dimensions=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_timestamp(self, freq=None, how='start', axis=0, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def to_xarray(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def transform(self, func, *args, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def truediv(self, other, axis='columns', level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def truncate(self, before=None, after=None, axis=None, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tshift(self, periods=1, freq=None, axis=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tz_convert(self, tz, axis=0, level=None, copy=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def tz_localize(self, tz, axis=0, level=None, copy=True,
ambiguous='raise'):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def unstack(self, level=-1, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def update(self, other, join='left', overwrite=True, filter_func=None,
raise_conflict=False):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def var(self, axis=None, skipna=None, level=None, ddof=1,
numeric_only=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def where(self, cond, other=np.nan, inplace=False, axis=None, level=None,
errors='raise', try_cast=False, raise_on_error=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def xs(self, key, axis=0, level=None, drop_level=True):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __getitem__(self, key):
"""Get the column specified by key for this DataFrame.
Args:
key : The column name.
Returns:
            A Pandas Series representing the value of the column.
"""
result_column_chunks = self._map_partitions(
lambda df: df.__getitem__(key))
return to_pandas(result_column_chunks)
def __setitem__(self, key, value):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __len__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __unicode__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __invert__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __hash__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __iter__(self):
"""Iterate over the columns
Returns:
An Iterator over the columns of the dataframe.
"""
return iter(self.columns)
def __contains__(self, key):
return key in self.columns
def __nonzero__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __bool__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __abs__(self):
"""Creates a modified DataFrame by elementwise taking the absolute value
Returns:
A modified DataFrame
"""
return self.abs()
def __round__(self, decimals=0):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __array__(self, dtype=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __array_wrap__(self, result, context=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __getstate__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __setstate__(self, state):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __delitem__(self, key):
"""Delete an item by key. `del a[key]` for example.
        Operation happens in place.
Args:
key: key to delete
"""
def del_helper(df):
df.__delitem__(key)
return df
self._df = self._map_partitions(del_helper)._df
self.columns = self.columns.drop(key)
def __finalize__(self, other, method=None, **kwargs):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __copy__(self, deep=True):
"""Make a copy using Ray.DataFrame.copy method
Args:
deep: Boolean, deep copy or not.
Currently we do not support deep copy.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=deep)
def __deepcopy__(self, memo=None):
"""Make a -deep- copy using Ray.DataFrame.copy method
This is equivalent to copy(deep=True).
Args:
memo: No effect. Just to comply with Pandas API.
Returns:
A Ray DataFrame object.
"""
return self.copy(deep=True)
def __and__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __or__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __xor__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __lt__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __le__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __gt__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __ge__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __eq__(self, other):
"""Computes the equality of this DataFrame with another
Returns:
True, if the DataFrames are equal. False otherwise.
"""
return self.equals(other)
def __ne__(self, other):
"""Checks that this DataFrame is not equal to another
Returns:
True, if the DataFrames are not equal. False otherwise.
"""
return not self.equals(other)
def __add__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __iadd__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __mul__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __imul__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __pow__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __ipow__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __sub__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __isub__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __neg__(self):
"""Computes an element wise negative DataFrame
Returns:
A modified DataFrame where every element is the negation of before
"""
for t in self.dtypes:
if not (is_bool_dtype(t)
or is_numeric_dtype(t)
or is_timedelta64_dtype(t)):
raise TypeError("Unary negative expects numeric dtype, not {}"
.format(t))
return self._map_partitions(lambda df: df.__neg__())
def __floordiv__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __truediv__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __mod__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
def __sizeof__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def __doc__(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def blocks(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def style(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    def iat(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    def __rsub__(self, other, axis=None, level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def loc(self):
"""Purely label-location based indexer for selection by label.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
from .indexing import _Loc_Indexer
return _Loc_Indexer(self)
@property
def is_copy(self):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    def __itruediv__(self, other):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    def __div__(self, other, axis=None, level=None, fill_value=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    def at(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
    def ix(self, axis=None):
raise NotImplementedError(
"To contribute to Pandas on Ray, please visit "
"github.com/ray-project/ray.")
@property
def iloc(self):
"""Purely integer-location based indexing for selection by position.
We currently support: single label, list array, slice object
We do not support: boolean array, callable
"""
from .indexing import _iLoc_Indexer
return _iLoc_Indexer(self)
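# --- Illustrative usage sketch (added; not part of the original API) ---
# A minimal sketch of label- and position-based selection, assuming ray.init()
# has been called and that loc/iloc behave as documented above (single label,
# list, or slice). The function and variable names here are hypothetical.
def _example_label_and_position_indexing(ray_df):
    """Select rows by label and by integer position on a Ray DataFrame."""
    by_label = ray_df.loc[0]             # single label (assumes 0 is in the index)
    by_label_slice = ray_df.loc[0:5]     # slice of labels
    by_position = ray_df.iloc[[0, 2]]    # list of integer positions
    return by_label, by_label_slice, by_position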
def _get_lengths(df):
"""Gets the length of the dataframe.
Args:
df: A remote pd.DataFrame object.
Returns:
Returns an integer length of the dataframe object. If the attempt
fails, returns 0 as the length.
"""
try:
return len(df)
    # Some partitions may hold summary statistics rather than row data, in
    # which case len() is not defined for them.
except TypeError:
return 0
@ray.remote
def _shuffle(df, indices, chunksize):
"""Shuffle data by sending it through the Ray Store.
Args:
df (pd.DataFrame): The pandas DataFrame to shuffle.
indices ([any]): The list of indices for the DataFrame.
chunksize (int): The number of indices to send.
Returns:
The list of pd.DataFrame objects in order of their assignment. This
order is important because it determines which task will get the data.
"""
i = 0
partition = []
while len(indices) > chunksize:
oids = df.reindex(indices[:chunksize])
partition.append(oids)
indices = indices[chunksize:]
i += 1
else:
oids = df.reindex(indices)
partition.append(oids)
return partition
@ray.remote
def _local_groupby(df_rows, axis=0):
"""Apply a groupby on this partition for the blocks sent to it.
Args:
df_rows ([pd.DataFrame]): A list of dataframes for this partition. Goes
through the Ray object store.
Returns:
A DataFrameGroupBy object from the resulting groupby.
"""
concat_df = pd.concat(df_rows, axis=axis)
return concat_df.groupby(concat_df.index)
@ray.remote
def _deploy_func(func, dataframe, *args):
"""Deploys a function for the _map_partitions call.
Args:
dataframe (pandas.DataFrame): The pandas DataFrame for this partition.
Returns:
A futures object representing the return value of the function
provided.
"""
if len(args) == 0:
return func(dataframe)
else:
return func(dataframe, *args)
def from_pandas(df, npartitions=None, chunksize=None, sort=True):
"""Converts a pandas DataFrame to a Ray DataFrame.
Args:
df (pandas.DataFrame): The pandas DataFrame to convert.
npartitions (int): The number of partitions to split the DataFrame
into. Has priority over chunksize.
chunksize (int): The number of rows to put in each partition.
sort (bool): Whether or not to sort the df as it is being converted.
Returns:
A new Ray DataFrame object.
"""
if sort and not df.index.is_monotonic_increasing:
df = df.sort_index(ascending=True)
if npartitions is not None:
chunksize = int(len(df) / npartitions)
elif chunksize is None:
raise ValueError("The number of partitions or chunksize must be set.")
temp_df = df
dataframes = []
lengths = []
while len(temp_df) > chunksize:
t_df = temp_df[:chunksize]
lengths.append(len(t_df))
# reset_index here because we want a pd.RangeIndex
# within the partitions. It is smaller and sometimes faster.
t_df = t_df.reset_index(drop=True)
top = ray.put(t_df)
dataframes.append(top)
temp_df = temp_df[chunksize:]
else:
temp_df = temp_df.reset_index(drop=True)
dataframes.append(ray.put(temp_df))
lengths.append(len(temp_df))
return DataFrame(dataframes, df.columns, index=df.index)
def to_pandas(df):
"""Converts a Ray DataFrame to a pandas DataFrame/Series.
Args:
df (ray.DataFrame): The Ray DataFrame to convert.
Returns:
A new pandas DataFrame.
"""
pd_df = pd.concat(ray.get(df._df))
pd_df.index = df.index
pd_df.columns = df.columns
return pd_df
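# --- Illustrative usage sketch (added; not part of the original file) ---
# A minimal round-trip through the Ray-backed DataFrame, assuming ray.init()
# has been called and the input frame has at least as many rows as partitions.
# The names below are hypothetical.
def _example_round_trip(pandas_df):
    """Convert a pandas DataFrame to a Ray DataFrame and back."""
    ray_df = from_pandas(pandas_df, npartitions=4)
    restored = to_pandas(ray_df)
    return restored  # same data, index and columns as pandas_df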
@ray.remote(num_return_vals=2)
def _compute_length_and_index(dfs):
"""Create a default index, which is a RangeIndex
Returns:
The pd.RangeIndex object that represents this DataFrame.
"""
lengths = ray.get([_deploy_func.remote(_get_lengths, d)
for d in dfs])
dest_indices = {"partition":
[i for i in range(len(lengths))
for j in range(lengths[i])],
"index_within_partition":
[j for i in range(len(lengths))
for j in range(lengths[i])]}
return lengths, pd.DataFrame(dest_indices)
| 2.765625
| 3
|
panbib.py
|
fmrchallenge/tlzoo
| 3
|
12778352
|
#!/bin/env python
"""panbib - map tlzoo YAML data into various formats
"""
from __future__ import print_function
import argparse
import os.path
import glob
import yaml
def find_db():
"""
Assume that ref/ directory is at same level as this file (panbib.py).
"""
ref_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'ref')
return glob.glob(os.path.join(ref_path, '*.yaml'))
def parse_db(paths):
paper_entries = dict()
spc_entries = dict()
tool_entries = dict()
for path in paths:
if os.path.basename(path) == 'def.yaml':
with open(path) as fp:
                spc_entries = yaml.safe_load(fp)
continue
elif os.path.basename(path) == 'tools.yaml':
with open(path) as fp:
                tool_entries = yaml.safe_load(fp)
continue
try:
int(os.path.basename(path).split('.')[0])
except ValueError:
continue
with open(path) as fp:
            incoming = yaml.safe_load(fp)
paper_entries.update(incoming)
return spc_entries, paper_entries, tool_entries
def generate_bibtex(entry, key=None):
monthtext = ['January', 'February', 'March', 'April', 'May',
'June', 'July', 'August', 'September', 'October',
'November', 'December']
if entry['type'] == 'conference paper':
output = ('@inproceedings{{{KEY},\n'
' title = {{{TITLE}}},\n'
' author = {{{AUTHORS}}},\n'
' year = {{{YEAR}}},\n'
' booktitle = {{{BOOKTITLE}}},'
'\n'.format(
KEY=key,
TITLE=entry['title'],
AUTHORS=' AND '.join(entry['authors']),
YEAR=entry['year'],
BOOKTITLE=entry['booktitle']
))
if 'month' in entry:
output += ' month = {{{}}},\n'.format(monthtext[entry['month']-1])
output += '}\n'
return output
elif entry['type'] == 'article':
return ('@article{{{KEY},\n'
' title = {{{TITLE}}},\n'
' author = {{{AUTHORS}}},\n'
' year = {{{YEAR}}},\n'
' journal = {{{JOURNAL}}},\n'
'}}'.format(
KEY=key,
TITLE=entry['title'],
AUTHORS=' AND '.join(entry['authors']),
YEAR=entry['year'],
JOURNAL=entry['journal']
))
elif entry['type'] == 'book':
return ('@book{{{KEY},\n'
' title = {{{TITLE}}},\n'
' author = {{{AUTHORS}}},\n'
' year = {{{YEAR}}},\n'
'}}'.format(
KEY=key,
TITLE=entry['title'],
AUTHORS=' AND '.join(entry['authors']),
YEAR=entry['year']
))
elif entry['type'] == 'technical report':
return ('@techreport{{{KEY},\n'
' title = {{{TITLE}}},\n'
' author = {{{AUTHORS}}},\n'
' year = {{{YEAR}}},\n'
'}}'.format(
KEY=key,
TITLE=entry['title'],
AUTHORS=' AND '.join(entry['authors']),
YEAR=entry['year']
))
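# Illustrative sketch (added): generate_bibtex maps a single YAML entry to a
# BibTeX record. The entry below is hypothetical and only includes the fields
# the branches above read for a conference paper.
def _example_generate_bibtex():
    entry = {
        'type': 'conference paper',
        'title': 'A Hypothetical Paper',
        'authors': ['A. Author', 'B. Author'],
        'year': 2020,
        'booktitle': 'Proceedings of a Hypothetical Conference',
        'month': 7,
    }
    return generate_bibtex(entry, key='author2020hypothetical')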
def print_bibtex_list(entries):
for key, entry in entries.items():
print(generate_bibtex(entry, key=key))
print() # Blank line
def generate_tlzoo_tree(spc_entries, paper_entries, tool_entries):
"""
Output files are placed under the directory site/docs/
"""
with open(os.path.join('site', 'mkdocs.yml'), 'w') as fp:
with open(os.path.join('site', 'mkdocs.yml.prefix')) as fp_prefix:
fp.write(fp_prefix.read())
title_mapping = list(spc_entries.keys())
title_mapping.sort(key=(lambda x: spc_entries[x]['name']))
fp.write('- specification languages:\n')
for key in title_mapping:
fp.write(' - "{NAME}": spc/{KEY}.md\n'.format(
NAME=spc_entries[key]['name'],
KEY=key
))
title_mapping = list(paper_entries.keys())
title_mapping.sort(key=(lambda x: paper_entries[x]['title']))
fp.write('- papers:\n')
for key in title_mapping:
fp.write(' - "{TITLE}": papers/{KEY}.md\n'.format(
TITLE=paper_entries[key]['title'],
KEY=key
))
title_mapping = list(tool_entries.keys())
title_mapping.sort(key=(lambda x: tool_entries[x]['name']))
fp.write('- tools:\n')
for key in title_mapping:
fp.write(' - "{NAME}": tools/{KEY}.md\n'.format(
NAME=tool_entries[key]['name'],
KEY=key
))
docs_dir = os.path.join('site', 'docs')
spc_dir = os.path.join(docs_dir, 'spc')
papers_dir = os.path.join(docs_dir, 'papers')
tools_dir = os.path.join(docs_dir, 'tools')
if not os.path.exists(docs_dir):
os.mkdir(docs_dir)
if not os.path.exists(spc_dir):
os.mkdir(spc_dir)
if not os.path.exists(papers_dir):
os.mkdir(papers_dir)
if not os.path.exists(tools_dir):
os.mkdir(tools_dir)
for key, entry in spc_entries.items():
with open(os.path.join(spc_dir, key+'.md'), 'w') as fp:
fp.write('''## {NAME}
{VERBOSE_NAME}
'''.format(NAME=entry['name'],
VERBOSE_NAME=(entry['alias'][0] if 'alias' in entry else entry['name']))
)
if ('alias' in entry) and len(entry['alias']) >= 2:
fp.write('Also known as:\n\n')
                fp.write('* ' + '\n* '.join(entry['alias'][1:]))
fp.write('\n\n')
fp.write('### summary\n\n')
fp.write(entry['summary'] if 'summary' in entry else '(nil)')
fp.write('\n\n')
this_papers = [pkey for (pkey, pentry) in paper_entries.items()
if key in pentry['spc_lang']]
this_papers.sort(key=(lambda x: paper_entries[x]['year']))
this_tools = [tkey for (tkey, tentry) in tool_entries.items()
if key in tentry['spc_lang']]
this_tools.sort(key=(lambda x: tool_entries[x]['name']))
results = list()
firsts = list()
for ii, tp in enumerate(this_papers):
if paper_entries[tp]['status'] == 'first':
firsts.append('[['+str(ii+1)+']](../../papers/'+tp+')')
if len(firsts)> 0:
results.append('First defined in '
+ ', '.join(firsts))
# Assume there is at least one known fact
fp.write('### results\n\n')
if len(results) > 0:
fp.write('* ' + '\n* '.join(results) + '\n\n')
else:
fp.write('(nil)\n\n')
fp.write('### references (chronological order)\n\n')
for ii, pkey in enumerate(this_papers):
                ptitle = paper_entries[pkey]['title'].replace('`', '\\`')
fp.write(str(ii+1)+'. ['+ptitle
+'](../../papers/'+pkey+')'
+' ('+str(paper_entries[pkey]['year'])+')\n')
if len(this_tools) > 0:
fp.write('### tools\n\n')
for ii, tkey in enumerate(this_tools):
                    fp.write(str(ii+1)+'. ['+tool_entries[tkey]['name']
                             +'](../../tools/'+tkey+')\n')
for key, entry in paper_entries.items():
# Escape special Markdown symbols
        entry['title'] = entry['title'].replace('`', '\\`')
with open(os.path.join(papers_dir, key+'.md'), 'w') as fp:
if entry['type'] == 'conference paper':
venue = '**venue (conference):** ' + entry['booktitle']
elif entry['type'] == 'article':
venue = '**venue (journal):** ' + entry['journal']
else:
venue = '(UNKNOWN)'
fp.write('''## {TITLE}
**authors:** {AUTHORS}
{VENUE}
**date:** {DATE}
'''.format(TITLE=entry['title'],
AUTHORS=', '.join(entry['authors']),
VENUE=venue,
DATE=entry['year'])
)
fp.write('### specification languages\n\n')
spc_langs = list()
for spc_lang in entry['spc_lang']:
spc_langs.append('* ['+spc_entries[spc_lang]['name']
+'](../../spc/'+spc_lang+')')
            fp.write('\n'.join(spc_langs))
fp.write('\n\n')
fp.write('### keywords\n\n')
if ('keywords' in entry) and len(entry['keywords']) > 0:
fp.write(', '.join(entry['keywords']))
else:
fp.write('(nil)')
fp.write('\n\n')
fp.write('### URL\n\n')
url_list = []
if ('url' in entry) and len(entry['url']) > 0:
url_list.extend(['* <'+url+'>\n' for url in entry['url']])
if 'doi' in entry:
url_list.append('* <https://dx.doi.org/'+entry['doi']+'> (auto-generated link)\n')
if len(url_list) > 0:
fp.writelines(url_list)
else:
fp.write('(nil)')
fp.write('\n\n')
fp.write('### BibTeX\n<pre>\n')
fp.write(generate_bibtex(entry, key))
fp.write('</pre>\n')
for key, entry in tool_entries.items():
with open(os.path.join(tools_dir, key+'.md'), 'w') as fp:
fp.write('## ' + entry['name'] + '\n\n')
fp.write('### specification languages\n\n')
spc_langs = list()
for spc_lang in entry['spc_lang']:
spc_langs.append('* ['+spc_entries[spc_lang]['name']
+'](../../spc/'+spc_lang+')')
            fp.write('\n'.join(spc_langs))
fp.write('\n\n')
if ('papers' in entry) and len(entry['papers']) > 0:
fp.write('### references (chronological order)\n\n')
entry['papers'].sort(key=(lambda x: paper_entries[x]['year']))
                for ii, pkey in enumerate(entry['papers']):
                    ptitle = paper_entries[pkey]['title'].replace('`', '\\`')
fp.write(str(ii+1)+'. ['+ptitle
+'](../../papers/'+pkey+')'
+' ('+str(paper_entries[pkey]['year'])+')\n')
fp.write('### URL\n\n')
url_list = []
if ('url' in entry) and len(entry['url']) > 0:
url_list.extend(['* <'+url+'>\n' for url in entry['url']])
if 'doi' in entry:
url_list.append('* <http://dx.doi.org/'+entry['doi']+'> (auto-generated link)\n')
if len(url_list) > 0:
fp.writelines(url_list)
else:
fp.write('(nil)')
fp.write('\n\n')
if __name__ == '__main__':
parser = argparse.ArgumentParser(prog='panbib')
parser.add_argument('-t', '--type', metavar='TYPE', action='store',
dest='out_format', default='bibtex',
                        help='output format; supported formats: bibtex, tlzoo')
args = parser.parse_args()
db_files = find_db()
spc_entries, paper_entries, tool_entries = parse_db(db_files)
target_format = args.out_format.lower()
if target_format == 'bibtex':
print_bibtex_list(paper_entries)
elif target_format == 'tlzoo':
generate_tlzoo_tree(spc_entries, paper_entries, tool_entries)
else:
pass
| 2.765625
| 3
|
src/io/create_jsons_from_csv.py
|
libercapital/dados_publicos_cnpj_receita_federal
| 7
|
12778353
|
import json
import os
import pandas as pd
from src import DATA_FOLDER, UNZIPED_FOLDER_NAME
from src.io import CNAE_JSON_NAME, NATJU_JSON_NAME, QUAL_SOCIO_JSON_NAME, MOTIVOS_JSON_NAME, PAIS_JSON_NAME, \
MUNIC_JSON_NAME
from src.io.get_last_ref_date import main as get_last_ref_date
def main(ref_date=None):
ref_date = ref_date or get_last_ref_date()
path_unziped = os.path.join(DATA_FOLDER, ref_date, UNZIPED_FOLDER_NAME)
list_all_unziped_files = os.listdir(path_unziped)
for file in list_all_unziped_files:
path_file = os.path.join(path_unziped, file)
if "CNAECSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=CNAE_JSON_NAME)
if "NATJUCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=NATJU_JSON_NAME)
if "QUALSCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=QUAL_SOCIO_JSON_NAME)
if "MOTICSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=MOTIVOS_JSON_NAME)
if "PAISCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=PAIS_JSON_NAME)
if "MUNICCSV" in file:
_dict = create_json(path_file=path_file, path_unziped=path_unziped, json_name=MUNIC_JSON_NAME)
def create_json(path_file, path_unziped, json_name):
df = pd.read_csv(path_file, sep=';', encoding='cp1252', header=None)
df.sort_values(df.columns[0], inplace=True)
_dict = dict(df.values)
path_json = os.path.join(path_unziped, json_name)
with open(path_json, 'w', encoding='utf-8') as f:
print(f"creating: '{path_json}'", end=' ... ', flush=True)
json.dump(_dict, f, ensure_ascii=False)
print('done!')
return _dict
if __name__ == '__main__':
main()
| 2.6875
| 3
|
day18.py
|
bloy/adventofcode-2017
| 0
|
12778354
|
#!env python
import aoc
import collections
import pprint
import re
ISINT = re.compile(r'^-?[0-9]+$')
def parse_data(lines):
return [line.split() for line in lines]
def valueof(v, registers):
if v is None:
return None
if ISINT.match(v):
return int(v)
return registers[v]
def solve1(data):
registers = collections.defaultdict(int)
pc = 0
soundplayed = 0
while 0 <= pc < len(data):
instr = data[pc][0]
v1 = data[pc][1]
v2 = data[pc][2] if len(data[pc]) > 2 else None
pc += 1
if instr == 'snd':
soundplayed = valueof(v1, registers)
elif instr == 'rcv':
if valueof(v1, registers) != 0:
return ('Last sound played', soundplayed)
elif instr == 'set':
registers[v1] = valueof(v2, registers)
elif instr == 'add':
registers[v1] += valueof(v2, registers)
elif instr == 'mul':
registers[v1] *= valueof(v2, registers)
elif instr == 'mod':
registers[v1] = registers[v1] % valueof(v2, registers)
elif instr == 'jgz':
if valueof(v1, registers) > 0:
pc += valueof(v2, registers) - 1
return "terminated"
def program(data, pid, rcvqueue, sndqueue):
    registers = collections.defaultdict(int)
registers['p'] = pid
pc = 0
sendcount = 0
terminated = False
while 0 <= pc < len(data) and not terminated:
instr = data[pc][0]
v1 = data[pc][1]
v2 = data[pc][2] if len(data[pc]) > 2 else None
pc += 1
if instr == 'snd':
sndqueue.appendleft(valueof(v1, registers))
sendcount += 1
elif instr == 'rcv':
if len(rcvqueue) == 0:
yield sendcount
try:
registers[v1] = rcvqueue.pop()
except IndexError:
terminated = True
elif instr == 'set':
registers[v1] = valueof(v2, registers)
elif instr == 'add':
registers[v1] += valueof(v2, registers)
elif instr == 'mul':
registers[v1] *= valueof(v2, registers)
elif instr == 'mod':
registers[v1] = registers[v1] % valueof(v2, registers)
elif instr == 'jgz':
if valueof(v1, registers) > 0:
pc += valueof(v2, registers) - 1
yield sendcount
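# Note (added): program() is written as a generator so that two instances can be
# interleaved cooperatively. It yields its running send count whenever its
# receive queue is empty; solve2 then switches to the other program. If the
# queue is still empty on resume, the pop() raises IndexError, the program
# terminates, and solve2 stops on the resulting StopIteration.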
def solve2(data):
queues = [collections.deque(), collections.deque()]
programs = [program(data, 0, queues[0], queues[1]),
program(data, 1, queues[1], queues[0])]
current = 0
returns = [None, None]
while 1:
try:
returns[current] = next(programs[current])
except StopIteration:
return returns
current = (current + 1) % 2
lines = [
'set a 1',
'add a 2',
'mul a a',
'mod a 5',
'snd a',
'set a 0',
'rcv a',
'jgz a -1',
'set a 1',
'jgz a -2',
]
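# Sample program from the puzzle statement; solve1(parse_data(lines)) recovers
# the last sound played, which is 4 for this input.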
if __name__ == '__main__':
lines = aoc.input_lines(day=18)
data = parse_data(lines)
pprint.pprint(solve1(data))
pprint.pprint(solve2(data))
| 3
| 3
|
openregistry/assets/core/events.py
|
EBRD-ProzorroSale/openregistry.assets.core
| 0
|
12778355
|
# -*- coding: utf-8 -*-
class AssetInitializeEvent(object):
""" Asset initialization event. """
def __init__(self, asset):
self.asset = asset
| 1.9375
| 2
|
fileresponse/asgi.py
|
ephes/django-fileresponse
| 2
|
12778356
|
# AUTOGENERATED! DO NOT EDIT! File to edit: nbs/02_asgi.ipynb (unless otherwise specified).
__all__ = ['get_asgi_application']
# Cell
import django
from fileresponse.handlers import AsyncFileASGIHandler
def get_asgi_application():
"""
Similar to django.core.asgi.get_asgi_application, but uses AsyncFileASGIHandler
    instead of Django's ASGIHandler.
"""
django.setup(set_prefix=False)
return AsyncFileASGIHandler()
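# Illustrative usage (added, hedged): in a project's asgi.py one would
# typically expose the handler as the module-level ASGI callable, e.g.
#
#     from fileresponse.asgi import get_asgi_application
#     application = get_asgi_application()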
| 1.820313
| 2
|
test/test_list_transactions_by_address_response_item.py
|
xan187/Crypto_APIs_2.0_SDK_Python
| 0
|
12778357
|
"""
CryptoAPIs
Crypto APIs 2.0 is a complex and innovative infrastructure layer that radically simplifies the development of any Blockchain and Crypto related applications. Organized around REST, Crypto APIs 2.0 can assist both novice Bitcoin/Ethereum enthusiasts and crypto experts with the development of their blockchain applications. Crypto APIs 2.0 provides unified endpoints and data, raw data, automatic tokens and coins forwardings, callback functionalities, and much more. # noqa: E501
The version of the OpenAPI document: 2.0.0
Contact: <EMAIL>
Generated by: https://openapi-generator.tech
"""
import sys
import unittest
import cryptoapis
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_fee import GetTransactionDetailsByTransactionIDResponseItemFee
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_recipients import GetTransactionDetailsByTransactionIDResponseItemRecipients
from cryptoapis.model.get_transaction_details_by_transaction_id_response_item_senders import GetTransactionDetailsByTransactionIDResponseItemSenders
from cryptoapis.model.list_transactions_by_address_response_item_blockchain_specific import ListTransactionsByAddressResponseItemBlockchainSpecific
globals()['GetTransactionDetailsByTransactionIDResponseItemFee'] = GetTransactionDetailsByTransactionIDResponseItemFee
globals()['GetTransactionDetailsByTransactionIDResponseItemRecipients'] = GetTransactionDetailsByTransactionIDResponseItemRecipients
globals()['GetTransactionDetailsByTransactionIDResponseItemSenders'] = GetTransactionDetailsByTransactionIDResponseItemSenders
globals()['ListTransactionsByAddressResponseItemBlockchainSpecific'] = ListTransactionsByAddressResponseItemBlockchainSpecific
from cryptoapis.model.list_transactions_by_address_response_item import ListTransactionsByAddressResponseItem
class TestListTransactionsByAddressResponseItem(unittest.TestCase):
"""ListTransactionsByAddressResponseItem unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testListTransactionsByAddressResponseItem(self):
"""Test ListTransactionsByAddressResponseItem"""
# FIXME: construct object with mandatory attributes with example values
# model = ListTransactionsByAddressResponseItem() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 1.851563
| 2
|
scraper/storage_spiders/thayroicom.py
|
chongiadung/choinho
| 0
|
12778358
|
# Auto generated by generator.py. Delete this line if you make modification.
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
XPATH = {
'name' : "//h1[@class='post-title entry-title item_name']",
'price' : "//div[@class='prod_pricebox_price_final']/span[@id='special_price_box']",
'category' : "",
'description' : "//div[@class='post-body entry-content']/div[5]/div",
'images' : "//img[@class='item_thumb']/@src",
'canonical' : "//link[@rel='canonical']/@href",
'base_url' : "",
'brand' : ""
}
name = 'thayroi.<EMAIL>'
allowed_domains = ['thayroi.com']
start_urls = ['http://www.thayroi.com/']
tracking_url = ''
sitemap_urls = ['']
sitemap_rules = [('', 'parse_item')]
sitemap_follow = []
rules = [
Rule(LinkExtractor(allow=['/\d+/\d+/[a-zA-Z0-9-]+\.html$']), 'parse_item'),
Rule(LinkExtractor(allow=['/search/label/']), 'parse'),
#Rule(LinkExtractor(), 'parse_item_and_links'),
]
| 2.015625
| 2
|
tests/test_environment.py
|
NGalandim/roboai-python-cli
| 3
|
12778359
|
from robo_bot_cli.main import cli
def test_activate_environment(runner):
result = runner.invoke(cli, ['environment', 'activate', 'integration'])
assert result.exit_code == 0
assert 'The connection to the integration environment was successfully established.' in result.output
def test_create_environment(runner):
result = runner.invoke(cli, ['environment', 'create', 'development', '--base-url',
'http//fake-url.com', '--username', 'fake-user', '--password', '<PASSWORD>'])
assert result.exit_code == 0
assert "The environment was successfully created.\nYou can now activate it by running "\
"'robo-bot activate development'.\n" in result.output
def test_remove_environment(runner):
result = runner.invoke(cli, ['environment', 'remove', 'development'])
assert result.exit_code == 0
assert "'development' has been successfully removed." in result.output
def test_which_environment(runner):
result = runner.invoke(cli, ['environment', 'which'])
assert result.exit_code == 0
assert "The 'integration' environment is currently activated." in result.output
def test_list_environments(runner):
result = runner.invoke(cli, ['environment', 'list'])
assert result.exit_code == 0
assert "# robo-bot environments:\n" in result.output
| 2.453125
| 2
|
leetcode/course_schedule.py
|
dxmahata/codinginterviews
| 0
|
12778360
|
"""
There are a total of n courses you have to take, labeled from 0 to n - 1.
Some courses may have prerequisites, for example to take course 0 you have to first
take course 1, which is expressed as a pair: [0,1]
Given the total number of courses and a list of prerequisite pairs, is it possible
for you to finish all courses?
For example:
2, [[1,0]]
There are a total of 2 courses to take. To take course 1 you should have finished
course 0. So it is possible.
2, [[1,0],[0,1]]
There are a total of 2 courses to take. To take course 1 you should have finished
course 0, and to take course 0 you should also have finished course 1. So it is impossible.
Note:
The input prerequisites is a graph represented by a list of edges, not adjacency
matrices. Read more about how a graph is represented.
URL: https://leetcode.com/problems/course-schedule/
"""
class Vertex:
def __init__(self, key):
self.id = key
self.adjacent = {}
self.indegree = 0
self.outdegree = 0
self.predecessor = None
self.visit_time = 0
self.finish_time = 0
self.color = "white"
def add_neighbor(self, nbr, weight=0):
self.adjacent[nbr] = weight
def get_neighbors(self):
return self.adjacent.keys()
def get_id(self):
return self.id
def get_weight(self, nbr):
return self.adjacent[nbr]
def get_indegree(self):
return self.indegree
def set_indegree(self, indegree):
self.indegree = indegree
def get_outdegree(self):
return self.outdegree
def set_outdegree(self, outdegree):
self.outdegree = outdegree
def get_predecessor(self):
return self.predecessor
def set_predecessor(self, pred):
self.predecessor = pred
def get_visit_time(self):
return self.visit_time
def set_visit_time(self, visit_time):
self.visit_time = visit_time
def get_finish_time(self):
return self.finish_time
def set_finish_time(self, finish_time):
self.finish_time = finish_time
def get_color(self):
return self.color
def set_color(self, color):
self.color = color
def __str__(self):
return str(self.id) + ' connectedTo: ' + str([x.id for x in self.adjacent])
class Graph:
def __init__(self):
self.vertex_dict = {}
self.no_vertices = 0
self.no_edges = 0
def add_vertex(self, vert_key):
new_vertex_obj = Vertex(vert_key)
self.vertex_dict[vert_key] = new_vertex_obj
self.no_vertices += 1
def get_vertex(self, vert_key):
if vert_key in self.vertex_dict:
return self.vertex_dict[vert_key]
else:
return None
def add_edge(self, fro, to, weight=1):
if fro not in self.vertex_dict:
self.add_vertex(fro)
from_vertex = self.get_vertex(fro)
else:
from_vertex = self.vertex_dict[fro]
if to not in self.vertex_dict:
self.add_vertex(to)
to_vertex = self.get_vertex(to)
else:
to_vertex = self.vertex_dict[to]
from_vertex.add_neighbor(to_vertex, weight)
from_vertex.set_outdegree(from_vertex.get_outdegree() + 1)
to_vertex.set_indegree(to_vertex.get_indegree() + 1)
self.no_edges += 1
def get_edges(self):
edges = []
for u in self.vertex_dict:
for v in self.vertex_dict[u].get_neighbors():
u_id = u
#print(v)
v_id = v.get_id()
edges.append((u_id, v_id, self.vertex_dict[u].get_weight(v)))
return edges
def get_vertices(self):
return self.vertex_dict
class DFS:
def __init__(self, graph):
self.graph = graph
self.has_cycle = False
def dfs(self):
for vertex in self.graph.get_vertices():
if self.graph.vertex_dict[vertex].get_color() == "white":
self.dfs_visit(self.graph.vertex_dict[vertex])
def dfs_visit(self, node):
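        # A "gray" vertex is still on the recursion stack, so reaching a gray
        # neighbor below means a back edge, i.e. the graph contains a cycle.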
node.set_color("gray")
for vert in node.get_neighbors():
if vert.get_color() == "gray":
self.has_cycle = True
if vert.get_color() == "white":
vert.set_color("gray")
vert.set_predecessor(node)
self.dfs_visit(vert)
node.set_color("black")
class Solution(object):
def canFinish(self, numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
if not prerequisites:
return True
else:
g = Graph()
for edge in prerequisites:
g.add_edge(edge[0], edge[1])
dfs_obj = DFS(g)
dfs_obj.dfs()
            return not dfs_obj.has_cycle
if __name__ == "__main__":
soln1 = Solution()
print(soln1.canFinish(2, [[1,0]]))
soln2 = Solution()
print(soln2.canFinish(2, [[1,0],[0,1]]))
| 4.15625
| 4
|
src/bank/bankia/__init__.py
|
sunbit/banking
| 0
|
12778361
|
from .scrapping import login, get_account_transactions, get_credit_card_transactions
from .parsing import parse_account_transaction, parse_credit_card_transaction
| 1.078125
| 1
|
river_crossing_riddle.py
|
Sanchitraina1999/eAI
| 1
|
12778362
|
"""River Crossing Riddle"""
from search_solver import SearchSolver
class RiverCrossingRiddle(SearchSolver):
"""Class to solve River Crossing riddle"""
def __init__(self, boat_capacity):
agents = ["robot", "fox", "chicken", "chicken-feed"]
agent_states = [0, 1]
self.capacity = boat_capacity
super().__init__(agents, agent_states)
def valid_state(self, state):
"""Checks if a `state` is valid
Rules:
The following cannot have the same state (i.e. together),
unless 'robot' has the same state (i.e. accompanying them):
* 'fox' and 'chicken'
* 'chicken' and 'chicken-feed'
PARAMETERS
state (dict-like)
RETURN VALUE
bool
"""
return ~(
((state["fox"] == state["chicken"]) & (state["robot"] != state["chicken"]))
| (
(state["chicken"] == state["chicken-feed"])
& (state["robot"] != state["chicken"])
)
)
def valid_transition(self, state1, state2):
"""Check if `state1` can transition to `state2`
Rules:
* The robot must always change state (i.e. row the boat)
* Total state changes cannot be more than `capacity` (i.e. boat passengers)
* Change state must be the same direction as robot
(i.e. if robot goes 1 -> 0, all changes must be 1 -> 0; vice versa)
PARAMETERS:
state1 (dict-like)
state2 (dict-like)
capacity (int): max # of passengers in the boat, including the robot
RETURN VALUE
bool
"""
assert set(state1.keys()) == set(
state2.keys()
), "state1 and state2 must have the same agents"
return (
(state1["robot"] != state2["robot"])
& (
sum(
int(state1[agent] != state2[agent])
for agent, state in state1.items()
if state == state1["robot"]
)
<= self.capacity
)
& (
sum(
int(state1[agent] != state2[agent])
for agent, state in state1.items()
if state != state1["robot"]
)
== 0
)
)
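# Worked example (added, hedged): assuming states are passed as pandas Series
# (or another type whose ==, &, | and ~ operate element-wise, as the bitwise
# operators above suggest), then
#   {"robot": 1, "fox": 0, "chicken": 0, "chicken-feed": 1} is invalid
#       (fox and chicken share side 0 without the robot), while
#   {"robot": 0, "fox": 1, "chicken": 0, "chicken-feed": 1} is valid
#       (fox and chicken-feed may be left together).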
def main(boat_capacity):
"""Solves puzzle, given boat capacity
PARAMETERS
boat_capacity (int): number of passengers in the boat, including the robot
RETURN VALUE
tuple: path length, and possible states per step
"""
solver = RiverCrossingRiddle(boat_capacity)
state_i = solver.get_state_id({agent: 0 for agent in solver.agents})
state_n = solver.get_state_id({agent: 1 for agent in solver.agents})
paths = solver.find_shortest_paths(state_i, state_n)
path_length = len(paths) - 1
return (path_length, paths)
if __name__ == "__main__":
main(2)
| 3.703125
| 4
|
anaf/core/api/views.py
|
tovmeod/anaf
| 2
|
12778363
|
from rest_framework import viewsets
from anaf.core.rendering import API_RENDERERS
from anaf.core.models import User as Profile, AccessEntity, Group, Perspective, Object, Module
import serializers
from anaf.viewsets import AnafViewSet
class CoreBaseViewSet(AnafViewSet):
module = 'anaf.core'
class ProfileView(CoreBaseViewSet):
"""
API endpoint that allows user profiles to be viewed or edited.
"""
queryset = Profile.objects.all()
serializer_class = serializers.ProfileSerializer
renderer_classes = API_RENDERERS
class GroupView(CoreBaseViewSet):
"""
API endpoint that allows Groups to be viewed or edited.
"""
queryset = Group.objects.all()
serializer_class = serializers.GroupSerializer
renderer_classes = API_RENDERERS
class PerspectiveView(CoreBaseViewSet):
"""
API endpoint that allows Groups to be viewed or edited.
"""
queryset = Perspective.objects.all()
serializer_class = serializers.PerspectiveSerializer
renderer_classes = API_RENDERERS
class AccessEntityView(CoreBaseViewSet):
"""
API endpoint that allows Access Entities to be viewed or edited.
"""
queryset = AccessEntity.objects.all()
serializer_class = serializers.AccessEntitySerializer
renderer_classes = API_RENDERERS
class ObjectView(viewsets.ModelViewSet):
"""
API endpoint that allows anaf Objects to be viewed or edited.
"""
queryset = Object.objects.all()
serializer_class = serializers.ObjectSerializer
class ModuleView(viewsets.ModelViewSet):
"""
API endpoint that allows anaf Modules to be viewed or edited.
"""
queryset = Module.objects.all()
serializer_class = serializers.ModuleSerializer
| 2.296875
| 2
|
restapi/models.py
|
AymenQ/tarteel.io
| 0
|
12778364
|
from __future__ import unicode_literals
from django.db import models
class DemographicInformation(models.Model):
session_id = models.CharField(max_length=32, blank=True)
# This could be used to store different platforms such as android,
# ios, web if different identification methods are used for each one.
platform = models.CharField(max_length=256, default='web')
gender = models.CharField(max_length=32)
qiraah = models.CharField(max_length=32, blank=True, null=True)
age = models.CharField(max_length=32)
ethnicity = models.CharField(max_length=32, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True)
class AnnotatedRecording(models.Model):
file = models.FileField(blank=True, null=True)
surah_num = models.IntegerField(blank=True, null=True)
ayah_num = models.IntegerField(blank=True, null=True)
hash_string = models.CharField(max_length=32)
recitation_mode = models.CharField(max_length=32, blank=True, null=True)
timestamp = models.DateTimeField(auto_now_add=True) # TODO(implement timeout)
session_id = models.CharField(max_length=32)
is_evaluated = models.BooleanField(default=False)
associated_demographic = models.ForeignKey(DemographicInformation,
on_delete=models.SET_NULL,
null=True, blank=True, default=None)
| 2.359375
| 2
|
services/web/server/src/simcore_service_webserver/storage_api.py
|
GitHK/osparc-simcore-forked
| 0
|
12778365
|
""" Storage subsystem's API: responsible of communication with storage service
"""
import logging
from pprint import pformat
from aiohttp import web
from yarl import URL
from servicelib.rest_responses import unwrap_envelope
from .storage_config import get_client_session, get_storage_config
log = logging.getLogger(__name__)
def _get_storage_client(app: web.Application):
cfg = get_storage_config(app)
# storage service API endpoint
endpoint = URL.build(scheme="http", host=cfg["host"], port=cfg["port"]).with_path(
cfg["version"]
)
session = get_client_session(app)
return session, endpoint
async def copy_data_folders_from_project(
app, source_project, destination_project, nodes_map, user_id
):
    # TODO: check whether the project actually has data before making the call
client, api_endpoint = _get_storage_client(app)
# /simcore-s3/folders:
url = (api_endpoint / "simcore-s3/folders").with_query(user_id=user_id)
async with client.post(
url,
json={
"source": source_project,
"destination": destination_project,
"nodes_map": nodes_map,
},
ssl=False,
) as resp:
payload = await resp.json()
updated_project, error = unwrap_envelope(payload)
if error:
msg = "Cannot copy project data in storage: %s" % pformat(error)
log.error(msg)
# TODO: should reconstruct error and rethrow same exception as storage service?
raise web.HTTPServiceUnavailable(reason=msg)
return updated_project
async def _delete(session, target_url):
async with session.delete(target_url, ssl=False) as resp:
log.info(
"delete_data_folders_of_project request responded with status %s",
resp.status,
)
# NOTE: context will automatically close connection
async def delete_data_folders_of_project(app, project_id, user_id):
# SEE api/specs/storage/v0/openapi.yaml
session, api_endpoint = _get_storage_client(app)
url = (api_endpoint / f"simcore-s3/folders/{project_id}").with_query(
user_id=user_id
)
await _delete(session, url)
async def delete_data_folders_of_project_node(
app, project_id: str, node_id: str, user_id: str
):
# SEE api/specs/storage/v0/openapi.yaml
session, api_endpoint = _get_storage_client(app)
url = (api_endpoint / f"simcore-s3/folders/{project_id}").with_query(
user_id=user_id, node_id=node_id
)
await _delete(session, url)
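# Illustrative usage (added, hedged): inside an aiohttp request handler one
# would typically await these coroutines, e.g.
#
#     new_project = await copy_data_folders_from_project(
#         request.app, source_project, destination_project, nodes_map, user_id)
#     await delete_data_folders_of_project(request.app, project_id, user_id)
#
# assuming the storage configuration and client session were registered on the
# application at startup (see storage_config).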
| 2.140625
| 2
|
twodlearn/bayesnet/bayesnet.py
|
danmar3/twodlearn
| 0
|
12778366
|
"""Definition of several bayesian neural-networks
"""
import numbers
import warnings
import numpy as np
import tensorflow as tf
import twodlearn as tdl
from twodlearn import common
import twodlearn.feedforward as tdlf
import tensorflow_probability as tfp
from collections import namedtuple
# -------------------- Losses -------------------- #
class GaussianKL(tdlf.Loss):
''' Evaluate KL(p||q) for p and q Normal '''
@property
def p(self):
return self._p
@property
def q(self):
return self._q
def get_n_vars(self, p, q):
def is_fully_defined(x):
return (x.loc.shape.is_fully_defined() and
x.scale.shape.is_fully_defined())
if is_fully_defined(p) and is_fully_defined(q):
loc_shape = tf.broadcast_static_shape(p.loc.shape,
q.loc.shape)
scale_shape = tf.broadcast_static_shape(p.scale.shape,
q.scale.shape)
shape = tf.broadcast_static_shape(loc_shape, scale_shape)
else:
loc_shape = tf.broadcast_dynamic_shape(p.loc.shape,
q.loc.shape)
scale_shape = tf.broadcast_dynamic_shape(p.scale.shape,
q.scale.shape)
shape = tf.broadcast_dynamic_shape(loc_shape, scale_shape)
return tf.reduce_prod(shape)
def evaluate(self, p, q):
''' Evaluate KL(p||q) '''
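        # Closed form computed below, summed over all (broadcast) dimensions:
        #   KL(N(m_p, s_p^2) || N(m_q, s_q^2))
        #       = log(s_q / s_p) + (s_p^2 + (m_p - m_q)^2) / (2 * s_q^2) - 1/2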
p_var = p.scale**2
q_var = q.scale**2
ratio = p_var / q_var
kl = -tf.log(ratio) + ratio \
+ (tf.square(p.loc - q.loc) / q_var)
n_vars = self.get_n_vars(p, q)
return 0.5 * (tf.reduce_sum(kl) - tf.cast(n_vars, tf.float32))
def fromlist(self, p_list):
with tf.name_scope('list2vector'):
p_loc = tf.concat([tf.reshape(p.loc, [-1])
for p in p_list], axis=0)
p_scale = tf.concat([tf.reshape(p.scale, [-1])
for p in p_list], axis=0)
p = tfp.distributions.Normal(p_loc, p_scale)
return p
def __init__(self, p, q, name='GaussianKL'):
''' p: a normal distribution or a list of normal distributions,
q: base normal distribution'''
super(GaussianKL, self).__init__(name=name)
with tf.name_scope(self.scope):
# check type of p
if isinstance(p, list):
p = self.fromlist(p)
# evaluate kl divergence
assert (isinstance(p, (tfp.distributions.Normal, Normal,
McNormal)) and
isinstance(q, (tfp.distributions.Normal, Normal,
McNormal))), \
'GaussianKL is only defined for p, q being '\
                'tfp.distributions.Normal or tdl.bayesnet.Normal'
self._p = p
self._q = q
self._value = self.evaluate(p, q)
@classmethod
def fromstats(cls, p_loc, p_scale, q_loc, q_scale):
p = tfp.distributions.Normal(p_loc, p_scale)
q = tfp.distributions.Normal(q_loc, q_scale)
return cls(p, q)
class GaussianNegLogLikelihood(tdlf.EmpiricalLoss):
@property
def y(self):
return self._y
@y.setter
def y(self, value):
assert not hasattr(self, '_y'),\
'property y can only be set during initialization'
assert (isinstance(value, tf.distributions.Normal) or
isinstance(value, McNormal)),\
'y must be a normal (tf.distributions.Normal) distribution'
self._y = value
@property
def n_outputs(self):
return self.y.shape[1].value
@property
def labels(self):
''' Labels for computing the loss, if not provided,
they are created automatically '''
return self._labels
@labels.setter
def labels(self, value):
assert not hasattr(self, '_labels'), \
'property labels can only be set during initialization'
if value is None:
self._labels = tf.placeholder(tf.float32,
shape=self.y.loc.shape,
name='labels')
else:
self._labels = value
def define_fit_loss(self, y, labels):
y_mu = y.loc
y_variance = y.scale**2
loss_i = tf.reduce_sum(tf.log(y_variance) +
(tf.pow(labels - y_mu, 2) / (y_variance)), 1)
loss = 0.5 * tf.reduce_mean(loss_i, 0)
return loss
def __init__(self, y, labels=None, name='NegLogLikelihood'):
super(GaussianNegLogLikelihood, self).__init__(name=name)
with tf.name_scope(self.scope):
self.y = y
self.labels = labels
self._value = self.define_fit_loss(self.y, self.labels)
class Entropy(common.TdlModel):
@property
def value(self):
return self._value
@property
def prob(self):
return self._prob
def _evaluate(self, prob):
return tf.reduce_mean(- prob * tf.log(prob), axis=1)
def __init__(self, prob, name=None):
self._prob = prob
super(Entropy, self).__init__(name=name)
with tf.name_scope(self.scope):
self._value = self._evaluate(self.prob)
# -------------------- Models -------------------- #
class McEstimate(common.TdlModel):
@common.LazzyProperty
def mean(self):
return tf.reduce_mean(self.value, axis=0)
@common.LazzyProperty
def stddev(self):
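        # Bessel-corrected (unbiased) sample standard deviation over the Monte
        # Carlo sample axis (axis 0).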
N = tf.cast(tf.shape(self.value)[0], tf.float32)
diff = (self.value - self.mean)**2
sample_variance = (tf.reduce_sum(diff, axis=0) / (N - 1))
return tf.sqrt(sample_variance)
def __init__(self, value, name='mc_estimate'):
self.value = value
super(McEstimate, self).__init__(name=name)
class McSample(common.TdlModel):
@tdl.core.InputModel
def distribution(self, value):
if not hasattr(value, 'sample'):
raise TypeError('distribution model should have a sample method')
return value
@tdl.core.InputArgument
def sample_axis(self, value):
if value is None:
value = 0
return value
class McNormal(common.TdlModel):
@property
def value(self):
return self.samples.value
@property
def samples(self):
return self._samples
@samples.setter
def samples(self, value):
if value is None:
with tf.name_scope(self.scope):
self._samples = McEstimate(value=self._distribution.sample())
elif isinstance(value, McEstimate):
self._samples = value
else:
with tf.name_scope(self.scope):
self._samples = McEstimate(value=value)
@common.Submodel
def _distribution(self, value):
if value is None:
value = tfp.distributions.Normal(loc=self.loc, scale=self.scale)
return value
@common.Submodel
def loc(self, value):
return value
@common.Submodel
def scale(self, value):
return value
def resample(self, *args, **kargs):
with tf.name_scope(self.scope):
samples = self._distribution.sample(*args, **kargs)
self.samples = samples
return samples
def __init__(self, loc, scale, samples=None, name='McNormal', **kargs):
super(McNormal, self).__init__(loc=loc, scale=scale,
name=name, **kargs)
self.samples = samples
@tdl.core.create_init_docstring
class SampleLayer(tdl.core.Layer):
@tdl.core.InputArgument
def input_shape(self, value):
return value
@tdl.core.InputModel
def distribution(self, value):
return value
@tdl.core.InputModel
def sample_shape(self, value):
if value is not None:
value = tf.TensorShape(value)
return value
def call(self, inputs, *args, **kargs):
tdl.core.assert_initialized(
self, 'call', ['distribution', 'sample_shape'])
if self.distribution is None:
distribution = inputs
else:
distribution = (self.distribution(inputs)
if callable(self.distribution)
else self.distribution)
if self.sample_shape is not None:
return distribution.sample(sample_shape=self.sample_shape)
else:
return distribution.sample()
@tdl.core.create_init_docstring
class NormalModel(tdl.core.Layer):
@tdl.core.InputArgument
def input_shape(self, value):
'''Input tensor shape.'''
if value is None:
raise tdl.core.exceptions.ArgumentNotProvided(self)
return tf.TensorShape(value)
@tdl.core.InputArgument
def batch_shape(self, value):
if value is None:
raise tdl.core.exceptions.ArgumentNotProvided(self)
if not isinstance(value, tf.TensorShape):
value = tf.TensorShape(value)
return value
@tdl.core.SubmodelInit
def loc(self, initializer=None, trainable=True, **kargs):
tdl.core.assert_initialized(self, 'loc', ['batch_shape'])
if initializer is None:
initializer = tf.keras.initializers.zeros()
shape = tf.TensorShape(
(1 if dim is None else dim)
for dim in self.batch_shape.as_list())
return self.add_weight(
name='loc',
initializer=initializer,
shape=shape,
trainable=trainable,
**kargs)
@tdl.core.SubmodelInit
def scale(self, initializer=None, trainable=True, tolerance=1e-5,
**kargs):
        tdl.core.assert_initialized(self, 'scale', ['batch_shape'])
shape = tf.TensorShape(
(1 if dim is None else dim)
for dim in self.batch_shape.as_list())
if initializer is None:
initializer = tdl.constrained.PositiveVariableExp.init_wrapper(
initializer=tf.keras.initializers.ones(),
trainable=trainable,
tolerance=tolerance,
**kargs)
return initializer(shape=shape)
def build(self, input_shape=None):
tdl.core.assert_initialized(self, 'build', ['loc', 'scale'])
self.built = True
def call(self, inputs, *args, **kargs):
if inputs is not None:
inputs = tf.convert_to_tensor(inputs)
loc = (self.loc(inputs) if callable(self.loc)
else self.loc)
scale = (self.scale(inputs) if callable(self.scale)
else self.scale)
return tfp.distributions.Normal(loc=loc, scale=scale)
class AffineBernoulliLayer(tdl.dense.AffineLayer):
''' Implements the layer y=dropout(x) W + b'''
@tdl.core.InputArgument
def keep_prob(self, value):
'''Keep prob for dropout.'''
return (value if value is not None
else 0.8)
@common.Regularizer
def regularizer(self, prior_stddev=None):
tdl.core.assert_initialized(
self, 'regularizer', ['input_shape', 'kernel'])
if prior_stddev is None:
prior_stddev = self.options['w/prior/stddev']
with tf.name_scope(self.scope):
reg = tdlf.L2Regularizer(
self.kernel,
scale=(prior_stddev**2)/np.sqrt(self.input_shape[-1].value))
return reg
class Output(tdlf.AffineLayer.Output):
@property
def keep_prob(self):
return self.model.keep_prob
@tdl.core.Submodel
def affine(self, _):
keep_prob = self.model.keep_prob
inputs = (self.inputs if keep_prob is None
else tf.nn.dropout(self.inputs, keep_prob))
output = tf.linalg.LinearOperatorFullMatrix(self.model.kernel)\
.matvec(inputs, adjoint=True)
if self.model.bias is not None:
output = output + self.model.bias
return output
@tdl.core.OutputValue
def value(self, _):
return self.affine
class DenseBernoulliLayer(AffineBernoulliLayer):
@tdl.core.InputArgument
def activation(self, value):
return value
def __init__(self, activation=tf.nn.relu, name=None, **kargs):
super(DenseBernoulliLayer, self).__init__(
activation=activation,
name=name, **kargs)
class Output(AffineBernoulliLayer.Output):
@property
def keep_prob(self):
return self.model.keep_prob
@common.OutputValue
def value(self, _):
return self.model.activation(self.affine)
@tdl.core.create_init_docstring
class LinearNormalLayer(tdl.dense.LinearLayer):
@tdl.core.InputArgument
def tolerance(self, value):
return value
@tdl.core.ParameterInit
def kernel(self, initializer=None, trainable=True, max_scale=1.0):
tdl.core.assert_initialized(
self, 'kernel', ['units', 'input_shape'])
if initializer is None:
initializer = tdl.constrained.PositiveVariableExp.init_wrapper(
initializer=tdl.core.initializers.SumFanConstant(),
trainable=trainable,
max=max_scale,
tolerance=self.tolerance)
scale = initializer(shape=[self.input_shape[-1].value, self.units])
for var in tdl.core.get_variables(scale):
self.add_weight(var)
loc = self.add_weight(
name='loc',
initializer=tf.keras.initializers.zeros(),
shape=[self.input_shape[-1].value, self.units],
trainable=trainable)
return tfp.distributions.Normal(loc=loc, scale=scale, name='kernel')
@tdl.core.Regularizer
def regularizer(self, prior_stddev=None):
''' Return the KL regularizer for the layer '''
tdl.core.assert_initialized(self, 'regularizer', ['kernel'])
if prior_stddev is None:
assert self.options['w/prior/stddev'] is not None,\
                'prior stddev not specified as argument nor '\
'found in the model options'
assert isinstance(self.options['w/prior/stddev'],
numbers.Real), \
'provided prior stddev is not a number'
assert self.options['w/prior/stddev'] > 0.0, \
                'provided prior stddev is <= 0'
prior_stddev = self.options['w/prior/stddev']
with tf.name_scope(self.scope):
with tf.name_scope('regularizer'):
prior = tfp.distributions.Normal(
loc=0.0, scale=prior_stddev)
reg = tf.reduce_sum(
tfp.distributions.kl_divergence(self.kernel, prior))
return reg
def _init_options(self, options):
default = {'w/stddev/init_method': 'sum',
'w/stddev/alpha': 1.0,
'w/stddev/trainable': True,
'w/prior/stddev': None}
options = common.check_defaults(options, default)
options = super(LinearNormalLayer, self)._init_options(options)
return options
class Output(tdl.core.TdlModel):
@property
def shape(self):
return self.value.shape
@tdl.core.InputModel
def model(self, value):
return value
@tdl.core.InputArgument
def inputs(self, value):
return value
def _loc(self):
kernel = self.model.kernel
return tf.linalg.LinearOperatorFullMatrix(kernel.loc)\
.matvec(self.inputs, adjoint=True)
def _scale(self):
kernel_cov = tf.linalg.LinearOperatorFullMatrix(
tf.square(self.model.kernel.scale))
output_cov = kernel_cov.matvec(tf.square(self.inputs),
adjoint=True)
return tf.sqrt(output_cov)
@tdl.core.Submodel
def affine(self, _):
'''Normal distribution for the outputs.'''
return tfp.distributions.Normal(
loc=self._loc(), scale=self._scale())
@tdl.core.OutputValue
def value(self, _):
'''Sample from the output distribution.'''
return self.affine.sample()
def call(self, inputs, *args, **kargs):
return type(self).Output(model=self, inputs=inputs, *args, **kargs)
class AffineNormalLayer(LinearNormalLayer):
@tdl.core.ParameterInit
def bias(self, initializer=None, trainable=True, **kargs):
tdl.core.assert_initialized(self, 'bias', ['units'])
if initializer is None:
initializer = tf.keras.initializers.zeros()
return self.add_weight(name='bias', initializer=initializer,
shape=[self.units], trainable=trainable,
**kargs)
class Output(LinearNormalLayer.Output):
def _loc(self):
kernel = self.model.kernel
bias = self.model.bias
loc = tf.linalg.LinearOperatorFullMatrix(kernel.loc)\
.matvec(self.inputs, adjoint=True)
if bias is not None:
loc = loc + bias
return loc
class DenseNormalLayer(AffineNormalLayer):
@tdl.core.InputArgument
def activation(self, value):
if value is not None:
if not callable(value):
raise ValueError('activation function must be callable')
return value
def __init__(self, activation=tf.nn.relu, name=None, **kargs):
super(DenseNormalLayer, self).__init__(
activation=activation, name=name, **kargs)
class Output(AffineNormalLayer.Output):
@property
def value(self):
activation = self.model.activation
samples = self.affine.sample()
return (samples if activation is None
else activation(samples))
class BayesianMlp(tdl.StackedModel):
''' Mlp composed of layers whose weights are sampled from a variational
posterior distribution
'''
@property
def n_inputs(self):
''' size of the input vectors '''
return self.layers[0].input_shape[-1]
@property
def n_outputs(self):
''' size of the output vectors '''
return self.layers[-1].units
@property
def kernels(self):
''' list the weights distributions from the layers '''
return [layer.kernel for layer in self.layers
if hasattr(layer, 'kernel')]
@property
def parameters(self):
return [pi for layer in self.layers for pi in layer.parameters]
_HiddenClass = DenseNormalLayer
_OutputClass = AffineNormalLayer
def _define_layers(self, n_inputs, n_outputs, n_hidden, afunction):
layers = list()
Layers = [self._HiddenClass] * len(n_hidden) + [self._OutputClass]
if not isinstance(afunction, list):
afunction = [afunction for i in range(len(n_hidden))] + [None]
_n_inputs = n_inputs
for l, n_units in enumerate(n_hidden + [n_outputs]):
if afunction[l] is not None:
layers.append(Layers[l](input_shape=_n_inputs,
units=n_units,
activation=afunction[l]))
else:
layers.append(Layers[l](input_shape=_n_inputs,
units=n_units))
_n_inputs = n_units
return layers
@tdl.Submodel
def layers(self, value):
layers = self._define_layers(*value)
return layers
@common.Regularizer
def regularizer(self, prior_stddev=None):
with tf.name_scope(self.scope):
with tf.name_scope('regularizer'):
reg = [(layer.regularizer.value if layer.regularizer.is_set
else layer.regularizer.init(prior_stddev))
for layer in self.layers
if hasattr(layer, 'regularizer')]
if reg:
reg = (reg[0] if len(reg) == 1
else tdl.losses.AddNLosses(reg))
else:
raise AttributeError(
'None of the Layers has a regularizer defined')
return reg
class BayesMlpOutput(tdl.core.OutputModel):
@property
def kernels(self):
return self.model.kernels
@property
def shape(self):
return self.value.shape
@tdl.ModelMethod(['output', 'hidden', 'value'], ['inputs'], BayesMlpOutput)
def evaluate(self, object, inputs):
x = inputs
hidden = list()
for layer in self.layers:
if isinstance(x, (tdl.common.TdlOp)):
x = layer(x.value)
else:
x = layer(x)
hidden.append(x)
y = hidden[-1]
if hasattr(y, 'value'):
value = y.value
else:
value = None
return y, hidden, value
def _init_options(self, options):
layers_default = {'w/stddev/init_method': 'sum',
'w/stddev/alpha': 1.0,
'w/stddev/trainable': True}
default = {'layers/options': layers_default}
options = tdl.common.check_defaults(options, default)
options = super(BayesianMlp, self)._init_options(options)
return options
def __init__(self, n_inputs, n_outputs, n_hidden,
afunction=tdlf.selu01, options=None,
name='BayesianMlp'):
super(BayesianMlp, self).__init__(
layers=(n_inputs, n_outputs, n_hidden, afunction),
options=options, name=name)
class BernoulliBayesianMlp(BayesianMlp):
_HiddenClass = DenseBernoulliLayer
_OutputClass = AffineBernoulliLayer
def _define_layers(self, n_inputs, n_outputs, n_hidden,
keep_prob, afunction):
layers = list()
Layers = [self._HiddenClass] * len(n_hidden) + [self._OutputClass]
if not isinstance(afunction, list):
afunction = [afunction for i in range(len(n_hidden))] + [None]
if not isinstance(keep_prob, list):
if len(n_hidden) == 0:
keep_prob = [keep_prob]
else:
keep_prob = [None] + [keep_prob for i in range(len(n_hidden))]
_n_inputs = n_inputs
for l, n_units in enumerate(n_hidden + [n_outputs]):
if afunction[l] is not None:
layers.append(Layers[l](input_shape=_n_inputs,
units=n_units,
activation=afunction[l],
keep_prob=keep_prob[l]))
else:
layers.append(Layers[l](input_shape=_n_inputs,
units=n_units,
keep_prob=keep_prob[l]))
_n_inputs = n_units
return layers
def __init__(self, n_inputs, n_outputs, n_hidden, keep_prob=0.8,
options=None, afunction=tf.nn.relu,
name='BernoulliBayesianMlp'):
tdl.StackedModel.__init__(
self,
layers=(n_inputs, n_outputs,
n_hidden, keep_prob, afunction),
options=options, name=name)
class BoundedBayesianMlp(BayesianMlp):
''' Multi-layer bayesian neural network, with bounded output '''
@tdl.Submodel
def layers(self, value):
try:
n_inputs, n_outputs, n_hidden, lower, upper, afunction = value
        except (TypeError, ValueError):
raise AttributeError('Wrong format for initializing layers. '
'Format should be: '
'n_inputs, n_outputs, n_hidden, lower, '
'upper, afunction')
layers = self._define_layers(n_inputs, n_outputs, n_hidden, afunction)
layers.append(tdl.feedforward.BoundedOutput(lower=lower, upper=upper))
return layers
def __init__(self, n_inputs, n_outputs, n_hidden,
lower=1e-7, upper=None,
afunction=tdlf.selu01, options=None,
name='BayesianMlp'):
super(BayesianMlp, self).__init__(
layers=(n_inputs, n_outputs, n_hidden, lower, upper, afunction),
options=options, name=name)
class BoundedBernoulliBayesianMlp(BernoulliBayesianMlp):
''' Multi-layer bayesian neural network, with bounded output '''
@tdl.Submodel
def layers(self, value):
try:
(n_inputs, n_outputs, n_hidden, keep_prob,
lower, upper, afunction) = value
        except (TypeError, ValueError):
            raise AttributeError('Wrong format for initializing layers. '
                                 'Format should be: '
                                 'n_inputs, n_outputs, n_hidden, keep_prob, '
                                 'lower, upper, afunction')
layers = self._define_layers(n_inputs, n_outputs, n_hidden,
keep_prob, afunction)
layers.append(tdl.feedforward.BoundedOutput(lower=lower, upper=upper))
return layers
def __init__(self, n_inputs, n_outputs, n_hidden, keep_prob,
lower=1e-7, upper=None, afunction=tdlf.selu01,
options=None, name='BayesianMlp'):
super(BayesianMlp, self).__init__(
layers=(n_inputs, n_outputs, n_hidden, keep_prob,
lower, upper, afunction),
options=options, name=name)
class Normal(tdl.TdlModel):
_submodels = ['loc', 'scale']
@common.SubmodelWithArgs
def loc(self, value, shape):
if (value is None) and (shape is not None):
value = tf.Variable(tf.zeros(shape=shape), trainable=True)
if (value is None) and (shape is None):
raise ValueError('You must provide either a value for loc '
'or a shape to create a variable')
if isinstance(value, (int, float)) and (shape is not None):
value = tdl.variable(tf.constant(value, shape=shape),
trainable=True)
return value
@common.SubmodelWithArgs
def scale(self, value, shape):
def get_value(d):
return (d if isinstance(d, int)
else 1 if d is None
else d.value if hasattr(d, 'value')
else d)
def replace_none(shape_in):
return [get_value(d) for d in shape_in]
shape = (replace_none(shape) if shape is not None
else (replace_none(self.loc.shape)
if hasattr(self.loc, 'shape')
else None))
def initializer(initial_value):
return tf.random_normal(shape=shape,
mean=initial_value,
stddev=0.00001)
if (value is None) and (shape is not None):
value = tdl.common.PositiveVariableExp(initializer=initializer,
initial_value=1.0,
trainable=True)
if isinstance(value, (int, float)) and (shape is not None):
value = tdl.common.PositiveVariableExp(initializer=initializer,
initial_value=value,
trainable=True)
if value is None:
raise ValueError('Unable to identify the shape for scale. '
'Provide shape to create a variable or '
'directly specify the scale.')
return value
@common.Regularizer
def regularizer(self, loc_scale=None, scale_scale=None):
''' Returns a sum of the loc and scale regularizers '''
with tf.name_scope(self.scope):
reg = None
if hasattr(self.loc, 'regularizer'):
reg = (self.loc.regularizer.value
if self.loc.regularizer.is_set
else self.loc.regularizer.init(loc_scale))
if hasattr(self.scale, 'regularizer'):
scale_reg = (self.scale.regularizer.value
if self.scale.regularizer.is_set
else self.scale.regularizer.init(scale_scale))
reg = (scale_reg if reg is None
else reg + scale_reg)
return reg
class NormalOutput(McNormal):
@common.InputArgument
def inputs(self, value):
return value
@common.Submodel
def loc(self, value):
if callable(self.model.loc):
return self.model.loc(self.inputs)
else:
return self.model.loc
@common.Submodel
def scale(self, value):
if callable(self.model.scale):
return self.model.scale(self.inputs)
else:
return self.model.scale
def __init__(self, model, inputs, name=None):
self.model = model
super(Normal.NormalOutput, self).__init__(
loc=None, scale=None, inputs=inputs, name=name)
def evaluate(self, inputs=None, name=None):
return type(self).NormalOutput(self, inputs=inputs, name=name)
def __call__(self, inputs, name=None):
return self.evaluate(inputs=inputs, name=name)
def __init__(self, loc=None, scale=None, shape=None,
name='Normal', **kargs):
super(Normal, self).__init__(
loc={'value': loc, 'shape': shape},
scale={'value': scale, 'shape': shape},
name=name, **kargs)
class ConditionalNormal(Normal):
_submodels = ['loc', 'scale']
NormalOutput = Normal.NormalOutput
@common.SubmodelWithArgs
def loc(self, value, shape):
if value is None:
value = tdl.common.Identity
return value
def __init__(self, loc=None, scale=None, shape=None,
name='ConditionalNormal'):
super(ConditionalNormal, self).__init__(loc=loc, scale=scale,
shape=shape,
name=name)
class NormalMlp(ConditionalNormal):
class NormalOutput(Normal.NormalOutput):
@property
def n_inputs(self):
return self.model.n_inputs
@property
def n_outputs(self):
            return self.model.n_outputs
@common.InputArgument
def inputs(self, value):
if value is None:
value = tf.placeholder(tf.float32)
return value
class McNormalOutput(NormalOutput):
@property
def n_particles(self):
return self._n_particles
@common.InputArgument
def inputs(self, value):
if value is None:
value = tf.placeholder(tf.float32, shape=[1, self.n_inputs])
if not isinstance(value, Particles):
value = Particles(n_particles=self.n_particles, base=value)
return value
def __init__(self, model, n_particles, inputs, name=None):
self._n_particles = n_particles
super(NormalMlp.McNormalOutput, self).__init__(
model=model, inputs=inputs, name=name)
@property
def n_inputs(self):
return self.loc.n_inputs
@property
def n_outputs(self):
return self.loc.n_outputs
@common.SubmodelWithArgs
def loc(self, LocClass, loc_args):
if LocClass is None:
return super(type(self), type(self)).loc.finit(
self, value=None, shape=[1, self.n_inputs])
return LocClass(**loc_args)
@common.SubmodelWithArgs
def scale(self, ScaleClass, scale_args):
if ScaleClass is None:
return super(type(self), type(self)).scale.finit(
self, value=None, shape=[1, self.n_inputs])
scale_args.setdefault('n_inputs', self.loc.n_inputs)
scale_args.setdefault('n_outputs', self.loc.n_inputs)
return ScaleClass(**scale_args)
    def __init__(self, loc_args, scale_args=None,
                 LocClass=BayesianMlp, ScaleClass=None,
                 options=None, name='GaussianMlp'):
        # guard against a shared mutable default; the scale descriptor mutates
        # scale_args via setdefault
        if scale_args is None:
            scale_args = {}
        tdl.TdlModel.__init__(
            self, loc={'LocClass': LocClass, 'loc_args': loc_args},
            scale={'ScaleClass': ScaleClass, 'scale_args': scale_args},
            name=name, options=options)
def mc_evaluate(self, n_particles, x=None, name=None):
return NormalMlp.McNormalOutput(self, n_particles, inputs=x, name=name)
class HeteroscedasticNormalMlp(NormalMlp):
''' Defines a conditional gaussian
N(loc=BayesianMlp(x), scale=BoundedOutputBayesianMlp(x)) '''
def __init__(self, loc_args, scale_args,
LocClass=BayesianMlp, ScaleClass=BoundedBayesianMlp,
options=None, name='HeteroscedasticGaussianMlp'):
super(HeteroscedasticNormalMlp, self)\
.__init__(loc_args, scale_args,
LocClass, ScaleClass,
options=options, name=name)
class Particles(common.TdlModel):
@property
def n_particles(self):
return self._n_particles
@property
def base_shape(self):
return self._base_shape
@property
def value(self):
return self._value
@common.SimpleParameter
def base(self, value):
if value is None:
return tf.Variable(tf.random_normal(shape=self.base_shape))
else:
return value
def _evaluate(self):
value = tf.tile(self.base, multiples=[self.n_particles, 1])
return value
def __init__(self, n_particles, base=None, shape=None,
name='Particles', **kargs):
self._n_particles = n_particles
self._base_shape = (base.shape if base is not None
else shape)
super(Particles, self).__init__(name=name, base=base, **kargs)
with tf.name_scope(self.scope):
self._value = self._evaluate()
@tdl.core.create_init_docstring
class ParticlesLayer(tdl.core.Layer):
@tdl.core.InputArgument
def input_shape(self, value):
'''Input tensor shape.'''
if value is None:
raise tdl.core.exceptions.ArgumentNotProvided(self)
if not isinstance(value, tf.TensorShape):
value = tf.TensorShape(value)
return value
@tdl.core.InputArgument
def particles(self, value):
'''Number of particles.'''
if value is None:
raise tdl.core.exceptions.ArgumentNotProvided(self)
return value
def compute_output_shape(self, input_shape=None):
if input_shape is None:
tdl.core.assert_initialized(
                self, 'compute_output_shape',
['input_shape', 'particles'])
input_shape = self.input_shape
input_shape = tf.TensorShape(input_shape).as_list()
return tf.TensorShape([self.particles] + input_shape)
def call(self, inputs):
try:
inputs = tf.convert_to_tensor(inputs)
except TypeError:
return inputs.sample(sample_shape=[self.particles])
multiples = [self.particles] + [1]*inputs.shape.ndims
return tf.tile(inputs[tf.newaxis, ...],
multiples)
class McNormalEstimate(tdl.core.Layer):
@tdl.core.InputArgument
def input_shape(self, value):
'''Input tensor shape.'''
if value is None:
raise tdl.core.exceptions.ArgumentNotProvided(self)
return tf.TensorShape(value)
@tdl.core.InputArgument
def sample_dim(self, value):
'''Sample dimension across which mean and stddev are computed.'''
if value is None:
value = 0
return value
def call(self, inputs):
inputs = tf.convert_to_tensor(inputs)
if tdl.core.is_property_initialized(self, 'input_shape'):
assert self.input_shape.is_compatible_with(inputs.shape)
else:
self.input_shape = inputs.shape
# mean
loc = tf.reduce_mean(inputs, axis=self.sample_dim)
# stddev
        N = tf.cast(tf.shape(inputs)[self.sample_dim], tf.float32)
diff = (inputs - loc)**2
sample_variance = (tf.reduce_sum(diff, axis=self.sample_dim)
/ (N - 1))
scale = tf.sqrt(sample_variance)
return tfp.distributions.Normal(loc=loc, scale=scale)
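# Usage sketch for the pieces above (illustrative only; the constructor keywords
# follow the patterns in this file and the exact tdl API may differ):
#   model = BayesianMlp(n_inputs=3, n_outputs=1, n_hidden=[32, 32])
#   x_mc = ParticlesLayer(particles=100)(x)       # tile the inputs across particles
#   y_samples = model.evaluate(x_mc).value        # one sampled output per particle
#   y_dist = McNormalEstimate()(y_samples)        # Normal(loc, scale) estimated over axis 0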
| 2.25
| 2
|
scrapers/NOW-norwich/councillors.py
|
DemocracyClub/LGSF
| 4
|
12778367
|
<filename>scrapers/NOW-norwich/councillors.py
from lgsf.councillors.scrapers import CMISCouncillorScraper
class Scraper(CMISCouncillorScraper):
base_url = "https://cmis.norwich.gov.uk/live/Councillors.aspx"
| 1.523438
| 2
|
src/masoniteorm/commands/MakeObserverCommand.py
|
yubarajshrestha/orm
| 0
|
12778368
|
<filename>src/masoniteorm/commands/MakeObserverCommand.py
import os
import pathlib
from cleo import Command
from inflection import camelize, underscore
class MakeObserverCommand(Command):
"""
Creates a new observer file.
observer
{name : The name of the observer}
{--m|model=None : The name of the model}
"""
def handle(self):
name = self.argument("name")
model = self.option("model")
if model == "None":
model = name
observer_directory = "app/observers"
with open(
os.path.join(
                pathlib.Path(__file__).parent.absolute(), "stubs/observer.stub"
)
) as fp:
output = fp.read()
output = output.replace("__CLASS__", camelize(name))
output = output.replace("__MODEL_VARIABLE__", underscore(model))
output = output.replace("__MODEL__", camelize(model))
file_name = f"{camelize(name)}Observer.py"
full_directory_path = os.path.join(os.getcwd(), observer_directory)
if os.path.exists(os.path.join(full_directory_path, file_name)):
self.line(
f'<error>Observer "{name}" Already Exists ({full_directory_path}/{file_name})</error>'
)
return
os.makedirs(os.path.join(full_directory_path), exist_ok=True)
with open(os.path.join(os.getcwd(), observer_directory, file_name), "w+") as fp:
fp.write(output)
self.info(f"Observer created: {file_name}")
| 2.4375
| 2
|
sdk/python/pulumi_azure_native/documentdb/__init__.py
|
pulumi-bot/pulumi-azure-native
| 0
|
12778369
|
<gh_stars>0
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from ._enums import *
from .cassandra_cluster import *
from .cassandra_data_center import *
from .cassandra_resource_cassandra_keyspace import *
from .cassandra_resource_cassandra_table import *
from .database_account import *
from .database_account_cassandra_keyspace import *
from .database_account_cassandra_table import *
from .database_account_gremlin_database import *
from .database_account_gremlin_graph import *
from .database_account_mongo_db_collection import *
from .database_account_mongo_db_database import *
from .database_account_sql_container import *
from .database_account_sql_database import *
from .database_account_table import *
from .get_cassandra_cluster import *
from .get_cassandra_data_center import *
from .get_cassandra_resource_cassandra_keyspace import *
from .get_cassandra_resource_cassandra_table import *
from .get_database_account import *
from .get_database_account_cassandra_keyspace import *
from .get_database_account_cassandra_table import *
from .get_database_account_gremlin_database import *
from .get_database_account_gremlin_graph import *
from .get_database_account_mongo_db_collection import *
from .get_database_account_mongo_db_database import *
from .get_database_account_sql_container import *
from .get_database_account_sql_database import *
from .get_database_account_table import *
from .get_gremlin_resource_gremlin_database import *
from .get_gremlin_resource_gremlin_graph import *
from .get_mongo_db_resource_mongo_db_collection import *
from .get_mongo_db_resource_mongo_db_database import *
from .get_notebook_workspace import *
from .get_private_endpoint_connection import *
from .get_sql_resource_sql_container import *
from .get_sql_resource_sql_database import *
from .get_sql_resource_sql_role_assignment import *
from .get_sql_resource_sql_role_definition import *
from .get_sql_resource_sql_stored_procedure import *
from .get_sql_resource_sql_trigger import *
from .get_sql_resource_sql_user_defined_function import *
from .get_table_resource_table import *
from .gremlin_resource_gremlin_database import *
from .gremlin_resource_gremlin_graph import *
from .list_database_account_connection_strings import *
from .list_database_account_keys import *
from .list_notebook_workspace_connection_info import *
from .mongo_db_resource_mongo_db_collection import *
from .mongo_db_resource_mongo_db_database import *
from .notebook_workspace import *
from .private_endpoint_connection import *
from .sql_resource_sql_container import *
from .sql_resource_sql_database import *
from .sql_resource_sql_role_assignment import *
from .sql_resource_sql_role_definition import *
from .sql_resource_sql_stored_procedure import *
from .sql_resource_sql_trigger import *
from .sql_resource_sql_user_defined_function import *
from .table_resource_table import *
from ._inputs import *
from . import outputs
# Make subpackages available:
from . import (
latest,
v20150401,
v20150408,
v20151106,
v20160319,
v20160331,
v20190801,
v20190801preview,
v20191212,
v20200301,
v20200401,
v20200601preview,
v20200901,
v20210115,
v20210301preview,
)
def _register_module():
import pulumi
from .. import _utilities
class Module(pulumi.runtime.ResourceModule):
_version = _utilities.get_semver_version()
def version(self):
return Module._version
def construct(self, name: str, typ: str, urn: str) -> pulumi.Resource:
if typ == "azure-native:documentdb:CassandraCluster":
return CassandraCluster(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:CassandraDataCenter":
return CassandraDataCenter(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:CassandraResourceCassandraKeyspace":
return CassandraResourceCassandraKeyspace(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:CassandraResourceCassandraTable":
return CassandraResourceCassandraTable(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccount":
return DatabaseAccount(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountCassandraKeyspace":
return DatabaseAccountCassandraKeyspace(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountCassandraTable":
return DatabaseAccountCassandraTable(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountGremlinDatabase":
return DatabaseAccountGremlinDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountGremlinGraph":
return DatabaseAccountGremlinGraph(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountMongoDBCollection":
return DatabaseAccountMongoDBCollection(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountMongoDBDatabase":
return DatabaseAccountMongoDBDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountSqlContainer":
return DatabaseAccountSqlContainer(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountSqlDatabase":
return DatabaseAccountSqlDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:DatabaseAccountTable":
return DatabaseAccountTable(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:GremlinResourceGremlinDatabase":
return GremlinResourceGremlinDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:GremlinResourceGremlinGraph":
return GremlinResourceGremlinGraph(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:MongoDBResourceMongoDBCollection":
return MongoDBResourceMongoDBCollection(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:MongoDBResourceMongoDBDatabase":
return MongoDBResourceMongoDBDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:NotebookWorkspace":
return NotebookWorkspace(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:PrivateEndpointConnection":
return PrivateEndpointConnection(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlContainer":
return SqlResourceSqlContainer(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlDatabase":
return SqlResourceSqlDatabase(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlRoleAssignment":
return SqlResourceSqlRoleAssignment(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlRoleDefinition":
return SqlResourceSqlRoleDefinition(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlStoredProcedure":
return SqlResourceSqlStoredProcedure(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlTrigger":
return SqlResourceSqlTrigger(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:SqlResourceSqlUserDefinedFunction":
return SqlResourceSqlUserDefinedFunction(name, pulumi.ResourceOptions(urn=urn))
elif typ == "azure-native:documentdb:TableResourceTable":
return TableResourceTable(name, pulumi.ResourceOptions(urn=urn))
else:
raise Exception(f"unknown resource type {typ}")
_module_instance = Module()
pulumi.runtime.register_resource_module("azure-native", "documentdb", _module_instance)
_register_module()
| 1.078125
| 1
|
Algorithm/Array/985. Sum of Even Numbers After Queries.py
|
smsubham/Data-Structure-Algorithms-Questions
| 0
|
12778370
|
<filename>Algorithm/Array/985. Sum of Even Numbers After Queries.py
#https://leetcode.com/problems/sum-of-even-numbers-after-queries/
# Time Complexity: O(N+Q) where N is the length of A and Q is the number of queries.
#Space Complexity: O(Q)
from typing import List
class Solution:
def sumEvenAfterQueries(self, nums: List[int], queries: List[List[int]]) -> List[int]:
queryEvenSum = []
S = sum(x for x in nums if x%2==0)
#print(S)
for value,index in queries:
#remove current number from sum
if nums[index] % 2 == 0:
S -= nums[index]
nums[index] += value
            # add the updated value at index if it's even
if nums[index] %2 == 0:
S += nums[index]
queryEvenSum.append(S)
return queryEvenSum
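# Minimal usage sketch outside the LeetCode harness, using the example input from
# the problem statement:
if __name__ == "__main__":
    print(Solution().sumEvenAfterQueries([1, 2, 3, 4], [[1, 0], [-3, 1], [-4, 0], [2, 3]]))
    # expected output: [8, 6, 2, 4]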
| 3.765625
| 4
|
kattiskitten/language_detector.py
|
FelixDQ/kattis-kitten
| 0
|
12778371
|
import glob
import re
import pkgutil
import kattiskitten.languages as languages
SUPPORTED_LANGUAGES = []
LANGUAGE_EXTENSIONS = {}
CONFIGS = {}
for importer, language, ispkg in pkgutil.iter_modules(languages.__path__):
SUPPORTED_LANGUAGES.append(language)
config = importer.find_module(language).load_module(language)
LANGUAGE_EXTENSIONS[config.file_extension] = language
CONFIGS[language] = config
def get_config(language):
if language not in CONFIGS:
raise ValueError(f"Language not supported. Supported languages are: {', '.join(SUPPORTED_LANGUAGES)}")
return CONFIGS[language]
def determine_language(problem):
solution = glob.glob(f"./{problem}/solution.*")
if len(solution) < 1:
raise ValueError("Couldn't find any program matching patten (solution.*)")
if len(solution) > 1:
raise ValueError(
"Found more than one program matching patten (solution.*). It currently only supports one")
m = re.search(r".*\.(.+?)$", solution[0])
if m:
extension = m.group(1)
        language = LANGUAGE_EXTENSIONS.get(extension)  # .get() so a missing extension reaches the error below
if not language:
raise ValueError(
f"Couldn't find supported language with extension {extension}")
return language
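# Usage sketch (illustrative; assumes a problem directory such as ./hello that
# contains exactly one solution.* file with a supported extension):
#   language = determine_language("hello")   # e.g. "python"
#   config = get_config(language)            # the language's config module (exposes file_extension, etc.)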
| 2.703125
| 3
|
powernad/Object/AdKeyword/RequestObject/CreateAdKeywordObject.py
|
devkingsejong/python---PowerNad
| 34
|
12778372
|
class CreateAdKeywordObject:
def __init__(self, keyword):
self.bidAmt = None
self.customerId = None
self.keyword = keyword
self.useGroupBidAmt = None
self.userLock = None
| 2.203125
| 2
|
model_function_tests/fr/test_rule_based_tagger.py
|
UCREL/pymusas-models
| 0
|
12778373
|
import spacy
from spacy.tokens import Doc
from spacy.vocab import Vocab
TEST_TOKENS = ['Une', 'banque', 'est', 'une', 'institution', 'financière', '.', '5']
TEST_POS = ['DET', 'NOUN', 'AUX', 'DET', 'NOUN', 'ADJ', 'PUNCT', 'NUM']
TEST_SPACES = [True] * len(TEST_TOKENS)
def test_single_UPOS_contextual() -> None:
french_model = spacy.load("fr_single_upos2usas_contextual")
doc = Doc(Vocab(), words=TEST_TOKENS, spaces=TEST_SPACES, pos=TEST_POS)
output = french_model(doc)
expected_output = [
['Z5'],
['I1.1', 'X2.6+', 'M1', 'I1/H1', 'I1.1/I2.1c', 'W3/M4', 'A9+/H1', 'O2', 'M6'],
['M6'],
['Z5'],
['S5+c', 'S7.1+', 'H1c', 'S1.1.1', 'T2+'],
['Z99'],
['PUNCT'],
['N1']
]
assert len(expected_output) == len(output)
for token_index, token in enumerate(output):
assert expected_output[token_index] == token._.pymusas_tags
assert [(token_index, token_index + 1)] == token._.pymusas_mwe_indexes
| 2.6875
| 3
|
arrays_tricks.py
|
dremdem/pythons_handy_stuffs
| 0
|
12778374
|
<reponame>dremdem/pythons_handy_stuffs
a = [1, 3, 4, 5]
a.insert(1, 2)
print(a)
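# prints [1, 2, 3, 4, 5]: insert(1, 2) places 2 at index 1 and shifts the remaining items right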
| 2.828125
| 3
|
exercises/zh/solution_03_09_01.py
|
Jette16/spacy-course
| 2,085
|
12778375
|
from spacy.lang.zh import Chinese
from spacy.tokens import Token
nlp = Chinese()
# Register the Token extension attribute "is_country" with a default value of False
Token.set_extension("is_country", default=False)
# Process the text and set the is_country attribute of the token "新加坡" (Singapore) to True
doc = nlp("我住在新加坡。")
doc[3]._.is_country = True
# Print the text and the is_country attribute for every token
print([(token.text, token._.is_country) for token in doc])
| 2.984375
| 3
|
python/matching-brackets/matching_brackets.py
|
tamireinhorn/exercism
| 0
|
12778376
|
<gh_stars>0
OPENINGS_DICT = {'}': '{', ')': '(', ']': '['}
CLOSINGS = list(OPENINGS_DICT.keys())
OPENINGS = list(OPENINGS_DICT.values())
def is_paired(input_string):
    # The gist of this is: you build a stack of the openings, like (, [, {.
openings_stack = []
for element in input_string:
if element in OPENINGS:
openings_stack.append(element)
elif element in CLOSINGS:
            # When you have a closing element, you expect it to match the opening of the last item in the stack
if (openings_stack) and (openings_stack[-1] == OPENINGS_DICT.get(element)):
openings_stack.pop() # Then you remove the item from the stack
else:
return False # If this fails even once, it's unbalanced.
return len(openings_stack) == 0 # If we emptied the stack, we have a balanced string.
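# Quick self-check of the bracket matcher above (runs only when executed directly):
if __name__ == "__main__":
    assert is_paired("([{}])")
    assert not is_paired("[(])")
    assert is_paired("")
    print("is_paired examples passed")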
| 3.6875
| 4
|
python/wordSim.py
|
jfmyers/String-Similarity
| 3
|
12778377
|
from charPairs import CharPairs
from decimal import *
#Word Similarity Algorithm
#Similarity(string1, string2) = 2 * number of incommon char. pairs / sum of total number of char. pairs in each string
class similarity:
def __init__(self,string1, string2):
#get character pairs for string1
strChar1 = CharPairs(string1)
self.charPair1 = strChar1.getCharPairs()
self.charPair1Count = strChar1.getCharPairCount()
self.string1 = string1.lower()
#get character pairs for string2
strChar2 = CharPairs(string2)
self.charPair2 = strChar2.getCharPairs()
self.charPair2Count = strChar2.getCharPairCount()
self.string2 = string2.lower()
#run steps
self.find_in_common_char_pairs()
self.calculate_similarity()
def find_in_common_char_pairs(self):
self.incommon = set(self.charPair1).intersection(self.charPair2)
        self.incommon_count = len(self.incommon)
def calculate_similarity(self):
numerator = 2 * self.incommon_count
denominator = self.charPair1Count + self.charPair2Count
getcontext().prec = 4
self.sim = Decimal(numerator) / Decimal(denominator)
def get_sim(self):
return self.sim
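# Minimal usage sketch (relies on the CharPairs helper imported at the top of this
# module; the example strings are arbitrary):
if __name__ == "__main__":
    sim = similarity("healed", "sealed")
    print(sim.get_sim())  # prints the Decimal similarity score for the two words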
| 3.640625
| 4
|
generic_api/generics/entity.py
|
guestready/generic_api
| 1
|
12778378
|
class GenericEntity:
def __init__(self, *args, **kwargs):
pass
def is_valid(self):
raise NotImplementedError
@property
def data(self):
raise NotImplementedError
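# Minimal subclassing sketch (the _ExampleEntity name and payload are hypothetical,
# added only to illustrate the contract GenericEntity subclasses are expected to fill in):
class _ExampleEntity(GenericEntity):
    def is_valid(self):
        return True
    @property
    def data(self):
        return {"status": "ok"}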
| 2.296875
| 2
|
test/test_prompts.py
|
arpansahoo/wikipedia-speedruns
| 12
|
12778379
|
<reponame>arpansahoo/wikipedia-speedruns
import enum
import pytest
PROMPTS = [
{
"start" : "Johns Hopkins University",
"end" : "Baltimore",
},
{
"start" : "A",
"end" : "B",
},
]
@pytest.fixture()
def prompt_set(cursor):
query = "INSERT INTO sprint_prompts (prompt_id, start, end) VALUES (%s, %s, %s);"
ps = [(i, p["start"], p["end"]) for i, p in enumerate(PROMPTS)]
cursor.executemany(query, ps)
yield
cursor.execute("DELETE FROM sprint_prompts")
def test_create_prompt(client, cursor, admin_session):
response = client.post("/api/sprints/", json=PROMPTS[0])
assert response.status_code == 200
cursor.execute("SELECT start, end FROM sprint_prompts")
assert cursor.fetchone() == PROMPTS[0]
cursor.execute("DELETE FROM sprint_prompts")
def test_create_no_admin(client, cursor):
response = client.post("/api/sprints/", json=PROMPTS[0])
assert response.status_code == 401
cursor.execute("SELECT start, end FROM sprint_prompts")
assert cursor.fetchone() is None
def test_delete(client, cursor, prompt_set, admin_session):
# Try deleting id 1, which should be inserted
id = 1
response = client.delete(f"/api/sprints/{id}")
assert response.status_code == 200
cursor.execute("SELECT start, end FROM sprint_prompts WHERE prompt_id=%s", (id, ))
assert cursor.fetchone() is None
def test_delete_nonexistent(client, cursor, prompt_set, admin_session):
    # Try deleting an id one past the inserted range, which should not exist
id = len(PROMPTS)
response = client.delete(f"/api/sprints/{id}")
assert response.status_code == 404
def test_delete_no_admin(client, cursor, prompt_set):
id = 1
response = client.delete(f"/api/sprints/{id}")
assert response.status_code == 401
cursor.execute("SELECT start, end FROM sprint_prompts WHERE prompt_id=%s", (id, ))
assert cursor.fetchone() == PROMPTS[id]
| 2.1875
| 2
|
setup.py
|
jhgg/jeev
| 18
|
12778380
|
import os
import jeev
from setuptools import setup, find_packages
def read(fname):
return open(os.path.join(os.path.dirname(__file__), fname)).read()
setup(
name="jeev",
version=jeev.version.split('-')[0] + 'b0',
author="<NAME>",
author_email="<EMAIL>",
description="A simple chat bot, at your service.",
license="MIT",
keywords="chat slack bot irc jeev",
url="https://github.com/jhgg/jeev",
packages=find_packages(exclude=['modules']),
install_requires=[
'certifi==14.5.14',
'coloredlogs==1.0.1',
'cssselect==0.9.1',
'Flask==0.10.1',
'geopy==1.1.3',
'gevent==1.0.2',
'greenlet==0.4.7',
'humanfriendly==1.27',
'itsdangerous==0.24',
'Jinja2==2.7.3',
'lxml==3.3.6',
'MarkupSafe==0.23',
'pytz==2014.4',
'requests==2.7.0',
'six==1.9.0',
'slackclient==0.15',
'websocket-client==0.32.0',
'Werkzeug==0.9.6',
'wheel==0.24.0',
],
include_package_data=True,
zip_safe=False,
scripts=['bin/jeev'],
long_description=read('README.md'),
classifiers=[
"Development Status :: 4 - Beta",
"Topic :: Communications :: Chat",
"Topic :: Utilities",
"Framework :: Flask",
"License :: OSI Approved :: MIT License",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 2 :: Only",
"License :: OSI Approved :: MIT License",
],
)
| 1.648438
| 2
|
hipotenusa/__init__.py
|
agucova/cs42
| 0
|
12778381
|
<reponame>agucova/cs42
import check50
@check50.check()
def triangulo_1():
"""triangulo_1"""
check50.run("python3 hipotenusa.py").stdin("3\n4", prompt=False).stdout("Hipotenusa: 5", regex=False).exit(0)
@check50.check()
def triangulo_2():
"""triangulo_2"""
check50.run("python3 hipotenusa.py").stdin("6\n8", prompt=False).stdout("Hipotenusa: 10", regex=False).exit(0)
@check50.check()
def triangulo_3():
"""triangulo_3"""
check50.run("python3 hipotenusa.py").stdin("5\n12", prompt=False).stdout("Hipotenusa: 13", regex=False).exit(0)
@check50.check()
def triangulo_4():
"""triangulo_4"""
check50.run("python3 hipotenusa.py").stdin("120\n209", prompt=False).stdout("Hipotenusa: 241", regex=False).exit(0)
@check50.check()
def triangulo_5():
"""triangulo_5"""
check50.run("python3 hipotenusa.py").stdin("32\n255", prompt=False).stdout("Hipotenusa: 257", regex=False).exit(0)
@check50.check()
def triangulo_6():
"""triangulo_6"""
check50.run("python3 hipotenusa.py").stdin("160\n231", prompt=False).stdout("Hipotenusa: 281", regex=False).exit(0)
@check50.check()
def triangulo_7():
"""triangulo_7"""
check50.run("python3 hipotenusa.py").stdin("68\n285", prompt=False).stdout("Hipotenusa: 293", regex=False).exit(0)
@check50.check()
def triangulo_8():
"""triangulo_8"""
check50.run("python3 hipotenusa.py").stdin("115\n252", prompt=False).stdout("Hipotenusa: 277", regex=False).exit(0)
| 3
| 3
|
misc/permissions.py
|
jokedurnez/Psychosis
| 0
|
12778382
|
#!/usr/bin/python2
import os
for par, dirs, files in os.walk(os.environ.get("BIDSDIR")):
print(par)
if par.startswith(os.path.join(os.environ.get("BIDSDIR"),'derivatives/')):
for d in dirs:
os.chmod(par + '/' + d, 0770)
for f in files:
os.chmod(par + '/' + f, 0660)
else:
for d in dirs:
os.chmod(par + '/' + d, 0550)
for f in files:
os.chmod(par + '/' + f, 0440)
| 2.640625
| 3
|
day05/python/subesokun/main.py
|
matason/aoc-2018
| 17
|
12778383
|
def reactLetters(a, b):
return a.upper() == b.upper() and ((a.islower() and b.isupper()) or (b.islower() and a.isupper()))
def reactPolymer(polymer):
reacted_polymer = ''
i = 0
polymer_len = len(polymer)
while i < len(polymer):
if i < polymer_len - 1 and reactLetters(polymer[i], polymer[i + 1]):
i += 2
else:
reacted_polymer += polymer[i]
i += 1
return reacted_polymer
def getMinPolymer(polymer):
reacted_polymer = reactPolymer(polymer)
while len(reacted_polymer) != len(polymer):
polymer = reacted_polymer
reacted_polymer = reactPolymer(polymer)
return reacted_polymer
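# Quick self-check using the worked example from the puzzle statement (an
# illustrative addition, not part of the original solution):
assert getMinPolymer("dabAcCaCBAcCcaDA") == "dabCBAcaDA"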
polymer = None
with open('input.txt') as input_file:
polymer=input_file.read().rstrip()
print('Solution to part 1: %s' % (len(getMinPolymer(polymer)),))
min_poly_len = 1000000
for i in range(ord('a'), ord('z')+1):
mod_polymer = polymer.replace(chr(i), '').replace(chr(i).upper(), '')
mod_poly_len = len(getMinPolymer(mod_polymer))
if mod_poly_len < min_poly_len:
min_poly_len = mod_poly_len
print('Solution to part 2: %i' % (min_poly_len,))
| 3.734375
| 4
|
gym_gathering/observations/basic_generators.py
|
NeoExtended/gym-gathering
| 0
|
12778384
|
from typing import Tuple
import gym
import numpy as np
from gym_gathering.observations.base_observation_generator import ObservationGenerator
class SingleChannelObservationGenerator(ObservationGenerator):
def __init__(
self,
maze: np.ndarray,
random_goal: bool,
goal_range: int,
noise: float = 0.0,
noise_type: str = "gauss",
static_noise: float = 0.0,
static_noise_type: str = "s&p",
restrict_noise: bool = True,
):
super(SingleChannelObservationGenerator, self).__init__(
random_goal=random_goal,
goal_range=goal_range,
noise=noise,
noise_type=noise_type,
static_noise=static_noise,
static_noise_type=static_noise_type,
restrict_noise=restrict_noise,
)
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*maze.shape, 1), dtype=np.uint8
)
def observation(self, particles: np.ndarray, goal: Tuple[int, int]):
observation = np.zeros(self.maze.shape)
observation = self.render_particles(particles, out=observation)
observation = self.generate_noise(observation)
if self.random_goal:
observation = self.render_goal(goal, out=observation)
return observation[:, :, np.newaxis] # Convert to single channel image
class MultiChannelObservationGenerator(ObservationGenerator):
def __init__(
self,
maze: np.ndarray,
random_goal: bool,
goal_range: int,
noise: float = 0.0,
noise_type: str = "gauss",
static_noise: float = 0.0,
static_noise_type: str = "s&p",
restrict_noise: bool = True,
):
super(MultiChannelObservationGenerator, self).__init__(
random_goal=random_goal,
goal_range=goal_range,
noise=noise,
noise_type=noise_type,
static_noise=static_noise,
static_noise_type=static_noise_type,
restrict_noise=restrict_noise,
)
self.n_channels = 3 if random_goal else 2
self.observation_space = gym.spaces.Box(
low=0, high=255, shape=(*maze.shape, self.n_channels), dtype=np.uint8
)
def observation(self, particles: np.ndarray, goal: Tuple[int, int]):
observation = np.zeros((*self.maze.shape, self.n_channels))
observation[:, :, 0] = self.render_maze()
particle_image = self.render_particles(particles)
particle_image = self.generate_noise(particle_image)
observation[:, :, 1] = particle_image
if self.random_goal:
observation[:, :, 2] = self.render_goal(goal)
return observation
| 2.46875
| 2
|
SLpackage/private/pacbio/pythonpkgs/pysiv2/lib/python2.7/site-packages/pysiv2/custom/test_report_metrics.py
|
fanglab/6mASCOPE
| 5
|
12778385
|
<filename>SLpackage/private/pacbio/pythonpkgs/pysiv2/lib/python2.7/site-packages/pysiv2/custom/test_report_metrics.py<gh_stars>1-10
from collections import defaultdict
from unittest import SkipTest
import operator as OP
import logging
import json
import os.path
from pbcommand.pb_io.report import load_report_from_json, dict_to_report
from pysiv2.custom.base import TestBase
from pysiv2.custom import utils as u
log = logging.getLogger(__name__)
def make_func(report_id, metric_id, ops_and_values):
def test(self):
if not self.HAVE_METRICS:
self.fail("Missing report metrics")
value = self._get_report_metric(report_id, metric_id)
n_tested = 0
for op, expected in ops_and_values:
operator = getattr(OP, op)
eqn = "%s .%s. %s" % (value, operator.__name__, expected)
log.info("Comparing values of %s: %s" % (metric_id, eqn))
self.assertTrue(operator(value, expected),
"%s: ! %s" % (metric_id, eqn))
n_tested += 1
if n_tested == 0:
raise SkipTest("No values tested")
return test
class TestReportMetrics(TestBase):
class __metaclass__(type):
def __new__(cls, classname, bases, classdict):
json_file = "test_values2.json"
if os.path.exists(json_file):
f = open(json_file, 'r')
test_values = u.unicode_to_string(json.load(f))
metric_comparisons = defaultdict(list)
classdict['report_ids'] = []
for report_id, report_d in test_values["reports"].iteritems():
classdict['report_ids'].append(report_id)
id_short = report_id.split(".")[-1]
for k,v in report_d.iteritems():
fields = k.split("__")
metric_id = fields[0]
op = "eq"
if len(fields) == 2:
op = fields[1]
metric_comparisons[metric_id].append((op, v))
for metric_id, ops_and_values in metric_comparisons.iteritems():
test_name = "test_{r}_{m}".format(r=id_short,
m=metric_id)
test_f = make_func(report_id, metric_id, ops_and_values)
classdict[test_name] = test_f
return type.__new__(cls, classname, bases, classdict)
@classmethod
def setUpClass(cls):
super(TestReportMetrics, cls).setUpClass()
cls.metric_dict = {}
cls.HAVE_METRICS = False
for rpt_id in cls.report_ids:
report = cls.getReport(rpt_id)
cls.metric_dict[rpt_id] = {a.id:a.value for a in report.attributes}
cls.HAVE_METRICS = True
@classmethod
def getReport(cls, report_id):
if cls.service_access_layer is None:
report_json = cls.datastore.get_report(report_id)
assert report_json is not None, "Can't find %s" % report_id
return load_report_from_json(report_json)
else:
# load report from services, not raw file
for rpt_info in cls.service_access_layer.get_analysis_job_reports(
cls.job_id):
file_info = rpt_info['dataStoreFile']
source_id = file_info.file_id.split("-")[0]
if source_id == report_id:
report_d = cls.service_access_layer.get_analysis_job_report_details(cls.job_id, file_info.uuid)
return dict_to_report(report_d)
raise RuntimeError("Can't find {i} report".format(i=report_id))
def _get_report_metric(self, report_id, metric_id):
return self.metric_dict[report_id][metric_id]
| 2.421875
| 2
|
Game/spriteFunc.py
|
murrayireland/Neural-Net-Game-2019
| 1
|
12778386
|
from sprites import *
import pygame
import random
import os
import subprocess
class Mixin:
    # Add clouds and fire to the game
def add_sprite(self,event,coOrds = None):
        # Check coOrds are valid (coOrds = None is used to randomly generate a sprite's coOrds)
if (coOrds == None) or (coOrds[0] >= 0 and coOrds[1] >=0 and coOrds[1] < SCREEN_HEIGHT):
if event == "fire":
# Create the fire instance
detected = Fire(IMG_FIRE, SPRITE_SCALING_FIRE)
else:
#Create cloud instance
detected=Cloud(IMG_CLOUD, SPRITE_SCALING_CLOUD)
detected.damage = self.cloud_damage
detected.points = ((-161, 0), (-128.5, 26.0), (-91.5, 51.0), (-66.5, 50.0),(-11.5,50), (33.5,66), (65.5,47), (120.5,26),(144.5,-26),(133.5,-78),(-47.5,-73),(-74.5,-39), (-114.5,-20), (-128.5, -26.0))
# Position the sprite using coOrds
if coOrds != None:
detected.center_x = coOrds[0]
detected.center_y = coOrds[1]
            # Randomly generate sprite's coOrds
else:
detected.center_y = random.randrange(0,SCREEN_HEIGHT )
detected.center_x = SCREEN_WIDTH + random.randrange(0,SCREEN_WIDTH)
#Add Sprite to relevant list
if event == "fire":
self.fire_list.append(detected)
else:
self.clouds_list.append(detected)
#Helper function used by NN. Adds fires based on results included in file
def add_new_data(self):
#Relevant file names
fileName = self.NNDir + "background" + str(self.background_index) + "-fire.txt"
picture = self.source[self.background_index-1]
with open(fileName) as f:
lines = f.readlines()
line = lines[-1].strip()
#Check to see if fire detected. If so, add fire sprite
if line[0] == '(':
line = eval(line, {"__builtins__": {}})
self.add_sprite("fire",(line[0] + SCREEN_WIDTH, SCREEN_HEIGHT - line[1]))
    # Check if sprite is colliding with fire (triggered by CPU on update but by player on button press)
def check_fire_collison(self,sprite):
# Generate a list of all emergencies that collided with the satellite.
hit_list = arcade.check_for_collision_with_list(sprite,self.fire_list)
#Setup right sound effect
if sprite == self.player_sprite:
sound = self.player_sound
else:
sound = self.cpu_sound
# Loop through each colliding fire, remove it, and add to the sprite's score.
for fire in hit_list:
#If not testing with headless setup(no display)
if not self.Test:
sound.play()
fire.kill()
sprite.score += SCOREINC
| 3.015625
| 3
|
solution/00004-count_words.py
|
wuooyun/show-me-the-code
| 0
|
12778387
|
#!/usr/bin/env python3
from collections import OrderedDict
filepath = r"C:\Users\Yun\Downloads\python-3.9.0-docs-text\library\code.txt"
dict_words = OrderedDict()
with open(filepath,'r') as f:
    words = f.read().lower().replace('\n', ' ').split(' ')
set_words = set(words)
set_words.remove('')
for word in set_words:
dict_words[word] = words.count(word)
print(sorted(dict_words.items(), key = lambda kv:(kv[1], kv[0])))
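# Note: the (count, word) sort key lists the least frequent words first; pass
# reverse=True (or negate the count in the key) for a most-frequent-first ordering.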
| 3.375
| 3
|
.ipynb_checkpoints/generate_docs-checkpoint.py
|
EricCacciavillani/eFlow
| 1
|
12778388
|
<filename>.ipynb_checkpoints/generate_docs-checkpoint.py
# Import libs
import os
# Taken from utils.sys_utils
def get_all_directories_from_path(directory_path):
"""
directory_path:
Given path that already exists.
Returns:
Returns back a set a directories with the provided path.
"""
dirs_in_paths = []
for (dirpath, dirnames, filenames) in os.walk(directory_path):
dirs_in_paths.extend(dirnames)
break
return set(dirs_in_paths)
def get_all_files_from_path(directory_path,
file_extension=None):
"""
directory_path:
Given path that already exists.
file_extension:
Only return files that have a given extension.
Returns:
Returns back a set a filenames with the provided path.
"""
files_in_paths = []
for (dirpath, dirnames, filenames) in os.walk(directory_path):
if file_extension:
file_extension = file_extension.replace(".","")
for file in filenames:
if file.endswith(f'.{file_extension}'):
files_in_paths.append(file)
else:
files_in_paths.extend(filenames)
break
return set(files_in_paths)
# Get the current working directory
current_work_dir = os.getcwd()
project_dir = current_work_dir[:current_work_dir.rfind('/')] + "/eflow/"
# Get all directories from project
all_dirs = get_all_directories_from_path(project_dir)
for dir_name in all_dirs:
# Ignore any hidden files
if dir_name[0] == "_":
continue
# Ignore utils for now
if dir_name == "utils":
continue
dir_files = get_all_files_from_path(project_dir + dir_name,
"py")
print(dir_files)
for file_name in dir_files:
print(file_name)
# Ignore hidden file
if file_name[0] == "_":
continue
def_start = False
with open(f'{project_dir}{dir_name}/{file_name}') as fp:
line = fp.readline()
while line:
line = fp.readline()
if line == "":
# Create template
if "# def " in line or "#def ":
continue
if ("def " in line and "def _" not in line) or def_start:
def_start = True
if "):" in line:
def_start = False
print(line)
break
break
| 2.859375
| 3
|
tembozapp/param.py
|
fazalmajid/temboz
| 55
|
12778389
|
<reponame>fazalmajid/temboz<gh_stars>10-100
########################################################################
#
# Parameter file for Temboz
#
########################################################################
# number of RSS feeds fetched in parallel
feed_concurrency = 20
# Maximum number of articles shown
overload_threshold = 200
# feed polling interval in seconds
refresh_interval = 3600
# half-life of articles' contribution towards the SNR
decay = 90
# Whether "catch-up" links require user confirmation (default is yes)
catch_up_confirm = True
hard_purge_confirm = True
# automatic backups
# stream compression utility to use for backups
backup_compressor = ('gzip -9c', '.gz')
#backup_compressor = ('bzip2 -9c', '.bz2')
# number of daily backups to keep
daily_backups = 7
# at what time should the backup be made (default: between 3 and 4 AM)
backup_hour = 4
# garbage collection - articles flagged as "uninteresting" will have their
# content automatically dumped after this interval (but not their title or
# permalink) to make room. If this parameter is set to None or False, this
# garbage-collection will not occur
garbage_contents = 7
# garbage_contents = None
# after a longer period of time, the articles themselves are purged, assuming
# they are no longer in the feed files (otherwise they would reappear the next
# time the feed is loaded)
# Note: this needs to be set much higher than the half-life for SNR
garbage_items = 180
# garbage_items = None
# URL to use as the User-Agent when downloading feeds
temboz_url = 'https://www.temboz.com/'
# user agent shown when fetching the feeds
user_agent = 'Temboz (%s)' % temboz_url
def default_user_agent():
return user_agent
import requests
requests.utils.default_user_agent = default_user_agent
# page unauthenticated users should see
# the most common case is people checking the referrer logs on their web server
unauth_page = temboz_url
# dictionary of login/password
try:
from private import auth_dict
except ImportError:
auth_dict = {'majid': 'sopo'}
# maximum number of errors, after this threshold is reached,
# the feed is automatically suspended. -1 to unlimit
max_errors = 1000
#debug = True
debug = False
#debug_sql = True
debug_sql = False
#profile = False
# logging
import sys, os
log_filename = 'error.log'
if '--daemon' in sys.argv:
# if you modify mode and buffer size, see also update.py:cleanup
# for the code that rotates this file daily
log = open(log_filename, 'a', 0)
os.dup2(log.fileno(), 1)
os.dup2(log.fileno(), 2)
activity = open('activity.log', 'a')
else:
log = sys.stderr
activity = sys.stderr
# redirect stdout and stderr to the log file
# default timeout for HTTP requests in seconds
http_timeout = 60.0
# These settings are managed in the database and will ultimately supersede
# param.py
settings = {}
| 1.820313
| 2
|
test/cli/compute-scripts/with-init-and-finalize.py
|
SabineEmbacher/xcube
| 97
|
12778390
|
<filename>test/cli/compute-scripts/with-init-and-finalize.py
# noinspection PyUnusedLocal
def compute(variable_a, variable_b, input_params=None, **kwargs):
a = input_params.get('a', 0.5)
b = input_params.get('b', 0.5)
return a * variable_a + b * variable_b
def initialize(input_cubes, input_var_names, input_params):
if len(input_cubes) != 1:
raise ValueError("Expected a single input cube")
if input_var_names and len(input_var_names) != 2:
raise ValueError("Two variables expected")
input_cube = input_cubes[0]
if not input_var_names:
if 'precipitation' not in input_cube:
raise ValueError("Cube must have 'precipitation'")
if 'soil_moisture' not in input_cube:
raise ValueError("Cube must have 'soil_moisture'")
input_var_names = ['precipitation', 'soil_moisture']
illegal_param_names = set(input_params.keys()).difference({"a", "b"})
if illegal_param_names:
raise ValueError(f"Illegal parameter(s): {illegal_param_names}")
return input_var_names, dict(a=input_params.get('a', 0.2), b=input_params.get('b', 0.3))
def finalize(output_cube):
output_cube.output.attrs.update(units='mg/m^3')
output_cube.attrs.update(comment='I has a bucket')
return output_cube
| 2.578125
| 3
|
tests/test_deprecated.py
|
cloudblue/connect-python-sdk
| 13
|
12778391
|
# -*- coding: utf-8 -*-
# This file is part of the Ingram Micro Cloud Blue Connect SDK.
# Copyright (c) 2019-2020 Ingram Micro. All Rights Reserved.
import pytest
from mock import patch
from connect.exceptions import Message
from connect.resources.base import BaseResource
from .common import Response
def test_deprecated_message():
# type: () -> None
with pytest.deprecated_call():
# noinspection PyStatementEffect
Message('Hello').message
@patch('requests.get')
def test_deprecation_filter_in(get_mock):
get_mock.return_value = Response(True, '[]', 200)
class TestResource(BaseResource):
resource = 'test'
    test_resource = TestResource()
filters = {
'deprecated__in': (1, 2)
}
with pytest.deprecated_call() as warning:
        test_resource.search(filters)
assert str(warning[0].message) == 'deprecated__in: __in operator is deprecated, Use RQL syntax'
| 2.234375
| 2
|
interlink/migrations/0001_initial.py
|
FarsetLabs/farset-nadine
| 0
|
12778392
|
<filename>interlink/migrations/0001_initial.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='IncomingMail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('origin_address', models.EmailField(max_length=75)),
('sent_time', models.DateTimeField()),
('subject', models.TextField(blank=True)),
('body', models.TextField(null=True, blank=True)),
('html_body', models.TextField(null=True, blank=True)),
('original_message', models.TextField(blank=True)),
('state', models.CharField(default=b'raw', max_length=10, choices=[(b'raw', b'raw'), (b'moderate', b'moderate'), (b'send', b'send'), (b'sent', b'sent'), (b'reject', b'reject')])),
('created', models.DateTimeField(auto_now_add=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='MailingList',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(max_length=1024)),
('description', models.TextField(blank=True)),
('subject_prefix', models.CharField(max_length=1024, blank=True)),
('is_opt_out', models.BooleanField(default=False, help_text=b'True if new users should be automatically enrolled')),
('moderator_controlled', models.BooleanField(default=False, help_text=b'True if only the moderators can send mail to the list and can unsubscribe users.')),
('email_address', models.EmailField(max_length=75)),
('username', models.CharField(max_length=1024)),
('password', models.CharField(max_length=1024)),
('pop_host', models.CharField(max_length=1024)),
('pop_port', models.IntegerField(default=995)),
('smtp_host', models.CharField(max_length=1024)),
('smtp_port', models.IntegerField(default=587)),
('throttle_limit', models.IntegerField(default=0, help_text=b'The number of recipients in 10 minutes this mailing list is limited to. Default is 0, which means no limit.')),
('moderators', models.ManyToManyField(help_text=b'Users who will be sent moderation emails', related_name=b'moderated_mailing_lists', to=settings.AUTH_USER_MODEL, blank=True)),
('subscribers', models.ManyToManyField(related_name=b'subscribed_mailing_lists', to=settings.AUTH_USER_MODEL, blank=True)),
],
options={
},
bases=(models.Model,),
),
migrations.CreateModel(
name='OutgoingMail',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('moderators_only', models.BooleanField(default=False)),
('subject', models.TextField(blank=True)),
('body', models.TextField(null=True, blank=True)),
('html_body', models.TextField(null=True, blank=True)),
('attempts', models.IntegerField(default=0)),
('last_attempt', models.DateTimeField(null=True, blank=True)),
('sent', models.DateTimeField(null=True, blank=True)),
('sent_recipients', models.IntegerField(default=0)),
('created', models.DateTimeField(auto_now_add=True)),
('mailing_list', models.ForeignKey(related_name=b'outgoing_mails', to='interlink.MailingList')),
('original_mail', models.ForeignKey(default=None, blank=True, to='interlink.IncomingMail', help_text=b'The incoming mail which caused this mail to be sent', null=True)),
],
options={
'ordering': ['-created'],
'verbose_name_plural': 'outgoing mails',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='incomingmail',
name='mailing_list',
field=models.ForeignKey(related_name=b'incoming_mails', to='interlink.MailingList'),
preserve_default=True,
),
migrations.AddField(
model_name='incomingmail',
name='owner',
field=models.ForeignKey(default=None, blank=True, to=settings.AUTH_USER_MODEL, null=True),
preserve_default=True,
),
]
| 1.78125
| 2
|
Python/Courses/Python-Tutorials.Zulkarnine-Mahmud/00.Fundamentals/10.0-Debugging.py
|
shihab4t/Books-Code
| 0
|
12778393
|
<gh_stars>0
user_name = input("Your name: ")
value = 1
# Intentional bug for this debugging lesson: adding an int to a str raises a
# TypeError. Converting one side first (e.g. str(value) + user_name) fixes it.
new_string = value + user_name
print(new_string)
| 3.578125
| 4
|
zvmsdk/tests/unit/test_utils.py
|
FerrySchuller/python-zvm-sdk
| 0
|
12778394
|
<gh_stars>0
# Copyright 2017 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import zvmsdk.utils as zvmutils
from zvmsdk.tests.unit import base
class ZVMUtilsTestCases(base.SDKTestCase):
def test_convert_to_mb(self):
self.assertEqual(2355.2, zvmutils.convert_to_mb('2.3G'))
self.assertEqual(20, zvmutils.convert_to_mb('20M'))
self.assertEqual(1153433.6, zvmutils.convert_to_mb('1.1T'))
@mock.patch.object(zvmutils, 'get_smut_userid')
def test_get_namelist(self, gsu):
gsu.return_value = 'TUID'
self.assertEqual('TSTNLIST', zvmutils.get_namelist())
base.set_conf('zvm', 'namelist', None)
gsu.return_value = 'TUID'
self.assertEqual('NL00TUID', zvmutils.get_namelist())
gsu.return_value = 'TESTUSER'
self.assertEqual('NLSTUSER', zvmutils.get_namelist())
base.set_conf('zvm', 'namelist', 'TSTNLIST')
| 1.929688
| 2
|
rules/rule.py
|
harsh07021999/fuzzython
| 16
|
12778395
|
from predicate import Predicate
__author__ = ''
class Rule(object):
"""
Base class for fuzzy rules
"""
__COUNT = 0
__slots__ = ('_antecedent', '_consequent', '_weight', '_number')
def __init__(self, antecedent, consequent, weight=1):
"""
Initialize a rule
:param antecedent: premise (if part)
:param consequent: conclusion (then part)
:param weight: how sure are we about this rule
"""
self._antecedent = antecedent
self._consequent = consequent
self._weight = weight
self._number = Rule.__COUNT
Rule.__COUNT += 1
def get_weight(self):
return self._weight
def set_weight(self, value):
if 0 < value <= 1:
self._weight = value
weight = property(get_weight, set_weight, doc='weight factor')
def compute(self, activation=None):
"""Compute rule's firing level and sets this value for adjectives in consequent"""
pass
#region (IN/OUT) Adjectives, Variables
def input_adj(self):
"""
Gets all adjectives in the antecedent of rule
"""
antecedent = self._antecedent
if isinstance(antecedent, Predicate):
return [antecedent.adjective]
return list({p.adjective for p in antecedent.leaves()})
def output_adj(self):
"""
Gets all adjectives in the consequent of rule
"""
return [predicate.adjective for predicate in self._consequent]
def input_vars(self):
"""
Gets all variables in the antecedent of rule
"""
antecedent = self._antecedent
if isinstance(antecedent, Predicate):
return [antecedent.variable]
return list({p.variable for p in antecedent.leaves()})
def output_vars(self):
"""
Gets all variables in the consequent of rule
"""
return [predicate.variable for predicate in self._consequent]
#endregion
#region (IN/OUT) Predicates
def inputs(self):
"""
Gets all predicates in the antecedent of rule
"""
antecedent = self._antecedent
if isinstance(antecedent, Predicate):
return [antecedent]
return list({p for p in antecedent.leaves()})
def outputs(self):
"""
Gets all predicates in the consequent of rule
"""
return [predicate for predicate in self._consequent]
def predicates(self):
"""
Gets all predicates in the rule
"""
return self.inputs() + self.outputs()
#endregion
@staticmethod
def parse(sentence, scope, tnorm=None, snorm=None, cnorm=None):
"""
Parse a str-rule with given scope and norms
"""
from rules.parser import parse_rule
return parse_rule(sentence, scope, tnorm, snorm, cnorm)
@staticmethod
def get_rule(antecedent, consequent, weight=1):
"""
Gets the appropriate rule subclass for the given antecedent and consequent
:param antecedent: the structure of antecedent is an operator's tree of predicates
:param consequent: the structure of consequent determines the rule type
:param weight: certainty for this rule
"""
if isinstance(consequent, list):
from rules.mrule import MRule
XRule = MRule
elif isinstance(consequent, Predicate):
from rules.trule import TRule
XRule = TRule
else:
from rules.srule import SRule
XRule = SRule
return XRule(antecedent, consequent, weight)
def __repr__(self):
w = '' if self._weight == 1 else ' WITH ' + str(self._weight)
if isinstance(self._consequent, list):
consequent = ', '.join([str(predicate) for predicate in self._consequent])
else:
consequent = self._consequent
return 'RULE {0}: IF {1} THEN {2}{3};'.format(self._number, self._antecedent, consequent, w)
__str__ = __repr__
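# --- Usage sketch (illustrative only; `cold` and `slow` are hypothetical Predicate
# instances built elsewhere from fuzzy variables, not part of this module) ---
#
#   rule = Rule.get_rule(cold, slow, weight=0.8)   # consequent is a Predicate -> TRule
#   rule.weight = 0.9                              # accepted only if 0 < value <= 1
#   print(rule)                                    # RULE 0: IF cold THEN slow WITH 0.9;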
| 3.375
| 3
|
src/pbn_api/migrations/0027_przemapuj_rok_publikacji.py
|
iplweb/django-bpp
| 1
|
12778396
|
# Generated by Django 3.0.14 on 2021-08-17 22:16
import warnings
from django.core.paginator import Paginator
from django.db import migrations
from bpp.util import pbar
def value(elem, *path, return_none=False):
v = None
if elem.versions:
for _elem in elem.versions:
if _elem["current"]:
v = _elem
break
# v = elem.current_version
if v is None:
warnings.warn(
f"Model {elem.__class__} with id {elem.mongoId} has NO current_version!"
)
if return_none:
return
return "[brak current_version]"
for elem in path:
if elem in v:
v = v[elem]
else:
if return_none:
return None
return f"[brak {elem}]"
return v
def value_or_none(elem, *path):
return value(elem, *path, return_none=True)
MAX_TEXT_FIELD_LENGTH = 512
def _pull_up_on_save(elem, pull_up_on_save):
for attr in pull_up_on_save:
v = value_or_none(elem, "object", attr)
if v is not None:
if isinstance(v, str):
if len(v) >= MAX_TEXT_FIELD_LENGTH:
v = v[:MAX_TEXT_FIELD_LENGTH]
setattr(elem, attr, v)
def rebuild_table(model, puos):
queryset = model.objects.all().only("pk", "versions").order_by("pk")
paginator = Paginator(queryset, 1000)
for page in pbar(paginator.page_range):
for elem in paginator.get_page(page).object_list:
_pull_up_on_save(elem, puos)
elem.save(update_fields=puos)
def rebuild(apps, schema_editor):
for model, puos in [
(
apps.get_model("pbn_api", "Publication"),
["year"],
),
]:
rebuild_table(model, puos)
class Migration(migrations.Migration):
dependencies = [
("pbn_api", "0026_auto_20210816_0815"),
]
operations = [
migrations.RunPython(rebuild, migrations.RunPython.noop),
]
| 1.945313
| 2
|
demo/admin.py
|
DevKor-Team/devkor_hackathon_back
| 0
|
12778397
|
from django.contrib import admin
from .models import Demo, Emoji, Tag, TechStackTag, Comment
admin.site.register(Demo)
admin.site.register(Tag)
admin.site.register(TechStackTag)
admin.site.register(Comment)
admin.site.register(Emoji)
| 1.359375
| 1
|
.buildkite/dagster-buildkite/dagster_buildkite/images/versions.py
|
asamoal/dagster
| 0
|
12778398
|
<filename>.buildkite/dagster-buildkite/dagster_buildkite/images/versions.py<gh_stars>0
import os
import yaml
def get_image_version(image_name: str) -> str:
root_images_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)),
"..",
"..",
"..",
"..",
"python_modules",
"automation",
"automation",
"docker",
"images",
)
with open(
os.path.join(root_images_path, image_name, "last_updated.yaml"), encoding="utf8"
) as f:
versions = set(yaml.safe_load(f).values())
# There should be only one image timestamp tag across all Python versions
assert len(versions) == 1
return versions.pop()
COVERAGE_IMAGE_VERSION: str = get_image_version("buildkite-coverage")
INTEGRATION_IMAGE_VERSION: str = get_image_version("buildkite-integration")
UNIT_IMAGE_VERSION: str = get_image_version("buildkite-unit")
TEST_IMAGE_BUILDER_VERSION: str = get_image_version("buildkite-test-image-builder")
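# --- Illustrative sketch (assumed file layout, not part of this module) ---
# Each image directory is expected to hold a `last_updated.yaml` mapping Python
# versions to a single shared timestamp tag, e.g.:
#
#   "3.8.12": "2022-01-01T000000"
#   "3.9.9": "2022-01-01T000000"
#
# in which case get_image_version("buildkite-unit") returns "2022-01-01T000000".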
| 2.09375
| 2
|
src/evaluator/evaluator.py
|
JonasFrey96/RPOSE
| 0
|
12778399
|
<gh_stars>0
import os
import sys
os.chdir(os.path.join(os.getenv("HOME"), "RPOSE"))
sys.path.insert(0, os.getcwd())
sys.path.append(os.path.join(os.getcwd() + "/src"))
sys.path.append(os.path.join(os.getcwd() + "/core"))
sys.path.append(os.path.join(os.getcwd() + "/segmentation"))
import coloredlogs
coloredlogs.install()
import shutil
import datetime
import argparse
from pathlib import Path
import os
import torch
from src_utils import file_path, load_yaml
import datasets
from lightning import Inferencer
from pose_estimation import full_pose_estimation, compute_auc
import numpy as np
from enum import Enum
from ycb.rotations import so3_relative_angle
from scipy.stats import special_ortho_group
import neptune.new as neptune
from pose_estimation import Violation
from ycb.ycb_helper import BoundingBox
import cv2
import time
from skimage.morphology import binary_erosion, binary_dilation
from skimage.morphology import disk # noqa
def expand_to_batch(batch, device):
ret = []
for b in batch:
if torch.is_tensor(b):
ret.append(b[None].cuda())
elif type(b) is tuple:
new = []
for el in b:
new.append(el[None].cuda())
ret.append(tuple(new))
else:
ret.append(b)
# return not mutable
return tuple(ret)
class Mode(Enum):
TRACKING = 1
REFINEMENT = 2
MUTIPLE_INIT_POSES = 3
def str_to_mode(s):
if s == "MUTIPLE_INIT_POSES":
return Mode.MUTIPLE_INIT_POSES
elif s == "REFINEMENT":
return Mode.REFINEMENT
elif s == "TRACKING":
return Mode.TRACKING
else:
raise Exception
def compute_tracklets(paths, objs):
tracklets = []
global_idx = np.arange(0, len(paths))
for o in range(objs.min(), objs.max() + 1):
p = paths[objs == o]
local_idx = global_idx[objs == o]
tracklets.append([])
seq_last, idx_last = int(p[0].split("/")[-2]), int(p[0].split("/")[-1]) - 1
for j, _p in enumerate(p):
seq_cur, idx_cur = int(_p.split("/")[-2]), int(_p.split("/")[-1])
if seq_cur == seq_last and idx_cur - 50 < idx_last and idx_cur > idx_last:
tracklets[-1].append(local_idx[j])
else:
tracklets.append([local_idx[j]])
seq_last, idx_last = seq_cur, idx_cur
return tracklets
def rel_h(h1, h2):
return so3_relative_angle(
torch.tensor(h1)[:3, :3][None], torch.tensor(h2)[:3, :3][None]
)
def add_noise(h, nt=0.01, nr=30):
h_noise = np.eye(4)
while True:
x = special_ortho_group.rvs(3).astype(np.float32)
if abs(float(rel_h(h[:3, :3], x) / (2 * float(np.math.pi)) * 360)) < nr:
break
h_noise[:3, :3] = x
h_noise[:3, 3] = np.random.normal(loc=h[:3, 3], scale=nt)
return h_noise
# Implements
class Evaluator:
def __init__(self, exp, env, log=True):
super().__init__()
self._log = log
if self._log:
files = [str(s) for s in Path(exp["name"]).rglob("*.yml")]
if env["workstation"]:
self._run = neptune.init(
project=exp["neptune_project_name"],
api_token=os.environ["NEPTUNE_API_TOKEN"],
tags=[exp["name"], "workstation_" + str(env["workstation"])],
source_files=files,
)
else:
self._run = neptune.init(
project=exp["neptune_project_name"],
api_token=os.environ["NEPTUNE_API_TOKEN"],
proxies={
"http": "http://proxy.ethz.ch:3128",
"https": "http://proxy.ethz.ch:3128",
},
tags=[exp["name"], "workstation_" + str(env["workstation"])],
source_files=files,
)
print(exp)
print(exp["name"])
print("Flow Checkpoint: ", exp["checkpoint_load"])
print("Segm Checkpoint: ", exp["checkpoint_load_seg"])
self._exp = exp
self._env = env
self._val = exp.get("val", {})
self._inferencer = Inferencer(exp, env)
self.device = "cuda"
self._inferencer.to(self.device)
self.iterations = exp["eval_cfg"]["iterations"]
from visu import Visualizer
self._visu = Visualizer(
os.path.join(exp["name"], "visu"), num_classes=2, store=True
)
self._visu.logger = self._run
self.mode = str_to_mode(exp["eval_cfg"]["mode"]) # MUTIPLE_INIT_POSES
def __del__(self):
if self._log:
# Stop logging
self._run.stop()
@torch.no_grad()
def evaluate_full_dataset(self, test_dataloader):
ycb = test_dataloader.dataset
if self.mode != Mode.TRACKING:
ycb.deterministic_random_shuffel()
ycb.estimate_pose = True
ycb.err = True
ycb.valid_flow_minimum = 0
ycb.fake_flow = not (self._exp["eval_cfg"]["use_gt_valid"] == "gt")
slow = self._exp["eval_cfg"]["use_gt_valid"] == "gt"
if self._exp["test_dataset"]["mode"] == "test_tracking":
sub_sample = 10
else:
sub_sample = 1
elements = len(test_dataloader.dataset._base_path_list)
if self.mode != Mode.TRACKING:
tracklets = []
for i in range(elements):
tracklets.append([i])
else:
paths = np.array(ycb._base_path_list)
objs = np.array(ycb._obj_idx_list)
tracklets = compute_tracklets(paths, objs)
nr_tracklets = len(tracklets)
tracklet_totals = [len(le) for le in tracklets]
count = 0
if self._exp["test_dataset"]["mode"] == "test_tracking":
elements = np.array(tracklet_totals).sum()
adds = np.full((elements, self.iterations), np.inf)
add_s = np.full((elements, self.iterations), np.inf)
idx_arr = np.full((elements), np.inf)
epe = np.full((elements, self.iterations), 999)
init_adds_arr = np.full((elements, self.iterations), np.inf)
init_add_s_arr = np.full((elements, self.iterations), np.inf)
h_init_all = np.eye(4)[None, None].repeat(elements, 0).repeat(self.iterations, 1)
h_pred_all = np.eye(4)[None, None].repeat(elements, 0).repeat(self.iterations, 1)
ratios_arr = np.zeros((elements, self.iterations))
r_repro_arr = np.zeros((elements, self.iterations))
repro_errors = np.full((elements, self.iterations), np.inf)
valid_corrospondences = np.zeros((elements, self.iterations))
violation_arr = np.full((elements, self.iterations), Violation.UNKNOWN)
computed_elements = []
_st = time.time()
move_dir = np.zeros((2))
# ────────────────────────────────────────────────────────── ─────
# Iterate over full dataset.
# ────────────────────────────────────────────────────────── ─────
for i, track in enumerate(tracklets):
print(f"Inferenced Tracklets {i}/{nr_tracklets}")
valid_element = True
h_store = None
# ────────────────────────────────────────────────────────── ─────
# Apply the network multiple times.
# ────────────────────────────────────────────────────────── ─────
history_rot = []
history_trans = []
track = track[::sub_sample]
for nr_t, t in enumerate(track):
computed_elements.append(t)
for k in range(self.iterations):
count += 1
if k == 0 and nr_t == 0 and h_store is None:
# START TRACKLET AND FIRST ITERATION GET YCB DATA
batch = ycb.getElement(t)
h_store = batch[7].detach().cpu().numpy()
else:
if self.mode in (Mode.REFINEMENT, Mode.TRACKING):
# LOAD THE STORED POSE ESTIMATE
current_pose = h_store
elif self.mode == Mode.MUTIPLE_INIT_POSES:
# LOAD STORED POSE ESTIMATE WITH TRANSLATION
h_store[:2, 3] = (
h_store[:2, 3] + (move_dir.clip(-75, 75) / 75 * 0.04).cpu().numpy()
)
current_pose = h_store
# GET DATA
batch = ycb.getElement(t, h_real_est=current_pose)
print(
f"Tracklet: {nr_t}, ",
batch[-2][-10:],
" obj ",
batch[9],
"time",
time.time() - _st,
"left t:",
(time.time() - _st) / count * ((sum(tracklet_totals) / sub_sample) - count),
)
if batch[0] is None and k == 0:
print("CANT start given PoseCNN fails!")
violation_arr[t, k] = Violation.DATALOADER_OBJECT_INVALID
idx_arr[t] = int(batch[1])
# Only break if we are not in MUTIPLE INIT POSES mode
if self.mode != Mode.MUTIPLE_INIT_POSES:
valid_element = False
break
else:
continue
else:
idx_arr[t] = int(batch[9])
# ACTUAL POSE INFERENCE
batch = expand_to_batch(batch, self.device)
flow_predictions, pred_valid = self._inferencer(batch) # 200ms
valid_corrospondences[t, k] = int(pred_valid.sum())
if slow:
gt_valid = batch[3]
gt_flow = batch[2]
_epe = float(
(
(
torch.sum((flow_predictions[-1] - gt_flow) ** 2, dim=1).sqrt()
* gt_valid
).sum()
/ gt_valid.sum()
).cpu()
)
(
h_gt,
h_render,
h_init,
bb,
idx,
K_ren,
K_real,
render_d,
model_points,
img_real_ori,
p,
img_render_ori,
) = batch[5:]
if self._exp["eval_cfg"]["use_gt_valid"] == "gt":
fv = gt_valid
elif self._exp["eval_cfg"]["use_gt_valid"] == "pred":
fv = pred_valid
elif self._exp["eval_cfg"]["use_gt_valid"] == "none":
fv = torch.ones_like(pred_valid)
move_dir = flow_predictions[-1][0, :, fv[0, :, :] == 1].mean(axis=1)
st = time.time()
(
res_dict,
count_invalid,
h_pred__pred_pred,
repro_error,
ratios,
valid,
violations,
) = full_pose_estimation(
h_gt=h_gt,
h_render=h_render,
h_init=h_init,
bb=bb,
flow_valid=fv,
flow_pred=flow_predictions[-1],
idx=idx.clone(),
K_ren=K_ren,
K_real=K_real,
render_d=render_d,
model_points=model_points,
cfg=self._exp["eval_cfg"].get("full_pose_estimation", {}),
) # 50ms
if self._env["workstation"] and count < 10:
bb_real = BoundingBox(bb[0][0], bb[1][0])
bb_render = BoundingBox(bb[2][0], bb[3][0])
img_real_crop = bb_real.crop(img_real_ori[0])
img_render_crop = bb_render.crop(img_render_ori[0])
img_real_crop = cv2.resize(img_real_crop.cpu().numpy(), (640, 480))
img_render_crop = cv2.resize(img_render_crop.cpu().numpy(), (640, 480))
self._visu.epoch = count
self._visu.plot_image(img_real_ori[0], tag="img_real")
self._visu.plot_image(img_real_crop, tag="img_real_crop")
self._visu.plot_image(img_render_ori[0], tag="img_render")
self._visu.plot_image(img_render_crop, tag="img_render_crop")
self._visu.plot_detectron(
img=img_real_crop,
label=fv[0],
tag="gt_detectron",
alpha=0.75,
text_off=True,
)
self._visu.plot_detectron(
img=img_real_crop,
label=pred_valid[0],
tag="pred_detectron",
alpha=0.75,
text_off=True,
)
self._visu.plot_flow(
flow_predictions[-1][0].permute(0, 2, 1), tag="pred_flow"
)
if slow:
self._visu.plot_flow(gt_flow[0].permute(0, 2, 1), tag="gt_flow")
self._visu.plot_corrospondence(
gt_flow[0, 0, :, :],
gt_flow[0, 1, :, :],
fv[0].cpu(),
torch.tensor(img_real_crop),
torch.tensor(img_render_crop),
colorful=False,
text=False,
res_h=30,
res_w=30,
min_points=50,
jupyter=False,
col=(0, 255, 255),
tag="gt_corro",
)
self._visu.plot_corrospondence(
flow_predictions[-1][0, 0, :, :],
flow_predictions[-1][0, 1, :, :],
fv[0].cpu(),
torch.tensor(img_real_crop),
torch.tensor(img_render_crop),
colorful=False,
text=False,
res_h=30,
res_w=30,
min_points=50,
jupyter=False,
col=(0, 255, 255),
tag="pred_corro",
)
b = 0
img_gt = self._visu.plot_estimated_pose(
img=img_real_ori[b].cpu().numpy(),
points=model_points[b].cpu(),
H=h_gt[b].cpu(),
K=K_real[b].cpu(),
color=(0, 255, 255, 255),
tag="h_gt",
w=1,
)
img_pred = self._visu.plot_estimated_pose(
img=img_real_ori[b].cpu().numpy(),
points=model_points[b].cpu(),
H=h_pred__pred_pred[b].cpu(),
K=K_real[b].cpu(),
color=(0, 255, 255, 255),
tag="h_pred",
w=1,
)
img_init = self._visu.plot_estimated_pose(
img=img_real_ori[b].cpu().numpy(),
points=model_points[b].cpu(),
H=h_init[b].cpu(),
K=K_real[b].cpu(),
color=(0, 255, 255, 255),
tag="h_init",
w=1,
)
ratios_arr[t, k] = ratios[0]
repro_errors[t, k] = repro_error
h_init_all[t, k] = h_init.cpu().numpy()[0]
h_pred_all[t, k] = h_pred__pred_pred.cpu().numpy()[0]
init_adds_arr[t, k] = res_dict["adds_h_init"]
init_add_s_arr[t, k] = res_dict["add_s_h_init"]
r_repro = 0
if violations[0] == Violation.SUCCESS:
adds[t, k] = res_dict["adds_h_pred"]
add_s[t, k] = res_dict["add_s_h_pred"]
if slow:
epe[t, k] = _epe
h_store = h_pred__pred_pred.cpu().numpy()[0]
patients_count = 0
pred_p = torch.bmm(
model_points, torch.transpose(h_pred__pred_pred[:, :3, :3], 1, 2)
) + h_pred__pred_pred[:, :3, 3][:, None, :].repeat(
1, model_points.shape[1], 1
)
from ycb.ycb_helper import backproject_points
points = backproject_points(pred_p[0], K=K_real[0]).type(torch.long).T
repro = torch.zeros_like(pred_valid)
points[0, :] = points[0, :].clip(0, repro.shape[1] - 1)
points[1, :] = points[1, :].clip(0, repro.shape[2] - 1)
repro[0][points[0, :], points[1, :]] = 1
bb_real = BoundingBox(bb[0][0], bb[1][0])
repro_crop = bb_real.crop(
repro[0][:, :, None].type(torch.float32), scale=True
)
footprint = disk(12)
tmp = binary_dilation(
repro_crop[:, :, 0].cpu().numpy().astype(np.bool), selem=footprint
)
tmp = binary_erosion(tmp, selem=footprint)
r_sum = (pred_valid.cpu().numpy() * tmp).sum()
r_repro = r_sum / pred_valid.cpu().numpy().sum()
r_repro_arr[t, k] = float(r_repro)
if r_repro < self._exp["eval_cfg"]["reject_ratio"]:
violations[0] = Violation.FAILED_R_REPRO
adds[t, k] = res_dict["adds_h_init"]
add_s[t, k] = res_dict["add_s_h_init"]
if self.mode == Mode.REFINEMENT:
violation_arr[t, k] = violations[0]
break
patients_count += 1
# reset to posecnn
if self.mode == Mode.TRACKING:
if patients_count > self._exp["eval_cfg"].get("track_patients", 0):
h_store = None
if self._env["workstation"] and count < 10:
img_real_crop = bb_real.crop(img_real_ori[0])
img_real_crop = cv2.resize(img_real_crop.cpu().numpy(), (640, 480))
self._visu.plot_detectron(
img=img_real_crop,
label=tmp.astype(np.uint8),
tag="REPRO Crop",
alpha=0.75,
text_off=True,
)
self._visu.plot_detectron(
img=img_real_ori[b].cpu().numpy(),
label=repro[0].cpu().numpy(),
tag="REPRO",
alpha=0.75,
text_off=True,
)
self._visu.plot_detectron(
img=img_real_crop,
label=pred_valid[0].cpu().numpy(),
tag="PREDICTION",
alpha=0.75,
text_off=True,
)
else:
adds[t, k] = res_dict["adds_h_init"]
add_s[t, k] = res_dict["add_s_h_init"]
if self.mode == Mode.REFINEMENT:
violation_arr[t, k] = violations[0]
break
patients_count += 1
# reset to posecnn
if self.mode == Mode.TRACKING:
if patients_count > self._exp["eval_cfg"].get("track_patients", 0):
h_store = None
if h_store is None:
history_rot = []
history_trans = []
else:
history_trans.append(
np.linalg.norm(h_init.cpu().numpy()[0, :3, 3] - h_store[:3, 3])
)
if len(history_trans) > 10:
history_trans = history_trans[1:]
if np.array(history_trans).mean() > self._exp["eval_cfg"].get(
"trans_difference", 0.02
):
print("RESET BASED ON TRANS")
violations[0] = Violation.TRANS_DIFFERENCE
history_rot = []
history_trans = []
h_store = None
self._run["trans_mean"].log(np.array(history_trans).mean())
violation_arr[t, k] = violations[0]
# ─── ────────────────────────────────────────────────────────────
# ONLY LOGGING
# ─── ────────────────────────────────────────────────────────────
self._run["count"].log(count)
if count % 100 == 0 and count != 0:
print("PROGRESS REPORT COUNT, ", count)
mask = np.array(computed_elements)
add_s_finite = np.isfinite(add_s[mask])
sm = add_s_finite.sum(axis=1) - 1
sm[sm < 0] = 0
sel = np.eye(self.iterations)[sm] == 1
print(
f"final after {self.iterations}th-iteration: ",
compute_auc(add_s[mask][sel]),
)
print("Mean 1th-iteration: ", compute_auc(add_s[mask, 0]))
print(
"AUC best over all iterations: ",
compute_auc(np.min(add_s[mask, :], axis=1)),
)
tar = np.argmax(ratios_arr[mask], axis=1)
sel = np.zeros_like(ratios_arr[mask])
for _j, _i in enumerate(tar.tolist()):
sel[_j, _i] = 1
sel = sel == 1
print("Best RANSAC ratios: ", compute_auc(add_s[mask][sel]))
sel2 = np.argmin(valid_corrospondences[mask], axis=1)
sel2 = np.eye(valid_corrospondences.shape[1])[sel2] == 1
print("AUC best valids: ", compute_auc(add_s[mask][sel2]))
print("INIT ADDS PoseCNN: ", compute_auc(init_add_s_arr[mask][:, 0]))
# STORING INTERMEDIATE RESULTS PICKLE
if count % 5000 == 0:
st = self._exp["eval_cfg"]["output_filename"]
b = os.path.join(self._exp["name"], f"{self.mode}_{st}_data_{count}.pkl")
dic = {
"add_s": add_s,
"adds": adds,
"idx_arr": idx_arr,
"ratios_arr": ratios_arr,
"valid_corrospondences": valid_corrospondences,
"init_adds_arr": init_adds_arr,
"init_add_s_arr": init_add_s_arr,
"epe": epe,
"h_init_all": h_init_all,
"h_pred_all": h_pred_all,
"violation_arr": violation_arr,
"repro_errors": repro_errors,
"mask": np.array(computed_elements),
"r_repro_arr": r_repro_arr,
}
import pickle
with open(b, "wb") as handle:
pickle.dump(dic, handle, protocol=pickle.HIGHEST_PROTOCOL)
self._run[f"result_inter_{i}"].upload(b)
# NEPTUNE LOGGING
if self._log:
mask = np.array(computed_elements)
logs = {
"add_s": add_s,
"adds": adds,
"ratios_arr": ratios_arr,
"valid_corrospondences": valid_corrospondences,
"epe": epe,
"init_adds_arr": init_adds_arr,
"init_add_s_arr": init_add_s_arr,
}
for k, v in logs.items():
for iter in range(self.iterations):
self._run[k + f"_iter_{iter}"].log(v[t, iter])
logs = {"idx_arr": idx_arr}
self._run["r_repro"].log(r_repro)
for k, v in logs.items():
self._run[k + f"_iter"].log(v[t])
if count % 10 == 0 and count != 0:
# compute aucs
for iter in range(self.iterations):
self._run["auc_add_s" + f"_iter_{iter}"].log(
compute_auc(add_s[mask, iter])
)
self._run["auc_adds" + f"_iter_{iter}"].log(compute_auc(adds[mask, iter]))
self._run["auc_init_adds" + f"_iter_{iter}"].log(
compute_auc(init_adds_arr[mask, iter])
)
self._run["auc_init_add_s" + f"_iter_{iter}"].log(
compute_auc(init_add_s_arr[mask, iter])
)
for _j in range(21):
m = idx_arr[mask] == _j
for iter in range(self.iterations):
self._run[f"auc_add_s_obj_{_j}" + f"_iter_{iter}"].log(
compute_auc(add_s[mask][m, iter])
)
self._run[f"auc_adds_obj_{_j}" + f"_iter_{iter}"].log(
compute_auc(adds[mask][m, iter])
)
self._run[f"auc_init_adds_obj_{_j}" + f"_iter_{iter}"].log(
compute_auc(init_adds_arr[mask][m, iter])
)
self._run[f"auc_init_add_s_obj_{_j}" + f"_iter_{iter}"].log(
compute_auc(init_add_s_arr[mask][m, iter])
)
# STORING FINAL RESULTS PICKLE
st = self._exp["eval_cfg"]["output_filename"]
b = os.path.join(self._exp["name"], f"{self.mode}_{st}_data_final.pkl")
dic = {
"add_s": add_s,
"adds": adds,
"idx_arr": idx_arr,
"ratios_arr": ratios_arr,
"valid_corrospondences": valid_corrospondences,
"init_adds_arr": init_adds_arr,
"init_add_s_arr": init_add_s_arr,
"epe": epe,
"h_init_all": h_init_all,
"h_pred_all": h_pred_all,
"violation_arr": violation_arr,
"repro_errors": repro_errors,
"r_repro_arr": r_repro_arr,
}
import pickle
varss = np.array([a.value for a in violation_arr[:, 0]])
print(np.unique(varss, return_counts=True))
with open(b, "wb") as handle:
pickle.dump(dic, handle, protocol=pickle.HIGHEST_PROTOCOL)
self._run["result_final"].upload(b)
sym = []
for ind in idx_arr.tolist():
sym.append(
not (int(ind) + 1 in [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 14, 15, 17, 18])
)
sym = np.array(sym)
for i in range(self.iterations):
non_sym = sym == False
mix = adds[sym, i].tolist() + add_s[non_sym, i].tolist()
self._run[f"auc_s_mix_iter_{i}"].log(compute_auc(np.array(mix)))
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--exp",
type=file_path,
default="cfg/exp/final/1_pose_prediction/pose_estimation.yml",
help="The main experiment yaml file.",
)
args = parser.parse_args()
exp_cfg_path = args.exp
env_cfg_path = os.path.join("cfg/env", os.environ["ENV_WORKSTATION_NAME"] + ".yml")
exp = load_yaml(exp_cfg_path)
env = load_yaml(env_cfg_path)
if exp.get("timestamp", True):
timestamp = datetime.datetime.now().replace(microsecond=0).isoformat()
model_path = os.path.join(env["base"], exp["name"])
p = model_path.split("/")
model_path = os.path.join("/", *p[:-1], str(timestamp) + "_" + p[-1])
else:
model_path = os.path.join(env["base"], exp["name"])
shutil.rmtree(model_path, ignore_errors=True)
# Create the directory
Path(model_path).mkdir(parents=True, exist_ok=True)
# Only copy config files for the main ddp-task
exp_cfg_fn = os.path.split(exp_cfg_path)[-1]
env_cfg_fn = os.path.split(env_cfg_path)[-1]
print(f"Copy {env_cfg_path} to {model_path}/{exp_cfg_fn}")
shutil.copy(exp_cfg_path, f"{model_path}/{exp_cfg_fn}")
shutil.copy(env_cfg_path, f"{model_path}/{env_cfg_fn}")
exp["name"] = model_path
inference_manager = Evaluator(exp=exp, env=env)
# LOAD WEIGHTS
p = os.path.join(env["base"], exp["checkpoint_load"])
if os.path.isfile(p):
res = torch.load(p)
out = inference_manager._inferencer.load_state_dict(res["state_dict"], strict=False)
if len(out[1]) > 0:
print("Restore weights from ckpts", out)
raise Exception(f"Not found seg checkpoint: {p}")
else:
print("Restore flow-weights from ckpts successfull")
else:
raise Exception(f"Not found flow checkpoint: {p}")
p = os.path.join(env["base"], exp["checkpoint_load_seg"])
if os.path.isfile(p):
res = torch.load(p)
new_statedict = {}
for (k, v) in res["state_dict"].items():
new_statedict[k.replace("model", "seg")] = v
out = inference_manager._inferencer.load_state_dict(new_statedict, strict=False)
if len(out[1]) > 0:
print("Restore_seg weights from ckpts", out)
raise Exception(f"Not found seg checkpoint: {p}")
else:
print("Restore seg-weights from ckpts successfull")
else:
raise Exception(f"Not found seg checkpoint: {p}")
# PERFORM EVALUATION
test_dataloader = datasets.fetch_dataloader(exp["test_dataset"], env)
inference_manager.evaluate_full_dataset(test_dataloader)
| 1.742188
| 2
|
app/user_agents.py
|
cclauss/personfinder
| 1
|
12778400
|
#!/usr/bin/python2.7
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
# Regular expression to detect Japanese Tier-2 mobile phones.
JP_TIER2_MOBILE_USER_AGENT_RE = re.compile(
r'^(KDDI|DoCoMo|SoftBank|J-PHONE|Vodafone)')
# Regular expression to detect phones which prefer Shift_JIS charset.
# Some KDDI phones support UTF-8 but they have a bug encoding UTF-8 query
# parameters.
SJIS_PREFERRED_USER_AGENT_RE = re.compile(r'^KDDI')
def is_jp_tier2_mobile_phone(request):
"""Returns True if the user agent is a Japanese Tier-2 mobile phone."""
user_agent = request.headers.get('User-Agent')
return user_agent and JP_TIER2_MOBILE_USER_AGENT_RE.match(user_agent)
def prefer_sjis_charset(request):
"""Returns True if Shift_JIS charset should be used for the user agent."""
user_agent = request.headers.get('User-Agent')
return user_agent and SJIS_PREFERRED_USER_AGENT_RE.match(user_agent)
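# --- Usage sketch (illustrative; the request object is a stand-in, any object
# exposing headers.get('User-Agent') works) ---
#
#   class _FakeRequest(object):
#       headers = {'User-Agent': 'KDDI-CA39 UP.Browser/6.2 (GUI) MMP/2.0'}
#
#   is_jp_tier2_mobile_phone(_FakeRequest())  # truthy: matches the KDDI prefix
#   prefer_sjis_charset(_FakeRequest())       # truthy: KDDI prefers Shift_JIS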
| 2.390625
| 2
|
visual_dynamics/utils/tests/test_generator.py
|
alexlee-gk/visual_dynamics
| 30
|
12778401
|
<reponame>alexlee-gk/visual_dynamics
import tempfile
import numpy as np
from nose2 import tools
from visual_dynamics.utils import DataContainer, DataGenerator
container_fnames = [tempfile.mktemp() for _ in range(4)]
num_steps_per_traj = [100] * 4 + [50] * 12 + [100] + [150] * 6
num_trajs_per_container = [4, 12, 1, 6]
num_steps_per_container = [400, 600, 100, 900]
assert sum(num_steps_per_traj) == sum(num_steps_per_container)
for container_ind, container_fname in enumerate(container_fnames):
with DataContainer(container_fname, 'x') as container:
num_trajs = num_trajs_per_container[container_ind]
num_steps = num_steps_per_container[container_ind] // num_trajs
assert num_steps_per_container[container_ind] == num_steps * num_trajs
container.reserve(['container_ind', 'traj_iter', 'step_iter'], (num_trajs, num_steps))
for traj_iter in range(num_trajs):
for step_iter in range(num_steps):
container.add_datum(traj_iter, step_iter,
container_ind=np.array(container_ind),
traj_iter=np.array(traj_iter),
step_iter=np.array(step_iter))
@tools.params([(0, 1), (0, 1)],
[(0, 2), (0, 2)],
[(0, 3), (0, 3)],
[(0, 1), (0, 3)],
[(0, 3), (0, 1)],
[(-1, 2), (0, 3)],
[(-2, 2), (0, 3)],
[(-3, 2), (0, 3)]
)
def test_generator_int_offset(offset_limits):
traj_offset_limit, step_offset_limit = offset_limits
data_name_offset_pairs = [('container_ind', 0)] + \
[('traj_iter', i) for i in range(*traj_offset_limit)] + \
[('step_iter', i) for i in range(*step_offset_limit)]
generator = DataGenerator(container_fnames,
data_name_offset_pairs=data_name_offset_pairs,
batch_size=32,
shuffle=True,
once=True)
max_iter = 4
for _iter, batch_data in zip(range(max_iter), generator):
traj_iters_traj = np.array(batch_data[1:1 + (traj_offset_limit[1] - traj_offset_limit[0])])
step_iters_traj = np.array(batch_data[-(step_offset_limit[1] - step_offset_limit[0]):])
# all traj_iters should be the same
assert (traj_iters_traj == traj_iters_traj[0, :]).all()
# all consecutive step_iters should differ by 1
assert ((step_iters_traj - np.arange(len(step_iters_traj))[:, None]) == step_iters_traj[0, :]).all()
@tools.params([(0, 1), (0, 1)],
[(0, 2), (0, 2)],
[(0, 3), (0, 3)],
[(0, 1), (0, 3)],
[(0, 3), (0, 1)],
[(-1, 2), (0, 3)],
[(-2, 2), (0, 3)],
[(-3, 2), (0, 3)]
)
def test_generator_slice_offset(offset_limits):
traj_offset_limit, step_offset_limit = offset_limits
data_name_offset_pairs = [('container_ind', 0),
('traj_iter', slice(*traj_offset_limit)),
('step_iter', slice(*step_offset_limit))]
generator = DataGenerator(container_fnames,
data_name_offset_pairs=data_name_offset_pairs,
batch_size=32,
shuffle=True,
once=True)
max_iter = 4
for _iter, batch_data in zip(range(max_iter), generator):
traj_iters_traj = np.swapaxes(batch_data[1], 0, 1)
step_iters_traj = np.swapaxes(batch_data[2], 0, 1)
# all traj_iters should be the same
assert (traj_iters_traj == traj_iters_traj[0, :]).all()
# all consecutive step_iters should differ by 1
assert ((step_iters_traj - np.arange(len(step_iters_traj))[:, None]) == step_iters_traj[0, :]).all()
@tools.params([(0, 1), (0, 1)],
[(0, 2), (0, 2)],
[(0, 3), (0, 3)],
[(0, 1), (0, 3)],
[(0, 3), (0, 1)],
[(-1, 2), (0, 3)],
[(-2, 2), (0, 3)],
[(-3, 2), (0, 3)]
)
def test_generator_list_offset(offset_limits):
traj_offset_limit, step_offset_limit = offset_limits
data_name_offset_pairs = [('container_ind', 0),
('traj_iter', list(range(*traj_offset_limit))),
('step_iter', list(range(*step_offset_limit)))]
generator = DataGenerator(container_fnames,
data_name_offset_pairs=data_name_offset_pairs,
batch_size=32,
shuffle=True,
once=True)
max_iter = 4
for _iter, batch_data in zip(range(max_iter), generator):
traj_iters_traj = np.swapaxes(batch_data[1], 0, 1)
step_iters_traj = np.swapaxes(batch_data[2], 0, 1)
# all traj_iters should be the same
assert (traj_iters_traj == traj_iters_traj[0, :]).all()
# all consecutive step_iters should differ by 1
assert ((step_iters_traj - np.arange(len(step_iters_traj))[:, None]) == step_iters_traj[0, :]).all()
| 2.140625
| 2
|
maas/plugins/neutron_metadata_local_check.py
|
claco/rpc-openstack
| 0
|
12778402
|
<reponame>claco/rpc-openstack<gh_stars>0
#!/usr/bin/env python
# Copyright 2014, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import shlex
import subprocess
from maas_common import get_neutron_client
from maas_common import metric_bool
from maas_common import print_output
from maas_common import status_err
from maas_common import status_ok
# identify the first active neutron agents container on this host
# network namespaces can only be accessed from within neutron agents container
FIND_CONTAINER = shlex.split('lxc-ls -1 --running .*neutron_agents')
SERVICE_CHECK = 'ip netns exec %s curl -fvs 169.254.169.254:80'
def check(args):
# identify the container we will use for monitoring
try:
containers_list = subprocess.check_output(FIND_CONTAINER)
container = containers_list.splitlines()[0]
except (IndexError, subprocess.CalledProcessError):
status_err('no running neutron agents containers found')
network_endpoint = 'http://{host}:9696'.format(host=args.neutron_host)
try:
neutron = get_neutron_client(endpoint_url=network_endpoint)
# not gathering api status metric here so catch any exception
except Exception as e:
status_err(str(e))
# only check networks which have a port with DHCP enabled
ports = neutron.list_ports(device_owner='network:dhcp')['ports']
nets = set([p['network_id'] for p in ports])
# perform checks for each identified network
failures = []
for net_id in nets:
namespace = 'qdhcp-%s' % net_id
service_check_cmd = SERVICE_CHECK % namespace
command = shlex.split('lxc-attach -n %s -- %s' % (container,
service_check_cmd))
try:
subprocess.check_output(command, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError as e:
# HTTP 404 response indicates the service is responsive.
# this is the expected response because the maas testing host IP
# is used to look up metadata and no metadata exists for this IP
if '404 Not Found' not in e.output:
failures.append(net_id)
is_ok = len(failures) == 0
metric_bool('neutron-metadata-agent-proxy_status', is_ok)
if is_ok:
status_ok()
else:
status_err('neutron metadata agent proxies fail on host %s '
'net_ids: %s' % (container, ','.join(failures)))
def main(args):
check(args)
if __name__ == "__main__":
with print_output():
parser = argparse.ArgumentParser(description='Check neutron proxies')
parser.add_argument('neutron_host',
type=str,
help='Neutron API hostname or IP address')
main(parser.parse_args())
| 1.84375
| 2
|
covidsafescan/__main__.py
|
micolous/covidsafescan
| 1
|
12778403
|
<filename>covidsafescan/__main__.py
#!/usr/bin/env python3
import base64
import bleak
import asyncio
import traceback
import argparse
import sys
import datetime
import json
APPLE_ID = 0x4c
WITHINGS_ID = 1023
STAGING_UUID = '17e033d3-490e-4bc9-9fe8-2f567643f4d3'
PRODUCTION_UUID = 'b82ab3fc-1595-4f6a-80f0-fe094cc218f9'
def b16(b):
"""Converts a bytes (or array of ints) into a base16 encoded str."""
return base64.b16encode(bytes(b)).decode()
async def connect(loop, address, uuid):
async with bleak.BleakClient(address, loop=loop, timeout=args.timeout) as client:
message = await client.read_gatt_char(uuid, timeout=args.timeout)
now = datetime.datetime.now().isoformat()
if args.json: #soooo deeeeep . what is pep8?
data = {
"time": now,
"data": message.decode(),
"address": address
}
print(json.dumps(data))
else:
print(f'[{now}] {address} : {message.decode()}')
def log(message):
if args.debug:
print(str(message), file=sys.stderr)
async def run(loop):
while True:
log("Scanning")
devices = await bleak.discover(timeout=args.timeout, filter_dups=False)
log("Found devices")
log(", ".join([x.address for x in devices]))
for d in devices:
log(f'{d.address}: {d.metadata}')
uuid = None
if args.adv_uuids and 'uuids' in d.metadata:
if PRODUCTION_UUID in d.metadata['uuids']:
log('* Detected production TraceTogether UUID')
uuid = PRODUCTION_UUID
elif STAGING_UUID in d.metadata['uuids']:
log('* Detected staging TraceTogether UUID')
uuid = STAGING_UUID
if 'manufacturer_data' in d.metadata:
manufacturer_data = d.metadata['manufacturer_data']
if args.adv_manuf and WITHINGS_ID in manufacturer_data:
withings_data = manufacturer_data[WITHINGS_ID]
log(f'* Detected Withings manufacturer data: {b16(withings_data)} ({withings_data})')
# TODO: Find the actual UUID to use. For now, assume prod.
if uuid is None:
uuid = PRODUCTION_UUID
if args.apple and APPLE_ID in manufacturer_data:
apple_data = manufacturer_data[APPLE_ID]
if len(apple_data) >= 17 and apple_data[0] == 1:
log(f'* Apple Overflow Area: {b16(apple_data[1:])}')
# Ref: http://www.davidgyoungtech.com/2020/05/07/hacking-the-overflow-area#background-ios-data-exchange
# Apple manufacturer packet type 0x01 has a 128-bit
# value. Each service is hashed to a 7-bit value,
# corresponding to the bit to flip high.
#
# byte 1 bit 0x01 = TraceTogether (Production)
# byte 3 bit 0x80 = TraceTogether (Staging)
if apple_data[1] & 0x01 == 0x01:
log('* Possible use of TraceTogether Prod UUID!')
uuid = PRODUCTION_UUID
elif apple_data[3] & 0x80 == 0x80:
log('* Possible use of TraceTogether Staging UUID!')
uuid = STAGING_UUID
else:
log('* No known UUID found. :(')
if uuid is not None:
if args.passive:
now = datetime.datetime.now().isoformat()
print(f'[{now}] {d.address} : {uuid}')
continue
log(f'Connecting to {d.address}')
try:
# connect() prints the payload itself and returns None, so no result check is needed
await connect(loop, d.address, uuid)
except KeyboardInterrupt:
raise
except: # ignore errors - yolo driven dev
if args.debug:
traceback.print_exc(file=sys.stderr)
if args.once:
break
def main():
global args
parser = argparse.ArgumentParser(description='Covidsafe BLE Scanner')
parser.add_argument(
'--debug',
action='store_true',
help='Enables logs')
parser.add_argument(
'--json',
action='store_true',
help='JSON Output')
parser.add_argument(
'--timeout',
type=int,
dest='timeout',
default=15,
help='Timeout, in seconds (default: %(default)s)')
parser.add_argument(
'--once',
action='store_true',
help='Only run once')
parser.add_argument(
'--no-adv-uuids',
dest='adv_uuids', action='store_false',
help='Don\'t use UUIDs in advertisement frames to find CovidSafe')
parser.add_argument(
'--no-adv-manuf',
dest='adv_manuf', action='store_false',
help='Don\'t use Withings Manufacturer Data in advertisement frames to find CovidSafe')
# https://github.com/xssfox/covidsafescan/pull/4
parser.add_argument(
'--apple',
dest='apple', action='store_true',
help='Use Apple Overflow Area to find CovidSafe (experimental, may crash!)')
parser.add_argument(
'--passive',
dest='passive', action='store_true',
help='Don\'t try to exchange GATT details, just report MAC addresses')
args = parser.parse_args()
loop = asyncio.get_event_loop()
loop.run_until_complete(run(loop))
if __name__ == "__main__":
main()
| 2.65625
| 3
|
pylabnet/network/client_server/hdawg.py
|
wi11dey/pylabnet
| 10
|
12778404
|
from pylabnet.network.core.service_base import ServiceBase
from pylabnet.network.core.client_base import ClientBase
class Service(ServiceBase):
def exposed_set_direct_user_register(self, awg_num, index, value):
return self._module.set_direct_user_register(awg_num, index, value)
def exposed_get_direct_user_register(self, awg_num, index):
return self._module.get_direct_user_register(awg_num, index)
def exposed_geti(self, node):
return self._module.geti(node)
def exposed_seti(self, node, new_int):
return self._module.seti(node, new_int)
def exposed_setd(self, node, new_double):
return self._module.setd(node, new_double)
class Client(ClientBase):
def set_direct_user_register(self, awg_num, index, value):
""" Sets a user register to a desired value
:param awg_num: (int) index of awg module
:param index: (int) index of user register (from 0-15)
:param value: (int) value to set user register to
"""
return self._service.exposed_set_direct_user_register(awg_num, index, value)
def get_direct_user_register(self, awg_num, index):
""" Gets a user register to a desired value
:param awg_num: (int) index of awg module
:param index: (int) index of user register (from 0-15)
"""
return self._service.exposed_get_direct_user_register(awg_num, index)
def geti(self, node):
"""
Wrapper for daq.getInt commands. For instance, instead of
daq.getInt('/dev8040/sigouts/0/busy'), write
hdawg.geti('sigouts/0/busy')
:node: Node which will be appended to '/device_id/'
"""
return self._service.exposed_geti(node)
def seti(self, node, new_int):
"""
Wrapper for daq.setInt commands. For instance, instead of
daq.setInt('/dev8040/sigouts/0/on', 1), write
hdawg.seti('sigouts/0/on', 1)
:node: Node which will be appended to '/device_id/'
:new_int: New value for integer
"""
return self._service.exposed_seti(node, new_int)
def setd(self, node, new_double):
"""
Wrapper for daq.setDouble commands.
"""
return self._service.exposed_setd(node, new_double)
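# --- Usage sketch (illustrative only; the host/port constructor arguments are
# assumptions based on other pylabnet clients, not verified here) ---
#
#   hdawg = Client(host='localhost', port=12345)
#   hdawg.seti('sigouts/0/on', 1)                  # enable signal output 0
#   busy = hdawg.geti('sigouts/0/busy')            # poll an integer node
#   hdawg.set_direct_user_register(awg_num=0, index=3, value=42)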
| 2.453125
| 2
|
ppgan/solver/lr_scheduler.py
|
pcwuyu/PaddleGAN
| 40
|
12778405
|
<filename>ppgan/solver/lr_scheduler.py
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import paddle
from paddle.optimizer.lr import LRScheduler, MultiStepDecay, LambdaDecay
from .builder import LRSCHEDULERS
LRSCHEDULERS.register(MultiStepDecay)
@LRSCHEDULERS.register()
class NonLinearDecay(LRScheduler):
def __init__(self, learning_rate, lr_decay, last_epoch=-1):
self.lr_decay = lr_decay
super(NonLinearDecay, self).__init__(learning_rate, last_epoch)
def get_lr(self):
lr = self.base_lr / (1.0 + self.lr_decay * self.last_epoch)
return lr
@LRSCHEDULERS.register()
class LinearDecay(LambdaDecay):
def __init__(self, learning_rate, start_epoch, decay_epochs,
iters_per_epoch):
def lambda_rule(epoch):
epoch = epoch // iters_per_epoch
lr_l = 1.0 - max(0,
epoch + 1 - start_epoch) / float(decay_epochs + 1)
return lr_l
super().__init__(learning_rate, lambda_rule)
@LRSCHEDULERS.register()
class CosineAnnealingRestartLR(LRScheduler):
""" Cosine annealing with restarts learning rate scheme.
An example config from configs/edvr_l_blur_wo_tsa.yaml:
learning_rate: !!float 4e-4
periods: [150000, 150000, 150000, 150000]
restart_weights: [1, 1, 1, 1]
eta_min: !!float 1e-7
It has four cycles, each has 150000 iterations. At 150000th, 300000th,
450000th, the scheduler will restart with the weights in restart_weights.
Args:
learning_rate (float): Base learning rate.
periods (list): Period for each cosine annealing cycle.
restart_weights (list): Restart weights at each restart iteration.
Default: [1].
eta_min (float): The minimum learning rate of the cosine annealing cycle. Default: 0.
last_epoch (int): Used in paddle.nn._LRScheduler. Default: -1.
"""
def __init__(self,
learning_rate,
periods,
restart_weights=[1],
eta_min=0,
last_epoch=-1):
self.periods = periods
self.restart_weights = restart_weights
self.eta_min = eta_min
assert (len(self.periods) == len(self.restart_weights)
), 'periods and restart_weights should have the same length.'
self.cumulative_period = [
sum(self.periods[0:i + 1]) for i in range(0, len(self.periods))
]
super(CosineAnnealingRestartLR, self).__init__(learning_rate,
last_epoch)
def get_lr(self):
for i, period in enumerate(self.cumulative_period):
if self.last_epoch <= period:
index = i
break
current_weight = self.restart_weights[index]
nearest_restart = 0 if index == 0 else self.cumulative_period[index - 1]
current_period = self.periods[index]
lr = self.eta_min + current_weight * 0.5 * (
self.base_lr - self.eta_min) * (1 + math.cos(math.pi * (
(self.last_epoch - nearest_restart) / current_period)))
return lr
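# --- Usage sketch (illustrative only), mirroring the config in the docstring above ---
#
#   sched = CosineAnnealingRestartLR(learning_rate=4e-4,
#                                    periods=[150000, 150000, 150000, 150000],
#                                    restart_weights=[1, 1, 1, 1],
#                                    eta_min=1e-7)
#   for _ in range(10):
#       sched.step()   # advances last_epoch; the lr follows the cosine cycle
#                      # and restarts at the 150000th, 300000th and 450000th iteration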
| 2.25
| 2
|
Python/to-lower-case.py
|
Ravan339/LeetCode
| 4
|
12778406
|
<gh_stars>1-10
# https://leetcode.com/problems/to-lower-case/submissions/
class Solution:
def toLowerCase(self, str):
"""
:type str: str
:rtype: str
"""
return str.lower()
| 3.09375
| 3
|
file_explorer/seabird/dat_file.py
|
sharksmhi/file_explorer
| 0
|
12778407
|
<filename>file_explorer/seabird/dat_file.py
from file_explorer.file import InstrumentFile
class DatFile(InstrumentFile):
suffix = '.dat'
def _save_info_from_file(self):
""" Binary file, sort of """
pass
def _save_attributes(self):
pass
| 2.640625
| 3
|
tests/test_vcs_verify_read_a_isofill.py
|
scottwittenburg/vcs
| 11
|
12778408
|
import basevcstest
class TestVCSVerify(basevcstest.VCSBaseTest):
def testReadIsofill(self):
iso = self.x.getisofill("a_isofill")
assert(iso.levels != [])
| 1.898438
| 2
|
api/routers/search.py
|
Amsterdam/fixxx-cspp-mini-crm-api
| 1
|
12778409
|
from fastapi import APIRouter, Depends, Request
from sqlalchemy.orm import Session
from api import search
from ..dependencies import get_user
router = APIRouter()
# Dependency
def get_db(request: Request):
return request.state.db
@router.get("/api/v1/search/{key}")
def search_schools_and_contacts(key, db: Session = Depends(get_db), user: str = Depends(get_user)):
return search.json_search(key, db)
| 2.390625
| 2
|
anime_downloader/extractors/vidstream.py
|
danielb2/anime-downloader
| 0
|
12778410
|
<reponame>danielb2/anime-downloader
import logging
import re
import sys
from anime_downloader.extractors.base_extractor import BaseExtractor
from anime_downloader.sites import helpers
logger = logging.getLogger(__name__)
class VidStream(BaseExtractor):
def _get_data(self):
url = self.url.replace('https:////','https://')
soup = helpers.get(url).text
regex = r'https://vidstreaming\.io/download\?[^"]*'
download = re.search(regex,soup).group()
soup = helpers.soupify(helpers.get(download))
link = soup.select('div.dowload > a')[0].get('href')
return {
'stream_url': link,
'referer': download
}
| 2.671875
| 3
|
summary_chart.py
|
DA04/fitness_tracker_data_parsing
| 0
|
12778411
|
# Stacked Histogram with minutes spent on training type from monthly perspective
import psycopg2
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime
import seaborn as sns
from matplotlib.pyplot import figure
# get session data summary with sport split
conn = psycopg2.connect(host="localhost", database="garmin_data", user="postgres", password="*****")
df = pd.read_sql_query("""select to_char(timestamp, 'YYYY-MM') as stamp, sum(total_timer_time / 60) as minutes_spent, sport
from session
group by to_char(timestamp, 'YYYY-MM'), sport
having sum(total_timer_time / 60) > 0
order by to_char(timestamp, 'YYYY-MM') desc""", conn)
# get min and max dates from the dataframe
min_date = datetime.strptime(min(df.stamp), '%Y-%m')
max_date = datetime.strptime(max(df.stamp), '%Y-%m')
n_max_date = max_date + pd.DateOffset(months=1)
# create a table with all months from min to max date
data = pd.DataFrame()
data['Dates'] = pd.date_range(start=min_date, end=n_max_date, freq='M')
data['Dates'] = data['Dates'].dt.strftime('%Y-%m')
# merge datasets
df_main = pd.merge(data, df, left_on='Dates', right_on='stamp', how='left', indicator=True)
df_main = df_main[['Dates', 'minutes_spent','sport']]
df_main = df_main.fillna(0)
# pivot table
df_pivot = pd.pivot_table(df_main, index='Dates', columns='sport', values='minutes_spent').reset_index()
df_pivot = df_pivot.fillna(0)
df_pivot = df_pivot[['Dates', 'cross_country_skiing', 'cycling', 'running', 'swimming', 'walking']]
# create stacked bar chart for monthly sports
df_pivot.plot(x='Dates', kind='bar', stacked=True, color=['r', 'y', 'g', 'b', 'k'])
# labels for x & y axis
plt.xlabel('Months', fontsize=20)
plt.ylabel('Minutes Spent', fontsize=20)
plt.legend(loc='upper left', fontsize=20)
for num in [69, 57, 45, 33, 21, 9]:
plt.axvline(linewidth=2, x=num, linestyle=':', color = 'grey')
# title of plot
plt.title('Minutes spent by Sport', fontsize=20)
plt.rcParams['figure.figsize'] = [24, 10]
| 2.953125
| 3
|
grand_contest/012/A.py
|
FGtatsuro/myatcoder
| 0
|
12778412
|
import sys
input = sys.stdin.readline
sys.setrecursionlimit(10 ** 7)
n = int(input())
a = sorted(list(map(int, input().split())))
ans = 0
for i in range(n):
ans += a[(3 * n) - (2*i + 2)]
print(ans)
| 2.75
| 3
|
descriptive/descriptive/doctype/industries/test_industries.py
|
ujjwalkumar93/descriptive
| 0
|
12778413
|
<gh_stars>0
# Copyright (c) 2022, k2s.co and Contributors
# See license.txt
# import frappe
import unittest
class TestIndustries(unittest.TestCase):
pass
| 1.070313
| 1
|
fast_autoGrad/autoGrad.py
|
juliaprocess/ml_libs
| 4
|
12778414
|
#!/usr/bin/env python
#This example show how to use pytorch to
#solve a convex optimization problem
# Optimize x^T A x + b^T x
# A = [1 0;0 2] , b = [1, 2] , solution = -[1/2 1/2]
import torch
from torch.autograd import Variable
import numpy as np
from minConf_PQN import *
dtype = torch.FloatTensor
#dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU
learning_rate = 0.1
x = torch.from_numpy(np.ones((2,1)))
x = Variable(x.type(dtype), requires_grad=True)
A = torch.from_numpy(np.array([[1,0],[0,2]]))
A = Variable(A.type(dtype), requires_grad=False)
b = torch.from_numpy(np.array([[1],[2]]))
b = Variable(b.type(dtype), requires_grad=False)
for m in range(30):
opt1 = torch.mm(x.transpose(0,1), A)
loss = torch.mm(opt1, x) + torch.mm(b.transpose(0,1),x)
loss.backward()
# NOTE: funObj and funProj are not defined in this script, so the projected
# quasi-Newton call cannot run as written; it is left here commented out.
# minConf_PQN(funObj, x, funProj, options=None)
x.data -= learning_rate*x.grad.data
x.grad.data.zero_()
print(x.data.numpy())
import pdb; pdb.set_trace()
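# --- Sanity-check sketch (added for illustration, not original code) ---
# The closed-form minimiser of x^T A x + b^T x is x* = -0.5 * A^{-1} b, which for
# A = diag(1, 2) and b = [1, 2] gives [-0.5, -0.5], matching the comment at the top.
#
#   A_np = np.array([[1., 0.], [0., 2.]])
#   b_np = np.array([[1.], [2.]])
#   print(-0.5 * np.linalg.solve(A_np, b_np))   # [[-0.5], [-0.5]]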
| 3.34375
| 3
|
tests/test_version.py
|
kelsin/mypyprox
| 0
|
12778415
|
<filename>tests/test_version.py
import io
import contextlib
import unittest
from mypyprox import version
class TestTypes(unittest.TestCase):
def test_version(self):
self.assertTrue(isinstance(version.__version__, str))
def test_main(self):
out = io.StringIO()
with contextlib.redirect_stdout(out):
version.main("__main__")
self.assertEqual(f"{version.__version__}\n", out.getvalue())
| 2.734375
| 3
|
LC/557c.py
|
szhu3210/LeetCode_Solutions
| 2
|
12778416
|
class Solution(object):
def reverseWords(self, s):
"""
:type s: str
:rtype: str
"""
words = s.split(' ')
words = map(lambda x: x[::-1], words)
res = ' '.join(words)
return res
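# Example (illustrative): Solution().reverseWords("Let's take LeetCode contest")
# returns "s'teL ekat edoCteeL tsetnoc" -- each word reversed, word order kept.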
| 3.40625
| 3
|
scripts/EmailWorstCorrections/email_worst_correction_lambda_function.py
|
Ivan-Nebogatikov/ChineseCorrector
| 0
|
12778417
|
<reponame>Ivan-Nebogatikov/ChineseCorrector
import json
import boto3
from boto3.dynamodb.conditions import Attr
from time import strftime, gmtime
client = boto3.Session(
aws_access_key_id='<KEY>',
aws_secret_access_key='a'
)
dynamodb = client.resource('dynamodb', region_name='us-east-2')
table = dynamodb.Table('CachedCorrections')
ses = boto3.client('ses', region_name='eu-west-2', aws_access_key_id='<KEY>',
aws_secret_access_key='a')
def lambda_handler(event, context):
res = table.scan(
FilterExpression=Attr('Disliked').size().gt(0)
)
r = list(map(lambda x: {'Input': x['Input'], 'Corrected': x['Corrected'], 'DislikedCount': len(x['Disliked'])},
res['Items']))
r.sort(key=lambda x: x['DislikedCount'], reverse=True)
resp = ses.send_email(
Source='<EMAIL>',
Destination={
'ToAddresses': [
'<EMAIL>',
]
},
Message={
'Subject': {
'Data': 'Weekly update of the worst corrections: ' + strftime("%Y-%m-%d", gmtime()),
'Charset': 'utf-8'
},
'Body': {
'Text': {
'Data': json.dumps(r, ensure_ascii=False, indent=2),
'Charset': 'UTF-8'
}
}
}
)
return {
'statusCode': 200,
'body': json.dumps(resp, indent=2)
}
| 2.140625
| 2
|
gamelib/data.py
|
sirtango/LordPong
| 0
|
12778418
|
import os.path
ROOT = os.path.join(os.path.dirname(__file__), '..')
DATA = os.path.join(ROOT, 'data')
def scenefile(scenename, filename):
return os.path.join(os.path.join(DATA, scenename), filename)
def datafile(filename):
return os.path.join(DATA, filename)
| 2.5
| 2
|
examples/add_custom_field_options.py
|
iskunk/hub-rest-api-python
| 68
|
12778419
|
<reponame>iskunk/hub-rest-api-python<filename>examples/add_custom_field_options.py
#!/usr/bin/env python
import argparse
import json
import logging
import sys
from blackduck.HubRestApi import HubInstance
parser = argparse.ArgumentParser("Modify a custom field")
parser.add_argument("object", choices=["BOM Component", "Component", "Component Version", "Project", "Project Version"], help="The object that the custom field should be attached to")
parser.add_argument("field_id", help="The ID of the custom field to modify")
parser.add_argument("-o", "--options", action='append', nargs=2, metavar=('label', 'position'), help="The options to add. To add more than one option repeat the -o option, supply a label and position for each possible selection. Used for DROPDOWN, MULTISELECT, and RADIO field types.")
args = parser.parse_args()
logging.basicConfig(format='%(asctime)s:%(levelname)s:%(message)s', stream=sys.stderr, level=logging.DEBUG)
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("urllib3").setLevel(logging.WARNING)
options = [{"label": io[0], "position": io[1]} for io in args.options]
hub = HubInstance()
# find the specified custom field on the object type and add the options to it
custom_fields = hub.get_custom_fields(args.object).get('items', [])
for custom_field in custom_fields:
url = custom_field['_meta']['href']
field_id = url.split("/")[-1]
if field_id == args.field_id:
field_obj = hub.execute_get(url).json()
options_url = hub.get_link(field_obj, "custom-field-option-list")
for option in options:
response = hub.execute_post(options_url, data=option)
if response.status_code == 201:
print("Successfully added option {} to custom field {}".format(option, url))
else:
print("Failed to add option {} for custom field {}, status code: {}".format(
option, url, response.status_code))
| 2.625
| 3
|
stage2_cINN/AE/modules/ckpt_util.py
|
CJWBW/image2video-synthesis-using-cINNs
| 85
|
12778420
|
<gh_stars>10-100
import os, hashlib
import requests
from tqdm import tqdm
URL_MAP = {
"vgg_lpips": "https://heibox.uni-heidelberg.de/f/607503859c864bc1b30b/?dl=1"
}
CKPT_MAP = {
"vgg_lpips": "modules/lpips/vgg.pth"
}
MD5_MAP = {
"vgg_lpips": "d507d7349b931f0638a25a48a722f98a"
}
def download(url, local_path, chunk_size=1024):
os.makedirs(os.path.split(local_path)[0], exist_ok=True)
with requests.get(url, stream=True) as r:
total_size = int(r.headers.get("content-length", 0))
with tqdm(total=total_size, unit="B", unit_scale=True) as pbar:
with open(local_path, "wb") as f:
for data in r.iter_content(chunk_size=chunk_size):
if data:
f.write(data)
pbar.update(chunk_size)
def md5_hash(path):
with open(path, "rb") as f:
content = f.read()
return hashlib.md5(content).hexdigest()
def get_ckpt_path(name, root=None, check=False):
assert name in URL_MAP
cachedir = os.environ.get("XDG_CACHE_HOME", os.path.expanduser("~/.cache"))
root = root if root is not None else os.path.join(cachedir, "modules")
path = os.path.join(root, CKPT_MAP[name])
if not os.path.exists(path) or (check and not md5_hash(path) == MD5_MAP[name]):
print("Downloading {} model from {} to {}".format(name, URL_MAP[name], path))
download(URL_MAP[name], path)
md5 = md5_hash(path)
assert md5 == MD5_MAP[name], md5
return path
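# A minimal usage sketch of the helper above: on first call it downloads the LPIPS VGG
# checkpoint into the cache directory, afterwards it just returns the cached path (and,
# with check=True, re-verifies the MD5 hash).
if __name__ == "__main__":
    ckpt = get_ckpt_path("vgg_lpips", check=True)
    print("LPIPS VGG checkpoint available at:", ckpt)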
| 2.265625
| 2
|
main.py
|
ovshake/cobra
| 22
|
12778421
|
<filename>main.py
def main(config):
from COBRA import Solver
solver = Solver(config)
cudnn.benchmark = True
return solver.train()
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--compute_all', type=bool, default=False)
parser.add_argument('--mode', type=str, default='train')
parser.add_argument('--just_valid', type=bool, default=False)
parser.add_argument('--lr', type=list, default=[1e-4, 2e-4, 2e-4, 2e-4, 2e-4])
parser.add_argument('--beta1', type=float, default=0.5)
parser.add_argument('--beta2', type=float, default=0.999)
parser.add_argument('--batch_size', type=int, default=100)
parser.add_argument('--output_shape', type=int, default=512)
parser.add_argument('--alpha', type=float, default=0.5)
parser.add_argument('--beta', type=float, default=0.166)
parser.add_argument('--gamma', type=float, default=0.166)
parser.add_argument('--delta', type=float, default=0.166)
parser.add_argument('--datasets', type=str, default='wiki_doc2vec') # xmedia, wiki_doc2vec, mscoco, gossip, politifact, nus_tfidf, metoo, crisis, crisis_damage
parser.add_argument('--view_id', type=int, default=-1)
parser.add_argument('--sample_interval', type=int, default=1)
parser.add_argument('--epochs', type=int, default=200)
parser.add_argument('--num_negative_samples', type=int, default=25)
parser.add_argument('--num_anchors', type=int, default=5)
parser.add_argument('--use_nce', type=bool, default=False)
config = parser.parse_args()
seed = 123
print('seed: ' + str(seed))
import numpy as np
np.random.seed(seed)
import random as rn
rn.seed(seed)
import os
os.environ['PYTHONHASHSEED'] = str(seed)
import torch
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
from torch.backends import cudnn
cudnn.enabled = False
results = main(config)
| 2.125
| 2
|
tests/test_characters_already_hired_as_lines.py
|
Pelmen323/Kaiserreich_Jenkins_PyTests
| 0
|
12778422
|
<gh_stars>0
##########################
# Test script to check that characters have 'already hired' lines if they have > 1 advisor roles
# By Pelmen, https://github.com/Pelmen323
##########################
import re
from ..test_classes.generic_test_class import ResultsReporter
from ..test_classes.characters_class import Characters
FALSE_POSITIVES = ['eth_', 'asy_malik_qambar', 'can_john_bracken', 'can_robert_manion', 'sic_giovanni_messe', 'syr_sami_al_hinawi', 'irq_rashid_al_gaylani', 'irq_hashim_al_alawi'] # convert to list if more added here
def test_check_characters_already_hired(test_runner: object):
characters, paths = Characters.get_all_characters(test_runner=test_runner, return_paths=True)
results = []
for char in characters:
char_name = re.findall('name = (.*)', char)[0]
one_advisor_role = char.count('advisor = {') == 1
two_advisor_roles = char.count('advisor = {') == 2
three_advisor_roles = char.count('advisor = {') == 3
sic_status = char.count('slot = second_in_command')
not_already_hired_status = char.count('not_already_hired_except_as')
if len([i for i in FALSE_POSITIVES if i in char_name]) > 0:
continue
if one_advisor_role:
if not_already_hired_status > 0:
results.append((char_name, paths[char], "Character has 1 advisor role and has 'not_already_hired_except_as' line"))
elif two_advisor_roles:
if sic_status == 0:
if not_already_hired_status < 2:
results.append((char_name, paths[char], "Character has 2 advisor roles but doesn't have 2 'not_already_hired_except_as' lines"))
if sic_status == 1:
if not_already_hired_status < 1:
results.append((char_name, paths[char], "Character has 2 advisor roles (including 1 sic role) but doesn't have 1 'not_already_hired_except_as' lines"))
elif three_advisor_roles:
if sic_status == 0:
if not_already_hired_status < 3:
results.append((char_name, paths[char], "Character has 3 advisor roles but doesn't have 3 'not_already_hired_except_as' lines"))
if sic_status == 1:
if not_already_hired_status < 2:
results.append((char_name, paths[char], "Character has 3 advisor roles (including 1 sic role) but doesn't have 2 'not_already_hired_except_as' lines"))
if sic_status > 1:
results.append((char_name, paths[char], "Character has > 1 sic roles"))
ResultsReporter.report_results(results=results, message="Issues with 'not_already_hired_except_as' lines were encountered. Check console output")
| 2.59375
| 3
|
resources/ytyp.py
|
Markus1812/Sollumz
| 1
|
12778423
|
<reponame>Markus1812/Sollumz
from .codewalker_xml import *
from .ymap import EntityListProperty, ExtensionsListProperty
from numpy import float32
class YTYP:
file_extension = ".ytyp.xml"
@staticmethod
def from_xml_file(filepath):
return CMapTypes.from_xml_file(filepath)
@staticmethod
def write_xml(cmap_types, filepath):
return cmap_types.write_xml(filepath)
class BaseArchetype(ElementTree):
tag_name = "Item"
def __init__(self):
super().__init__()
self.type = AttributeProperty("type", "CBaseArchetypeDef")
self.lod_dist = ValueProperty("lodDist")
self.flags = ValueProperty("flags")
self.special_attribute = ValueProperty("specialAttribute")
self.bb_min = VectorProperty("bbMin")
self.bb_max = VectorProperty("bbMax")
self.bs_center = VectorProperty("bsCentre")
self.bs_radius = ValueProperty("bsRadius")
self.hd_texture_dist = ValueProperty("hdTextureDist")
self.name = TextProperty("name")
self.texture_dictionary = TextProperty("textureDictionary")
self.clip_dictionary = TextProperty("clipDictionary")
self.drawable_dictionary = TextProperty("drawableDictionary")
self.physics_dictionary = TextProperty("physicsDictionary")
self.asset_type = TextProperty("assetType")
self.asset_name = TextProperty("assetName")
self.extensions = ExtensionsListProperty()
class TimeArchetype(BaseArchetype):
def __init__(self):
super().__init__()
self.type = AttributeProperty("type", "CTimeArchetypeDef")
self.time_flags = ValueProperty("timeFlags")
class Corner(ElementProperty):
value_types = (tuple)
tag_name = "Item"
def __init__(self, tag_name=None, value=None):
super().__init__("Item", value or tuple())
@staticmethod
def from_xml(element):
value = element.text.split(",")
value = [float(val) for val in value]
if len(value) > 3:
value = value[:3]
return Corner(value=tuple(value))
def to_xml(self):
if not self.value or len(self.value) < 1:
return None
elem = ET.Element(self.tag_name)
elem.text = ",".join([str(float32(val)) for val in self.value])
return elem
class CornersListProperty(ListProperty):
list_type = Corner
tag_name = "corners"
class AttachedObjectsBuffer(ElementProperty):
value_types = (int)
def __init__(self):
super().__init__(tag_name="attachedObjects", value=[])
@classmethod
def from_xml(cls, element: ET.Element):
new = cls()
if element.text:
indices = element.text.strip().replace("\n", "").split()
new.value = [int(i) for i in indices]
return new
def to_xml(self):
element = ET.Element(self.tag_name)
columns = 10
text = []
for index, entity_index in enumerate(self.value):
text.append(str(entity_index))
if index < len(self.value) - 1:
text.append(' ')
if (index + 1) % columns == 0:
text.append('\n')
element.text = ''.join(text)
return element
class Portal(ElementTree):
tag_name = "Item"
def __init__(self):
super().__init__()
self.room_from = ValueProperty("roomFrom")
self.room_to = ValueProperty("roomTo")
self.flags = ValueProperty("flags")
self.mirror_priority = ValueProperty("mirrorPriority")
self.opacity = ValueProperty("opacity")
self.audio_occlusion = ValueProperty("audioOcclusion")
self.corners = CornersListProperty()
self.attached_objects = AttachedObjectsBuffer()
class PortalsListProperty(ListProperty):
tag_name = "portals"
list_type = Portal
def __init__(self, tag_name=None, value=None):
super().__init__(tag_name="portals", value=value or [])
self.item_type = AttributeProperty("itemType", "CMloPortalDef")
class Room(ElementTree):
tag_name = "Item"
def __init__(self):
super().__init__()
self.name = TextProperty("name")
self.bb_min = VectorProperty("bbMin")
self.bb_max = VectorProperty("bbMax")
self.blend = ValueProperty("blend", 1)
self.timecycle_name = TextProperty("timecycleName")
self.secondary_timecycle_name = TextProperty("secondaryTimecycleName")
self.flags = ValueProperty("flags")
self.portal_count = ValueProperty("portalCount")
self.floor_id = ValueProperty("floorId")
self.exterior_visibility_depth = ValueProperty(
"exteriorVisibiltyDepth", -1)
self.attached_objects = AttachedObjectsBuffer()
class RoomsListProperty(ListProperty):
tag_name = "rooms"
list_type = Room
def __init__(self, tag_name=None, value=None):
super().__init__(tag_name="rooms", value=value or [])
self.item_type = AttributeProperty("itemType", "CMloRoomDef")
class EntitySet(ElementTree):
tag_name = "Item"
def __init__(self):
super().__init__()
self.name = TextProperty("name")
self.locations = TextProperty("locations")
self.entities = EntityListProperty()
class EntitySetsListProperty(ListProperty):
tag_name = "entitySets"
list_type = EntitySet
def __init__(self, tag_name=None, value=None):
super().__init__(tag_name="entitySets", value=value or [])
self.item_type = AttributeProperty("itemType", "CMloEntitySet")
class TimeCycleModifier(ElementTree):
tag_name = "Item"
    def __init__(self):
        super().__init__()
        self.name = TextProperty("name")
self.sphere = QuaternionProperty("sphere")
self.percentage = ValueProperty("percentage")
self.range = ValueProperty("range")
self.start_hour = ValueProperty("startHour")
self.end_hour = ValueProperty("endHour")
class TimeCycleModifiersListProperty(ListProperty):
tag_name = "timeCycleModifiers"
list_type = TimeCycleModifier
def __init__(self, tag_name=None, value=None):
super().__init__(tag_name="timeCycleModifiers", value=value or [])
self.item_type = AttributeProperty("itemType", "CMloTimeCycleModifier")
class MloArchetype(BaseArchetype):
def __init__(self):
super().__init__()
self.type = AttributeProperty("type", "CMloArchetypeDef")
self.mlo_flags = ValueProperty("mloFlags")
self.entities = EntityListProperty()
self.rooms = RoomsListProperty()
self.portals = PortalsListProperty()
self.entity_sets = EntitySetsListProperty()
self.timecycle_modifiers = TimeCycleModifiersListProperty()
class ArchetypesListProperty(ListProperty):
list_type = BaseArchetype
tag_name = "archetypes"
@staticmethod
def from_xml(element: ET.Element):
new = ArchetypesListProperty()
for child in element.iter():
if "type" in child.attrib:
arch_type = child.get("type")
if arch_type == "CBaseArchetypeDef":
new.value.append(BaseArchetype.from_xml(child))
elif arch_type == "CMloArchetypeDef":
new.value.append(MloArchetype.from_xml(child))
elif arch_type == "CTimeArchetypeDef":
new.value.append(TimeArchetype.from_xml(child))
return new
class CompositeEntityType(ElementTree):
tag_name = "Item"
def __init__(self):
super().__init__()
self.name = TextProperty("Name")
self.lod_dist = ValueProperty("lodDist")
self.flags = ValueProperty("flags")
self.special_attribute = ValueProperty("specialAttribute")
self.bb_min = VectorProperty("bbMin")
self.bb_max = VectorProperty("bbMax")
self.bs_center = VectorProperty("bsCentre")
self.bs_radius = ValueProperty("bsRadius")
self.start_model = TextProperty("StartModel")
self.end_model = TextProperty("EndModel")
self.start_imap_file = TextProperty("StartImapFile")
self.end_imap_file = TextProperty("EndImapFile")
self.ptfx_assetname = TextProperty("PtFxAssetName")
# TODO
# self.animations = AnimationsListProperty()
class CompositeEntityTypeListProperty(ListProperty):
list_type = CompositeEntityType
tag_name = "compositeEntityTypes"
def __init__(self, tag_name=None, value=None):
super().__init__(tag_name="compositeEntityTypes", value=value or [])
self.item_type = AttributeProperty("itemType", "CCompositeEntityType")
class CMapTypes(ElementTree):
tag_name = "CMapTypes"
def __init__(self):
super().__init__()
self.extensions = ExtensionsListProperty()
self.archetypes = ArchetypesListProperty()
self.name = TextProperty("name")
# Investigate: Not used in any ytyp file in the game?
# self.dependencies = DependenciesListProperty()
self.composite_entity_type = CompositeEntityTypeListProperty()
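# A minimal usage sketch (the file paths below are hypothetical): round-trips a .ytyp.xml file
# through the CMapTypes element tree defined above, relying on the ElementTree base class for
# the actual XML reading/writing.
if __name__ == "__main__":
    ytyp = YTYP.from_xml_file("example.ytyp.xml")
    YTYP.write_xml(ytyp, "example_roundtrip.ytyp.xml")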
| 2.359375
| 2
|
tests/toolbox/test_Sloppy_derived_parameters.py
|
yuanz271/PyDSTool
| 0
|
12778424
|
<filename>tests/toolbox/test_Sloppy_derived_parameters.py
"""
Test of the derived_parameters feature (i.e. "containsRHSdefs" is True)
and also the feature that allows inclusion of the right-hand side of one ODE in another ODE.
This is essentially a small extension on the following tutorial example:
http://www.ni.gsu.edu/~rclewley/PyDSTool/Tutorial/Tutorial_linear.html
"""
from PyDSTool.Toolbox.makeSloppyModel import makeSloppyModel
from numpy.testing import assert_allclose
def test_Sloppy_derived_parameters():
sloppyModelEg = {
'assignments': {},
'derived_params': {'k': 's1/s2', 'q': 's3*s4', 'm': 'q+s5'},
'functions': {},
'odes': {'x':'y + _y_RHS', 'y': '-k*x/m'},
'parameters':{'s1': 1, 's2': 10., 's3': 0.25, 's4': 1, 's5': 0.25},
'events': {},
'domains': {}
}
model_name = 'test_derived_parameters'
ics = {'x': 1, 'y': 0.4}
algparams = {'init_step': 0.1, 'stiff': True}
sModel = makeSloppyModel(model_name, sloppyModelEg, 'Vode_ODEsystem',
algParams=algparams, silent=False,
containsRHSdefs=True)
sModel.compute(trajname='test_derived_params',
force=True,
ics=ics,
tdata=[0, 20],
verboselevel=0
)
pts = sModel.sample('test_derived_params')
assert_allclose(pts[-1]['x'], -0.042398, rtol=1e-4)
assert_allclose(pts[-1]['y'], -0.073427, rtol=1e-4)
| 2.484375
| 2
|
project/experiments/exp_800_mile_stone/src/old/tmp_which_nodes_are_slow_read_tensorboard.py
|
liusida/thesis-bodies
| 0
|
12778425
|
import pandas as pd
from common.tflogs2pandas import tflog2pandas
import glob
df_results = pd.DataFrame()
filenames = glob.glob("output_data/tensorboard/model-*/PPO_1")
for filename in filenames:
print(filename)
df = tflog2pandas(filename)
df = df[df["metric"]=="time/fps"]
average_fps = df["value"].mean()
min_fps = df["value"].min()
print("average_fps: ", average_fps, ", min_fps: ", min_fps,)
df_results = df_results.append({
"path": filename,
"average_fps": average_fps,
"min_fps": min_fps,
}, ignore_index=True)
df_results.to_pickle("output_data/tmp/which_nodes_are_slow")
| 2.640625
| 3
|
deeplearning4j-core/src/main/resources/scripts/plot.py
|
atssada/deeplearning4j
| 2
|
12778426
|
<reponame>atssada/deeplearning4j
import math
from matplotlib.pyplot import hist, title, subplot, scatter, plot
import matplotlib.pyplot as plt
import numpy as np
from PIL import Image
import seaborn # improves matplotlib look and feel
import sys
import time
'''
Optimization Methods Visualization
Graph tools to help visualize how optimization is performing
'''
GLOBAL_TIME = 1.5
def load_file(path):
return np.loadtxt(path, delimiter=',')
def sigmoid(hidden_mean):
return 1 / (1 + np.exp(-hidden_mean))
def render_plot(values, plot_type='histogram', chart_title=''):
if np.product(values.shape) < 2:
values = np.zeros((3, 3))
chart_title += '-fake'
if plot_type == 'histogram':
hist(values)
elif plot_type == "scatter":
scatter(values)
else:
print "The " + plot_type + " format is not supported. Please choose histogram or scatter."
magnitude = ' mm %g ' % np.mean(np.fabs(values))
chart_title += ' ' + magnitude
title(chart_title)
def render_activation_probability(dataPath, filename):
hidden_mean = load_file(dataPath)
img = Image.fromarray(sigmoid(hidden_mean) * 256)
if img.mode != 'RGB':
img = img.convert('RGB')
img.save(filename, 'PNG')
def plot_single_graph(path, chart_title, filename):
print 'Graphing ' + chart_title + '\n'
values = load_file(path)
plt.plot(values, 'b')
plt.title(chart_title)
plt.savefig(filename, format='png')
plt.show(block=False)
time.sleep(GLOBAL_TIME)
plt.close()
def plot_matrices(orig_path, plot_type, filename):
paths = orig_path.split(',')
for idx, path in enumerate(paths):
if idx % 2 == 0:
title = paths[idx + 1]
print 'Loading matrix ' + title + '\n'
matrix = load_file(path)
subplot(2, len(paths)/4, idx/2+1)
render_plot(matrix, plot_type, chart_title=title)
plt.tight_layout()
plt.savefig(filename, format='png')
plt.show(block=False)
time.sleep(GLOBAL_TIME)
plt.close()
# TODO Finish adapting. Code still does not fully run through.
# def render_filter(data_path, n_rows, n_cols, filename):
# weight_data = load_file(data_path).reshape((n_rows, n_cols))
# patch_width = weight_data.shape[1]
# patch_height = 1
#
# # Initialize background to dark gray
# filter_frame = np.ones((n_rows*patch_width, n_cols * patch_height), dtype='uint8')
#
# for row in xrange(int(n_rows/n_cols)):
# for col in xrange(n_cols):
# patch = weight_data[row * n_cols + col].reshape((patch_width, patch_height))
# norm_patch = ((patch - patch.min()) / (patch.max() - patch.min() + 1e-6))
# filter_frame[row * patch_width: row * patch_width + patch_width,
# col * patch_height:col * patch_height + patch_height] = norm_patch * 255
# img = Image.fromarray(filter_frame)
# img.savefig(filename)
# img.show()
#
# def render_filter(data_path, filename, filter_width=10, filter_height=10):
# print 'Rendering filter image...'
# weight_data = load_file(data_path)
# n_rows = weight_data.shape[0]
# n_cols = weight_data.shape[1]
# padding = 1
#
# # Initialize background to dark gray
# filter_frame = np.ones(((filter_width+padding) * filter_width, (filter_height+padding) * filter_height), dtype='uint8') * 51
#
# for row in xrange(n_rows):
# for col in xrange(n_cols):
# patch = weight_data[row * n_cols + col].reshape((filter_width, filter_height))
# norm_patch = ((patch - patch.min()) / (patch.max() - patch.min() + 1e-6))
# filter_frame[row * (filter_height+padding): row * (filter_height+padding)+filter_height, col * (filter_width+padding): col * (filter_width+padding)+filter_width] = norm_patch * 255
# filter_frame[row * (filter_height+padding): row * (filter_height+padding) + filter_height, col * (filter_width+padding): col *(filter_width+padding) + filter_width]
# img = Image.fromarray(filter_frame)
# if img.mode != 'RGB':
# img = img.convert('RGB')
# img.save(filename)
# def vis_square(data_path, filename, n_rows=28, n_cols=28, padsize=1, padval=0):
# data = load_file(data_path)
# data = data.reshape(n_rows, n_cols)
#
# data -= data.min()
# data /= data.max()
#
# # force the number of filters to be square
# n = int(np.ceil(np.sqrt(data.shape[0])))
# padding = ((0, n ** 2 - data.shape[0]), (0, padsize), (0, padsize)) + ((0, 0),) * (data.ndim - 3)
# data = np.pad(data, padding, mode='constant', constant_values=(padval, padval))
#
# # tile the filters into an image
# data = data.reshape((n, n) + data.shape[1:]).transpose((0, 2, 1, 3) + tuple(range(4, data.ndim + 1)))
# data = data.reshape((n * data.shape[1], n * data.shape[3]) + data.shape[4:])
#
# plt.imshow(data)
# time.sleep(GLOBAL_TIME)
# plt.savefig(data, filename)
if __name__ == '__main__':
if len(sys.argv) < 4:
        print 'Usage: plot.py <plot_type> <data_path> <output_filename>; plot_type is one of activations, single_matrix, histogram, scatter, loss, accuracy'
sys.exit(1)
plot_type = sys.argv[1]
path = sys.argv[2]
filename = sys.argv[3]
if plot_type == 'activations':
render_activation_probability(path, filename)
elif plot_type == 'single_matrix':
        render_plot(load_file(path))
elif plot_type == 'histogram':
plot_matrices(path, plot_type, filename)
elif plot_type == 'scatter':
plot_matrices(path, plot_type, filename)
elif plot_type == 'loss':
plot_single_graph(path, plot_type, filename)
elif plot_type == 'accuracy':
plot_single_graph(path, plot_type, filename)
# elif sys.argv[1] == 'filter':
# if sys.argv[7]:
# n_rows = int(sys.argv[4])
# n_cols = int(sys.argv[5])
# filter_width = int(sys.argv[6])
# filter_height = int(sys.argv[7])
# render_filter(path, filename, n_rows, n_cols, filter_height, filter_width)
# elif sys.argv[5]:
# n_rows = int(sys.argv[4])
# n_cols = int(sys.argv[5])
# render_filter(path, filename, n_rows, n_cols)
# else:
# render_filter(path, filename)
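# Example invocations (the file paths are hypothetical); the first CLI argument selects the plot
# type handled in the dispatch above, the second is the input data (for the matrix plots, a
# comma-separated list of path,title pairs), and the third is the output image:
#   python plot.py loss loss_values.csv loss.png
#   python plot.py histogram weights.csv,weights,bias.csv,bias histogram.png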
| 3.296875
| 3
|
LeetCode/1723. Find Minimum Time to Finish All Jobs/solution.py
|
InnoFang/oh-my-algorithms
| 19
|
12778427
|
"""
60 / 60 test cases passed.
Runtime: 64 ms
Memory Usage: 14.5 MB
"""
from typing import List


# Binary search on the answer (the largest working time of any single worker); dfs checks
# whether all jobs can be assigned to k workers without exceeding the candidate limit.
class Solution:
def minimumTimeRequired(self, jobs: List[int], k: int) -> int:
def dfs(workers, idx, limit):
if idx >= len(jobs):
return True
for i in range(len(workers)):
if workers[i] + jobs[idx] <= limit:
workers[i] += jobs[idx]
if dfs(workers, idx + 1, limit):
return True
workers[i] -= jobs[idx]
if workers[i] == 0 or workers[i] + jobs[idx] == limit:
break
return False
jobs.sort(reverse=True)
l, r = max(jobs), sum(jobs)
while l < r:
mid = l + r >> 1
if dfs([0] * k, 0, mid):
r = mid
else:
l = mid + 1
return l
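# Quick sanity check (standard example for this problem): splitting jobs [3, 2, 3] across
# k = 3 workers gives a minimum possible maximum working time of 3.
#   Solution().minimumTimeRequired([3, 2, 3], 3)  # -> 3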
| 2.765625
| 3
|
the-platform-service-upgrade/healthcare/resources/disease/diabetes/model/net_handler.py
|
vivekbarsagadey/the-platform
| 1
|
12778428
|
<filename>the-platform-service-upgrade/healthcare/resources/disease/diabetes/model/net_handler.py
from tensorflow.python import keras
import os
BASE_FOLDER = os.path.abspath(os.path.dirname(__name__))
NEXT_PATH = "/healthcare/resources/disease/diabetes/model/save/"
FULL_PATH = BASE_FOLDER + NEXT_PATH
class nethandler:
def __init__(self):
print("Store Model init")
def savenet(self,model=None):
model_json = model.to_json()
with open(FULL_PATH+"model.json", "w") as json_file:
json_file.write(model_json)
        # save the full model (architecture + weights) to HDF5
model.save(FULL_PATH + "model.h5")
def loadnet(self):
loaded_model = keras.models.load_model(FULL_PATH + "model.h5")
print("Loaded model from disk")
return loaded_model
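# A minimal usage sketch (the Keras model below is hypothetical, and the save folder
# configured above is assumed to exist): persists a model and loads it back.
if __name__ == "__main__":
    handler = nethandler()
    model = keras.models.Sequential([keras.layers.Dense(1, input_shape=(8,))])
    handler.savenet(model=model)
    restored = handler.loadnet()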
| 2.59375
| 3
|
app/main/forms.py
|
mercy-shii/Vblog
| 0
|
12778429
|
from flask_wtf import FlaskForm
from wtforms import StringField,TextAreaField,FileField,SubmitField
from wtforms.validators import Required
class CommentForm(FlaskForm):
title = StringField('Comment title',validators= [Required()])
comment = TextAreaField('Comment review')
submit = SubmitField('submit')
class BlogForm(FlaskForm):
title = StringField('Blog title',validators= [Required()])
message = TextAreaField('Blog Message',validators=[Required()])
submit = SubmitField('Submit')
class UpdateProfile(FlaskForm):
bio = TextAreaField('Tell us about yourself',validators=[Required()])
submit = SubmitField('submit')
| 2.65625
| 3
|
code/hwcloud/hws_service/evs_service.py
|
Hybrid-Cloud/cloud_manager
| 0
|
12778430
|
<gh_stars>0
__author__ = 'Administrator'
import json
from heat.engine.resources.hwcloud.hws_service import HWSService
class EVSService(HWSService):
def __init__(self, ak, sk, region, protocol, host, port):
super(EVSService, self).__init__(ak, sk, 'EVS', region, protocol, host, port)
def list(self, project_id, opts=None):
uri = '/v2/%s/cloudvolumes' % project_id
if opts:
str_opts = self.convertDictOptsToString(opts)
uri = '?'.join([uri, str_opts])
return self.get(uri)
def create_volume(self, project_id, availability_zone, size, volume_type,
backup_id=None, description=None, name=None, imageRef=None, count=None):
"""
{
"volume": {
"backup_id": null,
"count": 1,
"availability_zone": "az1.dc1",
"description": "test_volume_1",
"size": 120,
"name": "test_volume_1",
"imageRef": null,
"volume_type": "SSD"
}
}
:param project_id:
:param availability_zone:
:param size:
:param volume_type:
:param backup_id:
:param description:
:param name:
:param imageRef:
:param count:
:return: dict
{
"job_id": "70a599e0-31e7-49b7-b260-868f441e862b",
}
or
{
"error": {
"message": "XXXX",
"code": "XXX"
}
}
Get job detail result:
{
u'body': {
u'status': u'RUNNING',
u'fail_reason': None,
u'job_id': u'8aace0c651b0a02301521ae1f96c6138',
u'job_type': u'createVolume',
u'entities': {
u'volume_id': u'9bd6fa88-0e60-48e5-ae61-7e028dbdf045'
},
u'end_time': u'',
u'begin_time': u'2016-01-07T06: 59: 23.115Z',
u'error_code': None
},
u'status': 200
}
{
u'body': {
u'status': u'SUCCESS',
u'fail_reason': None,
u'job_id': u'8aace0c651b0a02301521ae1f96c6138',
u'job_type': u'createVolume',
u'entities': {
u'volume_id': u'9bd6fa88-0e60-48e5-ae61-7e028dbdf045'
},
u'end_time': u'2016-01-07T06: 59: 48.279Z',
u'begin_time': u'2016-01-07T06: 59: 23.115Z',
u'error_code': None
},
u'status': 200
}
Failed job result:
{
u'body': {
u'status': u'FAIL',
u'fail_reason': u"EbsCreateVolumeTask-fail:badRequest: Invalid input received: Availability zone 'cn-north-1' is invalid",
u'job_id': u'8aace0c651b0a02301521ab7e58660ca',
u'job_type': u'createVolume',
u'entities': {
},
u'end_time': u'2016-01-07T06: 13: 25.809Z',
u'begin_time': u'2016-01-07T06: 13: 25.509Z',
u'error_code': u'EVS.5400'
},
u'status': 200
}
"""
uri = '/v2/%s/cloudvolumes' % project_id
request_body_dict = {}
volume = {}
volume['availability_zone'] = availability_zone
volume['size'] = size
volume['volume_type'] = volume_type
if backup_id:
volume['backup_id'] = backup_id
if description:
volume['description'] = description
if name:
volume['name'] = name
if imageRef:
volume['imageRef'] = imageRef
if count:
volume['count'] = count
request_body_dict['volume'] = volume
request_body_string = json.dumps(request_body_dict)
response = self.post(uri, request_body_string)
return response
def delete_volume(self, project_id, volume_id):
"""
DELETE /v2/{tenant_id}/cloudvolumes/{volume_id}
:return:
"""
uri = '/v2/%s/cloudvolumes/%s' % (project_id, volume_id)
response = self.delete(uri)
return response
def get_volume_detail(self, project_id, volume_id):
uri = "/v2/%s/volumes/%s" % (project_id, volume_id)
response = self.get(uri)
return response
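# A minimal usage sketch (credentials, endpoint, availability zone and project ID are
# placeholders): creates a 120 GB SSD volume and returns the asynchronous job response
# documented in the create_volume docstring above.
if __name__ == "__main__":
    evs = EVSService('ak', 'sk', 'cn-north-1', 'https', 'evs.example.com', 443)
    job = evs.create_volume('project-id', 'az1.dc1', 120, 'SSD', name='test_volume_1')
    print(job)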
| 2.015625
| 2
|
mendeley/client_library.py
|
ScholarTools/ST_mendeley_python
| 0
|
12778431
|
# -*- coding: utf-8 -*-
"""
The goal of this code is to support hosting a client library. This module
should in the end function similarly to the Mendeley Desktop.
Syncing
-------------------------------------------
Jim's next goals
----------------
1) Handle deleted IDs - needs an API update
2) Meta Data Editor
- nice query interface
- needs to handle local/dirty docs
- autodownload files when opening ...
3) Update by PMID ...
4) How to sync deleted ids?
Features
--------
1) Initializes a representation of the documents stored in a user's library
2) Synchronizes the local library with updates that have been made remotely
Usage
-----
from mendeley import client_library
cl = client_library.UserLibrary(verbose=True)
wtf = cl.has_docs([14581232,10529706,12345])
"""
#Standard Library Imports
from typing import Optional, Union, TypeVar, List
import pickle
from datetime import datetime
from timeit import default_timer as ctime
import os
import sys
import json
#Third Party Imports
import pandas as pd
from sqlalchemy import desc
# Local imports
from .api import API
from .db_tables import DB, Document
from . import errors
from . import models
from . import utils
from . import config
from .utils import display_class, quotes
# Optional Local Imports
#-----------------------------
#These need to be updated
#from .optional import rr
#from .optional import pdf_retrieval
#from . import db_interface
# from . import archive_library
fstr = utils.float_or_none_to_string
cld = utils.get_list_class_display
class LibraryOptions(object):
#TODO: Support default sync resolution mechanism
#TODO: Load options from file???? - GUI?
pass
class UserLibrary:
"""
Attributes
----------
"""
api : 'API'
db : 'DB'
user_name : 'str'
verbose : 'bool'
cleaner : 'LibraryCleaner'
def __init__(self, user_name=None, verbose=False, sync=True,
force_new=False):
"""
Inputs
------
user_name : string (default None)
If no user is specified the default user is loaded from the
configuration file.
verbose : bool (default False)
sync : bool (default True)
force_new : bool (default False)
If true the library is not loaded from disk.
"""
self.api = API(user_name=user_name,verbose=verbose)
self.user_name = self.api.user_name
self.verbose = verbose
# path handling
# -------------
root_path = config.get_save_root(['client_library'], True)
save_name = utils.user_name_to_file_name(self.user_name) + '.pickle'
self.file_path = os.path.join(root_path, save_name)
self.db = DB(self.user_name)
self.db_session = self.db.get_session()
self.cleaner = LibraryCleaner(self.db)
if sync:
self.sync()
def __repr__(self):
pv = ['api', cld(self.api),
'db', cld(self.db),
'dirty_db', self.dirty_db,
'user_name', self.user_name,
'file_path', self.file_path,
'sync_result',cld(self.sync_result),
'verbose', self.verbose,
              'methods', '--------------------',
'has_docs','Returns whether library has the documents']
return utils.property_values_to_string(pv)
def has_docs(self,ids,type='pmid'):
"""
Parameters
----------
        ids : list
            Identifiers to look for in the local library.
        type : str
            One of 'pmid', 'doi', or 'arxiv'.
"""
output = []
session = self.db_session
if type == 'pmid':
for id in ids:
temp = session.query(self.db.Document.pmid).filter_by(pmid = id).first()
output.append(bool(temp))
elif type =='doi':
for id in ids:
temp = session.query(self.db.Document.doi).filter_by(doi = id).first()
output.append(bool(temp))
        elif type == 'arxiv':
            for id in ids:
                temp = session.query(self.db.Document.arxiv).filter_by(arxiv=id).first()
                output.append(bool(temp))
else:
raise Exception('Unrecognized id type')
return output
def sync(self,verbose=None):
"""
Syncs the library with the Mendeley server.
Parameters
----------
verbose : bool (default, inherit from class value, self.verbose)
TODO:
? How do we know if something has been restored from the trash?
"""
if verbose is None:
verbose = self.verbose
self.sync_result = Sync(self.api, self.db, verbose=verbose)
# def archive(self):
# archivist = archive_library.Archivist(library=self, api=self.api)
# archivist.archive()
def get_documents(self,
query_dict,
as_dict=False):
session = self.db_session
temp = session.query(self.db.Document).filter_by(**query_dict)
#TODO: Support hiding deleted and trashed ...
docs = temp.all()
if docs and as_dict:
return [x.as_dict for x in docs]
else:
return docs
def get_document(self,
query_dict,
as_dict=False):
"""
Returns the document (i.e. metadata) based on a specified identifier.
Parameters
----------
as_dict : bool (default False)
- True, returned as dictionary
- False, SQLAlchemy objects
Improvements
------------
        - add methods that return counts or partial queries for query building
Returns
-------
Examples
--------
from mendeley import client_library
c = client_library.UserLibrary(verbose=True)
doc = c.get_document({'title':'magazine article title'})
"""
session = self.db_session
temp = session.query(self.db.Document).filter_by(**query_dict)
doc = temp.first()
if doc and as_dict:
return doc.as_dict()
else:
return doc
def add_to_library(self,
doi=None,
pmid=None,
check_in_lib=False,
add_pdf=True,
file_path=None):
"""
JAH: I think this method is still under development ...
Parameters
----------
doi : string
check_in_lib : bool
If true,
add_pdf : bool
Improvements
------------
*
- allow adding via PMID
- pdf entry should be optional with default true
- also need to handle adding pdf if possible but no error
if not possible
"""
#JAH: Why doesn't this take in any inputs on the check???
if check_in_lib and self.check_for_document():
raise errors.DuplicateDocumentError('Document already exists in library.')
#----------------------------------------------------------------------
# Get paper information from DOI
"""
Even then, this requires a bit of thinking. Why are we asking rr for
paper information? Perhaps we need another repository ...
- Pubmed
- Crossref
- others????
"""
paper_info = rr.retrieve_all_info(input=doi, input_type='doi')
# Turn the BaseEntry object into a formatted dict for submission
# to the Mendeley API
formatted_entry = self._format_doc_entry(paper_info.entry)
# Create the new document
new_document = self.api.documents.create(formatted_entry)
"""
add_pdf
* I want to be able to specify the path to the file to add.
* Perhaps instead we want:
pdf = file_path
pdf = 'must_retrieve'
pdf = 'retrieve_or_request' - If not available, make a request for it
            pdf = 'retrieve_if_possible'
I'm not thrilled with this specific interface, but I'd like something
like this.
We might want an additional package that focuses on retrieving pdfs.
The big question is how to support letting these interfaces interact
efficiently without doing things multiple times. We can answer this
at a later time.
pdf retrieval:
- Interlibrary loan
- ScholarSolutions
- PyPub
"""
# Get pdf
if add_pdf:
pdf_content = pdf_retrieval.get_pdf(paper_info)
new_document.add_file({'file' : pdf_content})
def update_file_from_local(self, doi=None, pmid=None):
"""
This is for updating a file in Mendeley without losing the annotations.
The file must be saved somewhere locally, and the file path is selected
by using a pop up file selection window.
Parameters
----------
doi - DOI of document in library to update
pmid - PMID of document in library to update
"""
if doi is None and pmid is None:
raise KeyError('Please enter a DOI or PMID for the updating document.')
document = self.get_document(doi=doi, pmid=pmid, return_json=True)
if document is None:
raise errors.DOINotFoundError('Could not locate DOI in library.')
new_file_path = self._file_selector()
if new_file_path is None:
return
with open(new_file_path, 'rb') as file:
file_content = file.read()
doc_id = document.get('id')
saved_annotations_string = self.api.annotations.get(document_id=doc_id)
saved_annotations = json.loads(saved_annotations_string)
if isinstance(saved_annotations, list):
saved_annotations = saved_annotations[0]
has_file = document.get('file_attached')
if has_file:
_, _, file_id = self.api.files.get_file_content_from_doc_id(doc_id=doc_id, no_content=True)
self.api.files.delete(file_id=file_id)
params = {'title': document.get('title'), 'id': doc_id}
self.api.files.link_file(file=file_content, params=params)
# Reconfirm that the file was added
updated = self.get_document(doi=doi, pmid=pmid, return_json=True)
has_file = updated.get('file_attached')
if not has_file:
raise FileNotFoundError('File was not attached.')
new_annotations_string = self.api.annotations.get(document_id=doc_id)
if new_annotations_string is None or saved_annotations_string != new_annotations_string:
self.api.annotations.create(annotation_body=saved_annotations)
def _file_selector(self):
#TODO: Test this with non * imports
#
#Why is this line needed???
app = QApplication(sys.argv)
dialog = QFileDialog()
# dialog.setFileMode(QFileDialog.DirectoryOnly)
dialog.setViewMode(QFileDialog.List)
dialog.setDirectory(os.path.expanduser('~'))
if dialog.exec_():
filenames = dialog.selectedFiles()
return filenames[0]
else:
return None
def _format_doc_entry(self, entry):
"""
Mendeley API has specific input formatting when creating a document.
- Parses author names and separates into separate "first_name" and
"last_name" fields.
- Restricts keywords from being > 50 characters. If one is found,
it is split by spaces and saved as separate keywords.
- Changes "publication" to "publisher" to fit syntax.
- Sets "type" to "journal"
- Saves DOI within "identifiers" field.
Parameters
----------
entry : BaseEntry object
See pypub.scrapers.base_objects.py
Unformatted paper information, usually from PaperInfo class
Returns
-------
entry : dict
Paper information with proper formatting applied.
"""
if not isinstance(entry, dict):
entry = entry.__dict__
# Format author names
authors = entry.get('authors')
formatted_author_names = None
if authors is not None:
if isinstance(authors[0], str):
author_names = [x for x in authors]
elif isinstance(authors[0], dict):
author_names = [x.get('name') for x in authors]
else:
author_names = [x.name for x in authors]
formatted_author_names = []
# Parse author names
for name in author_names:
name_dict = dict()
name = name.strip()
parts = name.split(' ')
# If format is "firstname middleinitial. lastname"
if '.' in name and len(parts) == 3:
name_dict['first_name'] = parts[0]
name_dict['last_name'] = parts[2]
# If format is "lastname, firstname"
elif ',' in name:
name_dict['first_name'] = parts[1]
name_dict['last_name'] = parts[0]
# If format is "lastname firstinitial"
elif len(parts) == 2 and '.' in parts[1]:
name_dict['first_name'] = parts[1]
name_dict['last_name'] = parts[0]
# If format is only "lastname"
elif len(parts) == 1:
name_dict['last_name'] = parts[0]
name_dict['first_name'] = ''
# If there are multiple initials
elif len(parts) > 3:
initials = ''
for part in parts:
if '.' in part:
initials += part
else:
name_dict['last_name'] = part
name_dict['first_name'] = initials
# Otherwise assume format is "firstname lastname" or "firstinitial. lastname"
else:
name_dict['first_name'] = parts[0]
name_dict['last_name'] = parts[1]
formatted_author_names.append(name_dict)
# Make sure keywords are <= 50 characters
kw = entry.get('keywords')
if kw is not None:
# Check if it's one long string, and split if so
if isinstance(kw, str):
kw = kw.split(', ')
to_remove = []
for keyword in kw:
if len(keyword) > 50:
to_remove.append(keyword)
smaller_keywords = keyword.split(' ')
for word in smaller_keywords:
kw.append(word)
for long_word in to_remove:
kw.remove(long_word)
entry['keywords'] = kw
# Get rid of alpha characters in Volume field
vol = entry.get('volume')
if vol is not None:
entry['volume'] = ''.join(c for c in vol if not c.isalpha())
# Get rid of alpha characters in Year field
year = entry.get('year')
if year is not None:
entry['year'] = ''.join(c for c in year if not c.isalpha())
if entry['year'] == '':
entry['year'] = None
doi = entry.get('doi')
if doi is not None:
doi = doi.lower()
entry['identifiers'] = {'doi' : doi}
entry['authors'] = formatted_author_names
entry['publisher'] = entry['publication']
entry['type'] = 'journal'
return entry
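# Worked example of the author-name parsing in _format_doc_entry above: an entry with
#   authors = ['John A. Smith']   (format "firstname middleinitial. lastname")
# is formatted to
#   entry['authors'] == [{'first_name': 'John', 'last_name': 'Smith'}]
# i.e. the middle initial is dropped by the three-part branch.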
class Sync(object):
"""
This object should perform the syncing and include some
debugging information as well.
Attributes
----------
raw : json
df :
"""
def __init__(self, api:API, db:DB, verbose=False):
"""
Inputs
------
        api : API
        db : DB
        verbose : bool
"""
self.db = db
self.api = api
self.verbose = verbose
self.verbose_print("Starting sync")
#What happens to trashed documents?
        #- we can request trashed documents ...
#There is no notification that a document has been trashed ...
#- we need to request trashed documents ...
#deleted_since
session = db.get_session()
#=> I want to get the times
#wtf = session.query(db.)
import pdb
pdb.set_trace()
#----------------------------------------------------------------------
#TODO: Does our code support an empty database?
last_modified = session.query(db.Document.last_modified).order_by(desc('last_modified')).first()
last_modified = last_modified[0]
new_docs = api.documents.get(modified_since=last_modified,limit=100,return_type='json')
result = db.add_documents(new_docs,session=session,drop_time=last_modified)
if result.n_different > 0:
self.verbose_print(result.get_summary_string())
else:
self.verbose_print("No new documents found in sync")
count = 0
while api.has_next_link:
count += 100
#TODO: Fix this to occur after we get the new ones
print("Requesting more docs starting at {}".format(count))
docs_to_add = api.next()
r2 = db.add_documents(docs_to_add,session=session,drop_time=last_modified)
self.verbose_print(r2.get_summary_string())
result.merge(r2)
self.add_result = result
session.commit()
#Deleted docs
#----------------------------------------------------------------------
deleted_docs = api.documents.get_deleted(return_type='json')
#Handling updated docs - sync to server
#----------------------------------------------------------------------
#Note, conflicts have already been handled at this point ...
dirty_docs = session.query(db.Document).filter_by(is_dirty=True).all() # type: List[Document]
if dirty_docs:
            self.verbose_print("Pushing %d locally modified (dirty) documents to the server" % len(dirty_docs))
for doc in dirty_docs:
if doc.is_trashed:
pass
elif doc.is_deleted:
pass
else:
#Update
temp = doc.as_dict()
r = api.documents.update(temp['id'], temp)
doc.commit(_is_dirty=False)
#Look for deleted docs
#Look for trash
session.close()
# #What if in trash?
# #What if deleted????
# temp = api.documents.get_by_id()
#Now, let's look at dirty docs ...
#Note, any conflicts will have already been handled ...
self.verbose_print("Sync completed")
#trashed_docs = api.trash.get()
"""
- /documents/?modified_since=2020-01-22T19:36:03.000Z&limit=100&view=all HTTP/1.1
- GET /documents/?limit=100&deleted_since=2020-01-22T19:36:03.000Z HTTP/1.1
- GET /trash/?modified_since=2020-01-22T19:36:03.000Z&limit=100&view=all HTTP/1.1
- GET /files/?include_trashed=true&limit=100&deleted_since=2020-01-22T19:36:09.000Z HTTP/1.1
- GET /files/?added_since=2020-01-22T19:36:09.000Z&include_trashed=true&limit=100 HTTP/1.1
- GET /annotations/?modified_since=2020-01-22T19:36:09.000Z&limit=200&include_trashed=true HTTP/1.1
- GET /annotations/?limit=200&include_trashed=true&deleted_since=2020-01-22T19:36:10.000Z HTTP/1.1
- GET /recently_read/ HTTP/1.1- POST /events/_batch/ HTTP/1.1
"""
def __repr__(self):
return display_class(self,
[ 'db', cld(self.db),
'api', cld(self.api),
'verbose', self.verbose,
'add_result',cld(self.add_result)])
def update_sync(self):
"""
Update Steps
------------
1.
"""
self.verbose_print('Running "UPDATE SYNC"')
start_sync_time = ctime()
# Let's work with everything as a dataframe
self.docs = _raw_to_data_frame(self.raw_json)
# Determine the document that was updated most recently. We'll ask for
# everything that changed after that time. This avoids time sync
# issues with the server and the local computer since everything
# is done relative to the timestamps from the server.
newest_modified_time = self.docs['last_modified'].max()
self.newest_modified_time = newest_modified_time
# The problem with the above approach is that Mendeley returns
# documents updated since AND at 'newest_modified_time'. This
# means that the call always returns >= 1 document.
# Try adding a second to 'newest_modified_time'
later_modified_time = newest_modified_time + pd.Timedelta('00:00:01')
# Remove old ids
#------------------------------------
self.get_trash_ids()
#self.get_deleted_ids(newest_modified_time)
self.get_deleted_ids(later_modified_time)
self.remove_old_ids()
# Process new and updated documents
# ------------------------------------
updates_and_new_entries_start_time = ctime()
self.verbose_print('Checking for modified or new documents')
#self.get_updates_and_new_entries(newest_modified_time)
self.get_updates_and_new_entries(later_modified_time)
self.time_modified_processing = ctime() - updates_and_new_entries_start_time
self.verbose_print('Done updating modified and new documents')
self.raw_json = self.docs['json'].tolist()
self.time_update_sync = ctime() - start_sync_time
self.verbose_print('Done running "UPDATE SYNC" in %s seconds' % fstr(self.time_update_sync))
def get_updates_and_new_entries(self, newest_modified_time):
"""
# 3) check modified since - add/update as necessary
#-------------------------------------------------
# I think for now to keep things simple we'll relate everything
# to the newest last modified value, rather than worrying about
# mismatches in time between the client and the server
"""
start_modified_time = ctime()
doc_set = self.api.documents.get(modified_since=newest_modified_time, view='all',limit=0)
nu_docs_as_json = [x.json for x in doc_set.docs]
self.new_and_updated_docs = doc_set.docs
self.time_modified_check = ctime() - start_modified_time
if len(nu_docs_as_json) == 0:
return
self.verbose_print('Request returned %d updated or new docs' % len(nu_docs_as_json))
df = _raw_to_data_frame(nu_docs_as_json)
is_new_mask = df['created'] > newest_modified_time
new_rows_df = df[is_new_mask]
updated_rows_df = df[~is_new_mask]
# Log the new entries in the database
#Old code
# #for x in range(len(new_rows_df)):
# row = new_rows_df.iloc[x]
# db_interface.add_to_db(row)
if len(new_rows_df) > 0:
self.verbose_print('%d new documents found' % len(new_rows_df))
self.docs = self.docs.append(new_rows_df)
self.verbose_print('Updating database with new entries')
# Log the new entries in the database
for x in range(len(new_rows_df)):
row = new_rows_df.iloc[x]
db_interface.add_to_db(row)
#JAH TODO: I would prefer to have the message of # updated
#first then messages about the dbupdates
#
# At a quick glance I need to look more closely at the indices work
# Log the updated entries in the database
for x in range(len(updated_rows_df)):
row = updated_rows_df.iloc[x]
db_interface.update_db_entry(row)
if len(updated_rows_df) > 0:
self.verbose_print('%d updated documents found' % len(updated_rows_df))
in_old_mask = updated_rows_df.index.isin(self.docs.index)
if not in_old_mask.all():
print('Logic error, updated entries are not in the original')
raise Exception('Logic error, updated entries are not in the original')
updated_indices = updated_rows_df.index
self.docs.drop(updated_indices, inplace=True)
self.docs = pd.concat([self.docs, updated_rows_df])
def get_trash_ids(self):
"""
Here we are looking for documents that have been moved to the trash.
??? Can we check the trash that's been moved back to the main
??? => would this show up as an update?
"""
trash_start_time = ctime()
self.verbose_print('Checking trash')
trash_set = self.api.trash.get(limit=0, view='ids')
self.trash_ids = trash_set.docs
self.verbose_print('Finished checking trash, %d documents found' % len(self.trash_ids))
self.time_trash_retrieval = ctime() - trash_start_time
def get_deleted_ids(self, newest_modified_time):
"""
"""
# 2) Check deleted
deletion_start_time = ctime()
self.verbose_print('Requesting deleted file IDs')
#TODO: What happens if newest_modified_time is empty????
# => Do we even run this code???
temp = self.api.documents.get(deleted_since=newest_modified_time,limit=0)
self.deleted_ids = temp.docs
self.verbose_print('Done requesting deleted file IDs, %d found' % len(self.deleted_ids))
self.time_deleted_check = ctime() - deletion_start_time
def remove_old_ids(self):
"""
JAH: When is this called????
"""
# Removal of ids
# --------------
ids_to_remove = self.trash_ids + self.deleted_ids
if len(ids_to_remove) > 0:
delete_mask = self.docs.index.isin(ids_to_remove)
keep_mask = ~delete_mask
self.n_docs_removed = sum(delete_mask)
self.docs = self.docs[keep_mask]
def verbose_print(self, msg):
if self.verbose:
print(msg)
class LibraryCleaner():
db : 'DB'
def __init__(self,db : DB):
self.db = db
def get_docs_no_pmid(self,since=None,sort=None,limit=None):
"""
sort:
'old_first'
'new_first'
:param since:
:param sort:
:param limit:
:return:
"""
#TODO: implement since ...
session = self.db.get_session()
Doc = self.db.Document
q = session.query(Doc).filter_by(pmid=None)
        if sort == 'new_first' or sort is None:
q.order_by(Doc.last_modified)
else:
q.order_by(desc(Doc.last_modified))
if limit is not None:
q.limit(limit)
#desc
wtf = q.all()
import pdb
pdb.set_trace()
pass
def parse_datetime(x):
return datetime.strptime(x, "%Y-%m-%dT%H:%M:%S.%fZ")
# def datetime_to_string(x):
# return x.strftime("%Y-%m-%dT%H:%M:%S.%fZ")
def parse_issn(x):
# This value is not necessarily clean
# e.g 17517214 => 1751-7214???
try:
return x.get('issn', '')
except:
return ''
def parse_pmid(x):
try:
return x.get('pmid', '')
except:
return ''
def parse_doi(x):
try:
return x.get('doi', '').lower()
except:
return ''
def raise_(ex):
raise ex
| 2.25
| 2
|
imagr_site/imagr_site/settings.py
|
defzzd/django-imagr
| 0
|
12778432
|
"""
Django settings for imagr_site project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# settings for MailGun email
import credentials
credentials.set_credentials()
EMAIL_HOST = os.environ['EMAIL_HOST']
EMAIL_HOST_USER = os.environ['EMAIL_HOST_USER']
EMAIL_HOST_PASSWORD = os.environ['EMAIL_HOST_PASSWORD']
EMAIL_PORT = 25
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
import deployment
deployment.setup_deployment()
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = os.environ['DEBUG']
TEMPLATE_DEBUG = os.environ['TEMPLATE_DEBUG']
# There's only one host entry the Django app needs when it's running on
# gunicorn behind an Nginx server, and that is '*', because Nginx is
# doing all the filtering for us with our entry in the nginx.conf file.
# Django still wants ALLOWED_HOSTS to be a list, so:
ALLOWED_HOSTS = list(os.environ['ALLOWED_HOSTS'])
# CSRF cookie settings defaults should be permissible for this demonstration,
# because we don't need to handle a certificate yet.
# In reality we'd want to use a certificate and set them to True
# via the deployment file.
# CSRF_COOKIE_SECURE = os.environ['CSRF_COOKIE_SECURE']
# SESSION_COOKIE_SECURE = os.environ['SESSION_COOKIE_SECURE']
CONN_MAX_AGE = os.environ['CONN_MAX_AGE']
# Application definition
LOGIN_REDIRECT_URL = "imagr_app:front_page"
#LOGIN_URL = "RegistrationView"
#LOGOUT_URL
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'imagr_app',
'registration',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'imagr_site.urls'
WSGI_APPLICATION = 'imagr_site.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql_psycopg2',
'NAME': 'imagr',
'USER': 'imagr',
'PASSWORD': os.environ['DATABASE_PASSWORD'],
'HOST': 'localhost',
}
}
# Thanks to Hybrid at:
# http://stackoverflow.com/questions/21978562/django-test-error-permission-denied-to-create-database-using-heroku-postgres
import sys
if 'test' in sys.argv:
DATABASES['default'] = {'ENGINE': 'django.db.backends.sqlite3'}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'America/Los_Angeles'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL = 'imagr_app.ImagrUser'
#LOGIN_URL = "/"
ACCOUNT_ACTIVATION_DAYS = 60
# These variables are set this way for deployment, overwriting values
# set above (like DEBUG).
STATIC_ROOT = "static/"
MEDIA_ROOT = "media/"
#DEBUG = False
ALLOWED_HOSTS = ['*',]
# There is a risk that the greater security of setting
# these to True will not work unless we get an SSL
# certificate, and we don't know yet whether Amazon EC2
# will give us a certificate or let us use one of theirs
# CSRF_COOKIE_SECURE = "True"
# SESSION_COOKIE_SECURE = "True"
# Performance Optimizations
| 1.929688
| 2
|
modules.py
|
amanwalia92/VisionChess
| 0
|
12778433
|
import cv2
import numpy as np
import time
'''
Parameters Used inside Code
'''
#Gaussian kernel size used for blurring
G_kernel_size = (3,3)
#canny thresholding parameters
canny_u_threshold = 200
canny_l_threshold = 80
# define the upper and lower boundaries of the HSV pixel
# intensities to be considered 'skin'
lower = np.array([0, 48, 80], dtype = "uint8")
upper = np.array([20, 255, 255], dtype = "uint8")
black_lower = np.array([0, 0, 0], dtype = "uint8")
black_upper = np.array([180, 255, 30], dtype = "uint8")
#threshold for % of skin area detected
skinThresh = 0.00025
#Minimum number of white pixels needed for a square to be counted as occupied
min_white_count = 1
#minimum number of black detected pixels in square
min_black_pixels = 200
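# A minimal sketch (the input frame below is a dummy array) showing how the bounds above are
# typically applied: convert a BGR frame to HSV and mask the pixels that fall inside the
# skin / black ranges.
if __name__ == "__main__":
    frame = np.zeros((480, 640, 3), dtype="uint8")
    hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
    skin_mask = cv2.inRange(hsv, lower, upper)
    black_mask = cv2.inRange(hsv, black_lower, black_upper)
    print(skin_mask.sum(), black_mask.sum())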
| 3.015625
| 3
|
mcedit_ui/layer_item.py
|
LagoLunatic/MinishEdit
| 10
|
12778434
|
from PySide2.QtGui import *
from PySide2.QtCore import *
from PySide2.QtWidgets import *
import traceback
from mclib.visual_zone import VisualZone
class LayerItem(QGraphicsRectItem):
def __init__(self, room, layer_index, renderer, main_window):
super().__init__()
self.room = room
self.layer_index = layer_index
self.renderer = renderer
self.rom = room.rom
self.main_window = main_window
try:
self.render_layer()
except Exception as e:
stack_trace = traceback.format_exc()
error_message = "Error rendering layer in room %02X-%02X:\n" % (room.area.area_index, room.room_index)
error_message += str(e) + "\n\n" + stack_trace
print(error_message)
def layer_clicked(self, x, y, button):
if x < 0 or y < 0 or x >= self.room.width or y >= self.room.height:
return
tile_x = x//0x10
tile_y = y//0x10
x = tile_x*0x10
y = tile_y*0x10
curr_tileset_scene = self.main_window.selected_tileset_graphics_scene
if button == Qt.LeftButton:
for x_off in range(curr_tileset_scene.selection_w):
for y_off in range(curr_tileset_scene.selection_h):
curr_tile_x_on_layer = tile_x + x_off
curr_tile_y_on_layer = tile_y + y_off
curr_x_on_layer = curr_tile_x_on_layer*0x10
curr_y_on_layer = curr_tile_y_on_layer*0x10
if curr_x_on_layer >= self.room.width:
continue
if curr_y_on_layer >= self.room.height:
continue
tile_index_16x16 = curr_tileset_scene.selected_tile_indexes[x_off + y_off*curr_tileset_scene.selection_w]
tile_pixmap = self.get_tile_pixmap_by_16x16_index(tile_index_16x16, curr_x_on_layer, curr_y_on_layer)
tile_item = self.tile_graphics_items_by_pos[curr_tile_x_on_layer][curr_tile_y_on_layer]
tile_item.setPixmap(tile_pixmap)
room_width_in_16x16_tiles = self.room.width//16
tile_index_on_layer = curr_tile_y_on_layer*room_width_in_16x16_tiles + curr_tile_x_on_layer
self.layer.data[tile_index_on_layer] = tile_index_16x16
self.layer.has_unsaved_changes = True
elif button == Qt.RightButton:
room_width_in_16x16_tiles = self.room.width//16
tile_index_on_layer = tile_y*room_width_in_16x16_tiles + tile_x
tile_index_on_tileset = self.layer.data[tile_index_on_layer]
curr_tileset_scene.select_tile_by_index(tile_index_on_tileset)
def render_layer(self):
room = self.room
area = room.area
layer_index = self.layer_index
if room.area.uses_256_color_bg1s:
if layer_index == 2:
self.render_layer_mapped(color_mode=256)
else:
# Their BG1s may be unused? They seem to error out when trying to render them. TODO figure them out
pass
else:
if layer_index == 3:
if area.get_gfx_asset_list(room.gfx_index).tile_mappings_8x8[layer_index] is None:
return
self.render_layer_mapped(color_mode=16)
elif room.layers_asset_list.tile_mappings_8x8[layer_index] is not None:
self.render_layer_mapped(color_mode=16)
else:
self.render_layer_16_color()
def render_layer_16_color(self):
room = self.room
area = room.area
layer_index = self.layer_index
self.tile_graphics_items_by_pos = []
for tile_x in range(room.width//0x10):
self.tile_graphics_items_by_pos.append([])
for tile_y in range(room.height//0x10):
self.tile_graphics_items_by_pos[tile_x].append(None)
gfx_asset_list = area.get_gfx_asset_list(room.gfx_index)
orig_gfx_data = gfx_asset_list.gfx_data
if layer_index in [1, 3]:
self.gfx_data = orig_gfx_data.read_raw(0x4000, len(orig_gfx_data)-0x4000)
else:
self.gfx_data = orig_gfx_data
self.palettes = self.renderer.generate_palettes_for_area_by_gfx_index(room.area, room.gfx_index)
self.tileset_data = room.area.tilesets_asset_list.tileset_datas[layer_index]
if self.tileset_data is None:
return
self.layer = room.layers_asset_list.layers[layer_index]
if self.layer is None:
raise Exception("Layer BG%d has no layer data" % layer_index)
if len(self.layer.data) == 0:
raise Exception("Layer BG%d has zero-length layer data" % layer_index)
if self.layer.data[0] == 0xFFFF:
# No real layer data here
return
self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids = {}
room_width_in_16x16_tiles = room.width//16
self.cached_tile_pixmaps_by_16x16_index = {}
for i in range(len(self.layer.data)):
tile_index_16x16 = self.layer.data[i]
x = (i % room_width_in_16x16_tiles)*16
y = (i // room_width_in_16x16_tiles)*16
tile_pixmap = self.get_tile_pixmap_by_16x16_index(tile_index_16x16, x, y)
tile_item = QGraphicsPixmapItem(tile_pixmap, self)
tile_item.setPos(x, y)
self.tile_graphics_items_by_pos[x//0x10][y//0x10] = tile_item
def get_tile_pixmap_by_16x16_index(self, tile_index_16x16, x, y):
if tile_index_16x16 in self.cached_tile_pixmaps_by_16x16_index:
tile_pixmap = self.cached_tile_pixmaps_by_16x16_index[tile_index_16x16]
else:
tile_pixmap = self.render_tile_pixmap_by_16x16_tile_index(tile_index_16x16, x, y)
self.cached_tile_pixmaps_by_16x16_index[tile_index_16x16] = tile_pixmap
return tile_pixmap
def render_tile_pixmap_by_16x16_tile_index(self, tile_index_16x16, x, y):
room = self.room
layer_index = self.layer_index
gfx_data = self.gfx_data
palettes = self.palettes
zone_ids = []
if self.room.zone_lists:
zone_ids = VisualZone.get_zone_ids_overlapping_point(self.room.zone_lists, x, y)
if zone_ids:
gfx_data = gfx_data.copy()
for zone_id in zone_ids:
zone_data = room.visual_zone_datas[zone_id]
if zone_data.palette_group_index is not None:
palettes = self.renderer.generate_palettes_from_palette_group_by_index(zone_data.palette_group_index)
for zone_gfx_data_ptr, zone_gfx_load_offset in zone_data.gfx_load_datas:
if layer_index in [1, 3]:
zone_gfx_load_offset -= 0x4000
if zone_gfx_load_offset < 0:
continue
zone_gfx_data = self.rom.read_raw(zone_gfx_data_ptr, 0x1000)
gfx_data.write_raw(zone_gfx_load_offset, zone_gfx_data)
tile_image_16x16 = QImage(16, 16, QImage.Format_ARGB32)
tile_image_16x16.fill(0)
painter = QPainter(tile_image_16x16)
zone_ids_tuple = tuple(zone_ids)
if zone_ids_tuple not in self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids:
self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids[zone_ids_tuple] = {}
cached_8x8_tile_images_by_tile_attrs = self.cached_8x8_tile_images_by_tile_attrs_and_zone_ids[zone_ids_tuple]
try:
for tile_8x8_i in range(4):
tile_attrs = self.tileset_data[tile_index_16x16*4 + tile_8x8_i]
horizontal_flip = (tile_attrs & 0x0400) > 0
vertical_flip = (tile_attrs & 0x0800) > 0
# Remove flip bits so all 4 orientations can be cached together as one.
tile_attrs &= (~0x0C00)
if tile_attrs in cached_8x8_tile_images_by_tile_attrs:
data = cached_8x8_tile_images_by_tile_attrs[tile_attrs]
else:
pil_image = self.renderer.render_tile_by_tile_attrs(tile_attrs, gfx_data, palettes)
data = pil_image.tobytes('raw', 'BGRA')
cached_8x8_tile_images_by_tile_attrs[tile_attrs] = data
                # A QImage built from a Python buffer does not copy it, so caching QImage objects
                # directly leads to corrupted-looking tiles once the backing bytes are freed.
                # Cache just the raw image data instead and rebuild the QImage each time.
tile_image_8x8 = QImage(data, 8, 8, QImage.Format_ARGB32)
if horizontal_flip and vertical_flip:
tile_image_8x8 = tile_image_8x8.transformed(QTransform.fromScale(-1, -1))
elif horizontal_flip:
tile_image_8x8 = tile_image_8x8.transformed(QTransform.fromScale(-1, 1))
elif vertical_flip:
tile_image_8x8 = tile_image_8x8.transformed(QTransform.fromScale(1, -1))
x_on_16x16_tile = (tile_8x8_i % 2)*8
y_on_16x16_tile = (tile_8x8_i // 2)*8
painter.drawImage(x_on_16x16_tile, y_on_16x16_tile, tile_image_8x8)
        finally:
            # The painter must always be ended, even if rendering fails, or the program will crash
            painter.end()
tile_pixmap = QPixmap.fromImage(tile_image_16x16)
return tile_pixmap
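    # For reference, a minimal sketch (assumed GBA-style attribute layout) of how the
    # fields decoded above could be unpacked on their own:
    #   def _split_tile_attrs(tile_attrs):
    #       tile_number = tile_attrs & 0x03FF
    #       h_flip = bool(tile_attrs & 0x0400)
    #       v_flip = bool(tile_attrs & 0x0800)
    #       palette_index = (tile_attrs >> 12) & 0x0F
    #       return tile_number, h_flip, v_flip, palette_index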
def render_layer_mapped(self, color_mode=256):
room = self.room
layer_index = self.layer_index
palettes = self.renderer.generate_palettes_for_area_by_gfx_index(room.area, room.gfx_index)
layer_image = self.renderer.render_layer_mapped(self.room, palettes, layer_index, color_mode=color_mode)
data = layer_image.tobytes('raw', 'BGRA')
qimage = QImage(data, layer_image.size[0], layer_image.size[1], QImage.Format_ARGB32)
layer_pixmap = QPixmap.fromImage(qimage)
graphics_item = QGraphicsPixmapItem(layer_pixmap, self)
| 2.40625
| 2
|
examples/myemph.py
|
fractaledmind/pandocfilters
| 1
|
12778435
|
#!/usr/bin/env python
from pandocfilters import toJSONFilter, RawInline
"""
Pandoc filter that causes emphasis to be rendered using
the custom macro '\myemph{...}' rather than '\emph{...}'
in latex. Other output formats are unaffected.
"""
def latex(s):
return RawInline('latex', s)
def myemph(k, v, f, meta):
if k == 'Emph' and f == 'latex':
return [latex('\\myemph{')] + v + [latex('}')]
if __name__ == "__main__":
toJSONFilter(myemph)
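# Assumed usage (not part of the original filter): run it with pandoc and provide a
# LaTeX definition for \myemph via a header include, e.g.
#   pandoc input.md --filter ./myemph.py -H myemph-def.tex -o output.pdf
# where myemph-def.tex contains something like \newcommand{\myemph}[1]{\textsc{#1}}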
| 2.46875
| 2
|
nsynth.py
|
ifrit98/music-transformer
| 1
|
12778436
|
<gh_stars>1-10
import os
import numpy as np
import matplotlib.pyplot as plt
from magenta.models.nsynth import utils
from magenta.models.nsynth.wavenet import fastgen
from IPython.display import Audio
def load_encoding(fname, sample_length=None, sr=16000, ckpt='model.ckpt-200000'):
audio = utils.load_audio(fname, sample_length=sample_length, sr=sr)
encoding = fastgen.encode(audio, ckpt, sample_length)
return audio, encoding
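# `fade` is used by `_crossfade` below but is not defined in this file. A minimal
# sketch, assuming the raised-cosine fade used in the published NSynth fastgen demo
# (the exact window shape is an assumption, not this project's code):
def fade(encoding, mode='in'):
    # encoding has shape (batch, time, channels); build a 0 -> 1 ramp over time
    length = encoding.shape[1]
    fadein = (0.5 * (1.0 - np.cos(np.pi * np.arange(length) / float(length)))).reshape(1, -1, 1)
    if mode == 'in':
        return fadein * encoding
    return (1.0 - fadein) * encoding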
def _crossfade(encoding1, encoding2):
return fade(encoding1, 'out') + fade(encoding2, 'in')
def crossfade(enc1, enc2):
    return fastgen.synthesize(_crossfade(enc1, enc2), save_paths=['crossfade.wav'])
def interpolate():
sample_length = 80000
# from https://www.freesound.org/people/MustardPlug/sounds/395058/
aud1, enc1 = load_encoding('395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav', sample_length)
# from https://www.freesound.org/people/xserra/sounds/176098/
aud2, enc2 = load_encoding('176098__xserra__cello-cant-dels-ocells.wav', sample_length)
enc_mix = (enc1 + enc2) / 2.0
fig, axs = plt.subplots(3, 1, figsize=(10, 7))
axs[0].plot(enc1[0]);
axs[0].set_title('Encoding 1')
axs[1].plot(enc2[0]);
axs[1].set_title('Encoding 2')
axs[2].plot(enc_mix[0]);
axs[2].set_title('Average')
fastgen.synthesize(enc_mix, save_paths='mix.wav')
def encode():
# from https://www.freesound.org/people/MustardPlug/sounds/395058/
# fname = '395058__mustardplug__breakbeat-hiphop-a4-4bar-96bpm.wav'
fname = './wav/mehldau-1.wav'
sr = 44100
audio = utils.load_audio(fname, sample_length=44100, sr=sr)
sample_length = audio.shape[0]
print('{} samples, {} seconds'.format(sample_length, sample_length / float(sr)))
encoding = fastgen.encode(audio, './wavenet-ckpt/model.ckpt-200000', sample_length)
print(encoding.shape)
np.save(fname + '.npy', encoding)
fig, axs = plt.subplots(2, 1, figsize=(10, 5))
axs[0].plot(audio);
axs[0].set_title('Audio Signal')
axs[1].plot(encoding[0]);
axs[1].set_title('NSynth Encoding')
return encoding
def decode(encoding, fname, sample_length=44100, sr=16000):
fastgen.synthesize(encoding, save_paths=['gen_' + fname], samples_per_save=sample_length)
synthesis = utils.load_audio('gen_' + fname, sample_length=sample_length, sr=sr)
return synthesis
def main():
encoding = encode()
if __name__ == "__main__":
main()
| 2.234375
| 2
|
httpclient.py
|
MensahDev/CMPUT404-assignment-web-client-master
| 0
|
12778437
|
#!/usr/bin/env python3
# coding: utf-8
# Copyright 2016 <NAME>, https://github.com/tywtyw2002, and https://github.com/treedust
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Do not use urllib's HTTP GET and POST mechanisms.
# Write your own HTTP GET and POST
# The point is to understand what you have to send and get experience with it
import sys
import socket
import re
# you may use urllib to encode data appropriately
import urllib.parse
# for debugging
DEBUG = 0
DEFAULT_HTTP_PORT = 80
def help():
print("httpclient.py [GET/POST] [URL]\n")
class HTTPResponse(object):
def __init__(self, code=200, body=""):
self.code = code
self.body = body
self.headers = dict()
def __str__(self):
''' for debugging '''
s = {"code": self.code, "body": self.body, "headers": self.headers}
return str(s)
class HTTPClient(object):
#def get_host_port(self,url):
def connect(self, host, port):
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.socket.connect((host, port))
return True
except Exception as e:
print("Problem in connection to %s on port %d" % (host, port))
return False
def get_code(self, data):
''' the work of get_code, get_headers and get_body is
done by 1 parse of response in parse_response(..) '''
return None
def get_headers(self, data):
return None
def get_body(self, data):
return None
def sendall(self, data):
self.socket.sendall(data.encode('utf-8'))
def close(self):
self.socket.close()
# read everything from the socket
def recvall(self, sock):
buffer = bytearray()
done = False
while not done:
part = sock.recv(1024)
if (part):
buffer.extend(part)
else:
done = not part
return buffer.decode('utf-8')
def GET(self, url, args=None):
code = 500
body = ""
valid, host, port, path = self.parse_url(url)
if not valid:
print("[GET] Malformed HTTP URL: %s" % url)
return HTTPResponse(code, body)
if not port:
            # when the URL has no explicit port, fall back to the
            # default HTTP port (80)
port = DEFAULT_HTTP_PORT
if not path or path == "":
path = "/"
if not self.connect(host, port):
return HTTPResponse(code, body)
# got sample http GET request format from
# curl -v http://www.cs.ualberta.ca
req = "GET " + path + " HTTP/1.1\r\n"
req += "Host: " + host + ":" + str(port) + "\r\n"
req += "User-Agent: " + "curl/7.71.1" + "\r\n"
req += "Accept: " + "*/*" + "\r\n"
req += "\r\n"
req += path
if DEBUG:
print("[GET] Requesting...")
print(req + "\n********************")
self.sendall(req)
response = self.recvall(self.socket)
self.close()
if DEBUG:
print("*****Response:******")
print(response + "\n********************")
return self.parse_response(response)
def POST(self, url, args=None):
'''
POST on URL.
TODO: GET and POST have a lot of common code: scope of refactoring
'''
code = 500
body = ""
valid, host, port, path = self.parse_url(url)
if not valid:
print("[POST] Malformed HTTP URL: %s" % url)
return HTTPResponse(code, body)
if not port:
            # when the URL has no explicit port, fall back to the
            # default HTTP port (80)
port = DEFAULT_HTTP_PORT
if not path or path == "":
path = "/"
if not self.connect(host, port):
return HTTPResponse(code, body)
# got sample http POST request format from
# curl -v -d "a=aaa&b=bbbb" -X POST http://127.0.0.1:3000
if args:
payload = urllib.parse.urlencode(args)
payload_len = len(payload)
else:
payload_len = 0
req = "POST " + path + " HTTP/1.1\r\n"
req += "Host: " + host + ":" + str(port) + "\r\n"
req += "User-Agent: " + "curl/7.71.1" + "\r\n"
req += "Accept: " + "*/*" + "\r\n"
req += "Content-Length: " + str(payload_len) + "\r\n"
req += "Content-Type: application/x-www-form-urlencoded\r\n"
req += "\r\n"
if args:
req += payload
if DEBUG:
print("[POST] Requesting...")
print(req + "\n********************")
self.sendall(req)
response = self.recvall(self.socket)
self.close()
if DEBUG:
print("*****Response:******")
print(response + "\n********************")
return self.parse_response(response)
def parse_url(self, url):
        '''
        A valid URL starts with http:// or https://,
        followed by a host and an optional port separated by a colon.
        Returns <valid>, host, port, path
        where valid is True/False, and host, port and path come from the url
        '''
parsed = urllib.parse.urlparse(url)
scheme = parsed.scheme
if scheme != "http" and scheme != "https":
return False, None, None, None
return True, parsed.hostname, parsed.port, parsed.path
def parse_response(self, response_str):
'''
Parse an http response as a string, extract body, status code and
headers, and return an httpclient.HTTPResponse object
'''
response_obj = HTTPResponse(500, '')
lines = response_str.split("\n")
if len(lines) == 0:
return response_obj
if not lines[0].startswith('HTTP/1.0 ') and not lines[0].startswith('HTTP/1.1 '):
if DEBUG:
print("Bad 1st line in response. Expected HTTP/1.0 or HTTP/1.1")
return response_obj
        resp_line_pattern = re.compile(r"HTTP/1\.. (\d+) .*")
matches = resp_line_pattern.match(lines[0])
if not matches:
if DEBUG:
print("Bad 1st line in response: %s" % lines[0])
return response_obj
code = int(matches.group(1))
response_obj.code = code
# parse headers
i = 1
while i < len(lines):
header_line = lines[i].strip()
if header_line == "":
break
            tok = header_line.split(":", 1)
            if len(tok) < 2:
                # no "header_name: header_val" pair on this line
                if DEBUG:
                    print("[WARN] Bad header line::: %s" % header_line)
            else:
                header_name = tok[0].strip()
                # split with maxsplit=1 so values containing ':' (e.g. URLs) stay intact
                header_val = tok[1].strip()
                response_obj.headers[header_name] = header_val
i += 1
        # extract the body if it exists (everything after the blank line)
        body = ''
        if i+1 < len(lines):
            body = "\n".join(lines[i+1:])
response_obj.body = body
return response_obj
def command(self, url, command="GET", args=None):
if (command == "POST"):
return self.POST( url, args )
else:
return self.GET( url, args )
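# Example of driving the client from Python rather than the CLI below
# (the URL is illustrative):
#   client = HTTPClient()
#   resp = client.GET("http://www.example.com/")
#   print(resp.code, resp.headers.get("Content-Type"), resp.body)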
if __name__ == "__main__":
client = HTTPClient()
command = "GET"
if (len(sys.argv) <= 1):
help()
sys.exit(1)
elif (len(sys.argv) == 3):
print(client.command( sys.argv[2], sys.argv[1] ))
else:
print(client.command( sys.argv[1] ))
| 3.484375
| 3
|
huasheng/huashengtoutiao_app.py
|
IMWoolei/Tiring-Spiders
| 16
|
12778438
|
<filename>huasheng/huashengtoutiao_app.py
# -*- coding: utf-8 -*-
# @Author : Leo
import json
import time
import uuid
import base64
import requests
from Crypto.Cipher import AES
from Crypto.Util.Padding import pad
"""
花生头条APP
正文内容获取,根据web-js流程提取
AES对称加密
"""
class HuashengToutiao:
"""花生头条APP"""
def __init__(self, board_info):
        # board (section) request info
        self.board_info = board_info
        # AES encryption key, fixed value
        self.aes_key = '<KEY>'
        # CBC-mode IV, fixed value
        self.iv = '0000000000000000'
        # URL used for both the news list and the article content
        self.news_url = 'https://news-api.xcmad.com/news/public/securityApiHandler.do'
        # request headers
        self.headers = {'Content-Type': 'text/plain;charset=utf-8'}
        # unencrypted payload template for article-content requests
self.content_data_format = {
'body': {
'id': '0ff0bfa5-f18c-4dd5-9df2-d661b2aa10af'},
'header': {
'action': 202,
'appVersion': 100,
'cmdName': 'app_h5',
'phoneName': 'android',
'platformCode': 'Android',
'platformVersion': '4.0',
'traceID': 'h5-1568275124205',
'token': '',
'userId': '',
'uuid': 'h5835017521957362'}}
    def get_news_contents(self):
        """Fetch the news list and return the article data for each item."""
        # serialize the board info to submit
        board_str = json.dumps(self.board_info)
        board_encrypt_str = self._aes_encrypt(text=board_str)
        # request the news list
        board_resp = requests.post(url=self.news_url, data=board_encrypt_str, headers=self.headers)
        if board_resp.ok:
            # list-page data
            pages_data = board_resp.json()
            if isinstance(pages_data, dict) and pages_data.get('code') == 0:
                news_items = pages_data.get('data')
                return [self.get_news_detail(news_id=item.get('unid')) for item in news_items]
        return []
    def get_news_detail(self, news_id: str):
        """Fetch the article data for a single news item.
        :param news_id: news ID"""
        content_submit = self.content_data_format.copy()
        content_submit['body']['id'] = news_id
        # 'h5-' followed by a millisecond timestamp
        content_submit['header']['traceID'] = 'h5-' + str(int(time.time() * 1000))
        # randomly generated 16-character string
        content_submit['header']['uuid'] = 'h' + str(uuid.uuid1().int)[:16]
        detail_str = json.dumps(content_submit)
        detail_encrypt_str = self._aes_encrypt(text=detail_str)
        try:
            # request the news detail page
            detail_resp = requests.post(url=self.news_url, data=detail_encrypt_str, headers=self.headers)
            if detail_resp.ok:
                detail_data = detail_resp.json()
                if isinstance(detail_data, dict) and detail_data.get('code') == 0:
                    return detail_data.get('data')
        except requests.exceptions.RequestException:
            print('Error requesting the news detail page')
        return None
    def _aes_decrypt(self, text):
        """AES decryption (not implemented here)"""
        pass
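        # Sketch of the inverse of _aes_encrypt (an assumption, not the app's code):
        # it presumes the same key/IV and PKCS7 padding and would need
        # `from Crypto.Util.Padding import unpad`:
        #   aes = AES.new(self.aes_key.encode(), AES.MODE_CBC, iv=self.iv.encode())
        #   padded = aes.decrypt(base64.b64decode(text))
        #   return unpad(padded, AES.block_size).decode('utf-8')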
    def _aes_encrypt(self, text):
        """AES encryption.
        :param text: plaintext string"""
        aes = AES.new(self.aes_key.encode(), AES.MODE_CBC, iv=self.iv.encode())
        # pad using PKCS7
        pad_pkcs7 = pad(text.encode('utf-8'), AES.block_size, style='pkcs7')
        aes_encrypt_ = aes.encrypt(pad_pkcs7)
        return base64.b64encode(aes_encrypt_).decode()
if __name__ == '__main__':
from prettyprinter import cpprint
    # test using the "recommended" board as an example
temp_board = {
'header': {
'platformVersion': '8.1.0',
'platformCode': 'Android',
'cmdName': 'app360',
'token': '',
'appVersion': '1.4.8',
'uuid': '868661030024913',
'ts': 1566381935556,
'traceID': 'A20190821180535033688',
'action': '212',
'phoneName': 'Xiaomi_Redmi 5A',
'package': 'com.xcm.huasheng'},
'body': {
'category': '0,21,10,22,12,6,7,20,3',
'gender': '1'}
}
spider = HuashengToutiao(board_info=temp_board)
result = spider.get_news_contents()
cpprint(result)
| 2.234375
| 2
|
qg_kelvin/analysis/energy.py
|
bderembl/mitgcm_configs
| 1
|
12778439
|
<filename>qg_kelvin/analysis/energy.py
#!/usr/bin/env python
import numpy as np
import matplotlib.pyplot as plt
import MITgcmutils as mit
import scipy.io.netcdf as netcdf
plt.ion()
flag_tile = 1
dir0 = '/run/media/bderembl/workd/MITgcm/myrun/test_kw_energetics/run04/'
#dir0 = '/home/bderembl/work/MITgcm/myrun/test_kw_energetics/run/'
dir1 = dir0 + 'mnc*/'
dir2 = dir0 + 'mnc_test_0001/'
file0 = 'grid.t*'
file1 = 'state.*'
if flag_tile == 0:
file0 = 'grid.t001.nc'
file1 = 'state.0000000000.t001.nc'
alphat = 2e-4
go = 9.81
# grid
if flag_tile == 1:
f0 = mit.mnc_files(dir1 + file0)
else:
f0 = netcdf.netcdf_file(dir2 + file0,'r')
RC = f0.variables['RC'][:].copy()
DRC = f0.variables['drC'][:].copy()
DRF = f0.variables['drF'][:].copy()
RF = f0.variables['RF'][:].copy()
XC = f0.variables['XC'][:,:].copy()
YC = f0.variables['YC'][:,:].copy()
si_y,si_x = XC.shape
si_z = RC.size
dx = XC[1,1] - XC[0,0]
dy = YC[1,1] - YC[0,0]
dz = RC[1] - RC[0]
dv = np.abs(dx*dy*dz)
if flag_tile == 1:
f1 = mit.mnc_files(dir1 + file1)
else:
f1 = netcdf.netcdf_file(dir2 + file1,'r')
T = f1.variables['T'][:].copy()
si_t = len(T)
temp = f1.variables['Temp'][0,:si_z,:si_y,:si_x].copy()
RC3d = (0*temp + 1)*(RC.reshape(si_z,1,1))
# compute KE, PE
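# Sketch of the diagnostics computed below, assuming a linear equation of state
# rho = rho0*(1 - alphat*T) and a unit reference density:
#   KE = 0.5 * sum(u**2 + v**2) * dv
#   PE = go  * sum((1 - alphat*(T + 273.1)) * z) * dv
# (the cell volume dv is applied after the time loop)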
ener = np.zeros((si_t,5))
ener_tmp = np.zeros((si_t,si_z))
for nt in range (0,si_t):
u = f1.variables['U'][nt,:si_z,:si_y,:si_x].copy()
v = f1.variables['V'][nt,:si_z,:si_y,:si_x].copy()
# w = f1.variables['W'][nt,:si_z,:si_y,:si_x]
temp = f1.variables['Temp'][nt,:si_z,:si_y,:si_x].copy()
eta = f1.variables['Eta'][nt,:si_y,:si_x].copy()
ener[nt,0] = 0.5*np.sum((u**2 + v**2 ))
# without upper and lower boundaries
ener[nt,1] = go*np.sum((1-alphat*(temp[1:-1,:,:]+273.1))*(RC[1:-1].reshape(si_z-2,1,1)))
# with upper and lower boundaries
#ener[nt,1] = go*np.sum((1-alphat*(temp[:,:,:]+273.1))*(RC[:].reshape(si_z,1,1)))
#ener[nt,1] = go*np.sum((1-alphat*(temp[1:-1,:,:]+273.1))*(RC3d+eta)[1:-1,:,:])
# ener_tmp[nt,:] = go*np.sum(np.sum((1-alphat*(temp+273.1))*(RC3d+eta),1),1)
# ener[nt,1] = 0.5*go*np.sum((1-alphat*(temp+273.1))*(RF[:-1]**2-RF[1:]**2).reshape(si_z,1,1))
ener[:,0] = ener[:,0]*dv
ener[:,1] = ener[:,1]*dv
#ener[:,1] = ener[:,1]*dx*dy
enera = ener - ener[0,:]
plt.figure()
plt.plot(enera[:-1,0],'k',label='KEa')
plt.plot(enera[:-1,1],'r',label='PEa')
plt.ticklabel_format(style='sci', axis='y', scilimits=(0,0))
plt.legend(loc=2)
plt.grid()
plt.xlabel('Time (day)')
plt.ylabel('Energy (m^5/s^2)')
#plt.savefig('energy_diag.pdf',bbox_inches='tight')
#np.savetxt('ener_mit.dat',enera)
| 1.804688
| 2
|
convert/__init__.py
|
zyronix/maske
| 0
|
12778440
|
import keepalived
plugins_dic = {}
plugins_dic['keepalived'] = keepalived
| 1.257813
| 1
|
mdnb/model.py
|
trslater/mdnb
| 0
|
12778441
|
<gh_stars>0
from dataclasses import dataclass
from functools import cached_property
from pathlib import Path
from typing import Union
from urllib.parse import urlsplit
import markdown
@dataclass(frozen=True)
class Document:
"""Represents a single document in the project"""
md_path: Path
md_dir: Path
md_str: str
html_dir: Path
@cached_property
def html_path(self) -> Path:
return self.html_dir/(self.md_path
.relative_to(self.md_dir)
.with_suffix(".html"))
@cached_property
def html_str(self) -> str:
return markdown.markdown(self.md_str,
output_format="html5",
tab_length=2)
@classmethod
def from_file(cls,
md_path: Union[Path, str],
md_dir: Union[Path, str]=".",
html_dir: Union[Path, str]="public"):
if isinstance(md_path, str):
md_path = Path(md_path)
if isinstance(md_dir, str):
md_dir = Path(md_dir)
if isinstance(html_dir, str):
html_dir = Path(html_dir)
return cls(md_path, md_dir, md_path.read_text(), html_dir)
@classmethod
def from_uri(cls,
url: str,
md_dir: Union[Path, str]=".",
html_dir: Union[Path, str]="public"):
if isinstance(md_dir, str):
md_dir = Path(md_dir)
if isinstance(html_dir, str):
html_dir = Path(html_dir)
rel_path = urlsplit(url).path
# If starts with '/', remove it
if rel_path and rel_path[0] == "/":
rel_path = rel_path[1:]
rel_path = Path(rel_path)
if rel_path.suffix not in (".html", ".htm"):
rel_path = rel_path/"index.html"
md_path = md_dir/rel_path.with_suffix(".md")
return cls(md_path, md_dir, md_path.read_text(), html_dir)
def save_html(self) -> None:
self.html_path.write_text(self.html_str)
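# Example usage (illustrative paths; the output directory must already exist):
#   doc = Document.from_file("notes/index.md", md_dir="notes", html_dir="public")
#   doc.save_html()  # writes public/index.html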
| 2.71875
| 3
|
src/ggplib/statemachine/depthcharges.py
|
richemslie/ggplib
| 11
|
12778442
|
import time
import random
from ggplib.util import log
def depth_charges(sm, seconds):
# play for n seconds
seconds = float(seconds)
log.info("depth_charges() : playing for %s seconds" % seconds)
role_count = len(sm.get_roles())
# cache some objects
joint_move = sm.get_joint_move()
base_state = sm.new_base_state()
    # time.time() resolution is assumed to be good enough here; we only report
    # averages over the whole run below
start_time = cur_time = time.time()
end_time = start_time + seconds
rollouts = 0
num_state_changes = 0
all_scores = [[] for i in range(role_count)]
while cur_time < end_time:
# the number of moves of the game
depth = 0
# tells the state machine to reset everything and return to initial state
sm.reset()
# while the game has not ended
while not sm.is_terminal():
# choose a random move for each role
for role_index in range(role_count):
ls = sm.get_legal_state(role_index)
choice = ls.get_legal(random.randrange(0, ls.get_count()))
joint_move.set(role_index, choice)
# play move, the base_state will be new state
sm.next_state(joint_move, base_state)
# update the state machine to new state
sm.update_bases(base_state)
# increment the depth
depth += 1
# simulate side effect of getting the scores from the statemachine
for ri in range(role_count):
all_scores[ri].append(sm.get_goal_value(ri))
# stats
rollouts += 1
num_state_changes += depth
# update the time
cur_time = time.time()
rollouts_per_second = rollouts / seconds
log.info("rollouts per second %s" % rollouts_per_second)
log.info("average time msecs %s" % ((seconds / rollouts) * 1000))
log.info("average depth %s" % (num_state_changes / rollouts))
for ri, role in enumerate(sm.get_roles()):
total_score = sum(all_scores[ri])
log.info("average score for %s : %s" % (role, total_score / float(rollouts)))
| 2.71875
| 3
|
examples/worker/simplejob.py
|
pooya/disco
| 786
|
12778443
|
from disco.job import SimpleJob
class SimpleJob(SimpleJob):
def map(self, worker, task, **jobargs):
worker.output(task, partition=None).file.append('hello world!')
def reduce(self, worker, task, **jobargs):
worker.output(task, partition=None).file.append('goodbye world!')
| 2.765625
| 3
|
moment/test/test_add_operator.py
|
KrixTam/pymoment
| 1
|
12778444
|
import unittest
from moment import moment
from datetime import timedelta
class TestAddOperator(unittest.TestCase):
def test_day(self):
a = moment('20201228').add(3, 'd')
b = moment('20201228') + timedelta(days=3)
self.assertEqual(a, b)
def test_second(self):
a = moment('20201228').add(80, 's')
b = moment('20201228') + timedelta(seconds=80)
self.assertEqual(a, b)
def test_millisecond(self):
a = moment('20201228').add(183, 'ms')
b = moment('20201228') + timedelta(milliseconds=183)
self.assertEqual(a, b)
def test_minute(self):
a = moment('20201228').add(7, 'm')
b = moment('20201228') + timedelta(minutes=7)
self.assertEqual(a, b)
def test_hour(self):
a = moment('20201228').add(13, 'h')
b = moment('20201228') + timedelta(hours=13)
self.assertEqual(a, b)
    def test_week(self):
a = moment('20201228').add(5, 'w')
b = moment('20201228') + timedelta(weeks=5)
self.assertEqual(a, b)
def test_not_implement(self):
with self.assertRaises(TypeError):
a = moment('2021-4-2 04:02:09.957031 +0800')
a + 2
if __name__ == '__main__':
unittest.main()
| 3.46875
| 3
|
benwaonline_auth/schemas.py
|
goosechooser/benwaonline-auth
| 0
|
12778445
|
<filename>benwaonline_auth/schemas.py
from marshmallow import Schema, fields, post_load
from benwaonline_auth.models import User, Token, Client
class UserSchema(Schema):
user_id = fields.Str()
refresh_token = fields.Nested("TokenSchema", exclude=("user",))
created_on = fields.DateTime()
@post_load
def make_user(self, data, **kwargs):
return User(**data)
class TokenSchema(Schema):
code = fields.Str()
created_on = fields.DateTime()
expires_in = fields.TimeDelta()
expires_on = fields.DateTime()
user = fields.Nested("UserSchema", exclude=("refresh_token",))
scopes = fields.Str()
@post_load
def make_token(self, data, **kwargs):
return Token(**data)
class ClientSchema(Schema):
name = fields.Str()
client_id = fields.Str()
client_secret = fields.Str()
is_confidential = fields.Bool()
blacklisted = fields.Bool()
response_type = fields.Str()
redirect_uris = fields.Str(attribute="_redirect_uris")
default_scopes = fields.Str(attribute="_default_scopes")
refresh_tokens = fields.List(fields.Nested("TokenSchema"))
@post_load
def make_client(self, data, **kwargs):
return Client(**data)
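# Example round-trip (illustrative values; assumes the Client model accepts these kwargs):
#   client = ClientSchema().load({"name": "demo", "client_id": "abc", "client_secret": "s3cret"})
#   payload = ClientSchema().dump(client)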
| 2.25
| 2
|
python/contrib/garbage_picture/src/classify.py
|
Dedederek/samples
| 0
|
12778446
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import os
import acl
from flask import Flask, g
from flask_restful import reqparse, Api, Resource
from flask_httpauth import HTTPTokenAuth
import base64
from utils import *
from acl_dvpp import Dvpp
from acl_model import Model
from acl_image import AclImage
from image_net_classes import get_image_net_class
from PIL import Image, ImageDraw, ImageFont
import numpy as np
ret = acl.init()
check_ret("acl.rt.set_device", ret)
class Classify(object):
def __init__(self, model_path, model_width, model_height):
self.device_id = 0
self.context = None
self.stream = None
self._model_path = model_path
self._model_width = model_width
self._model_height = model_height
        self._dvpp = None
        self._model = None
def __del__(self):
if self._model:
del self._model
if self._dvpp:
del self._dvpp
if self.stream:
acl.rt.destroy_stream(self.stream)
if self.context:
acl.rt.destroy_context(self.context)
acl.rt.reset_device(self.device_id)
acl.finalize()
print("[Sample] class Samle release source success")
def destroy(self):
        self.__del__()
def _init_resource(self):
print("[Sample] init resource stage:")
#ret = acl.init()
#check_ret("acl.rt.set_device", ret)
ret = acl.rt.set_device(self.device_id)
check_ret("acl.rt.set_device", ret)
self.context, ret = acl.rt.create_context(self.device_id)
check_ret("acl.rt.create_context", ret)
self.stream, ret = acl.rt.create_stream()
check_ret("acl.rt.create_stream", ret)
self.run_mode, ret = acl.rt.get_run_mode()
check_ret("acl.rt.get_run_mode", ret)
print("Init resource stage success")
def init(self):
self._init_resource()
self._dvpp = Dvpp(self.stream, self.run_mode)
ret = self._dvpp.init_resource()
if ret != SUCCESS:
print("Init dvpp failed")
return FAILED
self._model = Model(self.run_mode, self._model_path)
ret = self._model.init_resource()
if ret != SUCCESS:
print("Init model failed")
return FAILED
return SUCCESS
def pre_process(self, image):
yuv_image = self._dvpp.jpegd(image)
print("decode jpeg end")
resized_image = self._dvpp.resize(yuv_image,
self._model_width, self._model_height)
print("resize yuv end")
return resized_image
def inference(self, resized_image):
return self._model.execute(resized_image.data(), resized_image.size)
def post_process(self, infer_output, image_file):
print("post process")
data = infer_output[0]
vals = data.flatten()
top_k = vals.argsort()[-1:-6:-1]
print("images:{}".format(image_file))
print("======== top5 inference results: =============")
for n in top_k:
object_class = get_image_net_class(n)
print("label:%d confidence: %f, class: %s" % (n, vals[n], object_class))
object_class = get_image_net_class(top_k[0])
return object_class
MODEL_PATH = "./model/googlenet_yuv.om"
MODEL_WIDTH = 224
MODEL_HEIGHT = 224
def main_process():
classify = Classify(MODEL_PATH, MODEL_WIDTH, MODEL_HEIGHT)
ret = classify.init()
if not os.path.isdir('./outputs'):
os.mkdir('./outputs')
image_dir = "./origin"
images_list = [os.path.join(image_dir, img)
for img in os.listdir(image_dir)
if os.path.splitext(img)[1] in IMG_EXT]
for image_file in images_list:
image = AclImage(image_file)
resized_image = classify.pre_process(image)
print("pre process end")
result = classify.inference(resized_image)
result_img_encode = classify.post_process(result, image_file)
#acl_resource.destroy()
#classify.destroy()
return result_img_encode
def base64_decode(img_encode, img_type):
    # np.frombuffer replaces the deprecated np.fromstring for binary data
    img_np = np.frombuffer(base64.b64decode(img_encode), np.uint8)
    img_file = "./origin/origin." + img_type
    with open(img_file, 'wb') as f:
        f.write(base64.b64decode(img_encode))
    return img_np
class TodoList(Resource):
def post(self):
args = parser_put.parse_args()
img_data_base64_encode = args['img_data']
img_type = args['img_type']
img_decode = base64_decode(img_data_base64_encode, img_type)
#print(img_decode)
result_img_encode = main_process()
os.remove("./origin/origin.jpg")
q = 400
for n in range(1,1000):
if str(result_img_encode)==get_image_net_class(n):
q=200
break
result = {"result":str(result_img_encode),"code":q}
return result
app = Flask(__name__)
api = Api(app)
parser_put = reqparse.RequestParser()
parser_put.add_argument("img_data", type=str, required=True, help="need img data")
parser_put.add_argument("img_type", type=str, required=True, help="need img type")
api.add_resource(TodoList, "/users")
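# Example client call (illustrative; the host/port must match app.run() below):
#   import base64, requests
#   with open("test.jpg", "rb") as f:
#       payload = {"img_data": base64.b64encode(f.read()).decode(), "img_type": "jpg"}
#   print(requests.post("http://192.168.0.169:7002/users", data=payload).json())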
if __name__ == '__main__':
app.run(host="192.168.0.169", port=7002, debug=True)
| 2.140625
| 2
|
cards/picture_help_card.py
|
lamanchy/stackoverflow
| 0
|
12778447
|
<filename>cards/picture_help_card.py
import os
from PIL import Image
from cards.text_help_card import TextHelpCard
from colors import getrgb
from language import get_language
from pil_quality_pdf.rendering import mm_to_px
from pil_quality_pdf.transformation import resize
class PictureHelpCard(TextHelpCard):
DX = 20.0
def __init__(self, color, title, text, picture_getter):
super().__init__(color, title, text)
self.picture_getter = picture_getter
self.id = None
def box_draw(self, card, move, picture=None, color=None):
assert picture is not None or color is not None
box_size = mm_to_px((self.width - move, self.width - move))
if picture is None:
picture = Image.new('RGB',
box_size,
getrgb(color))
picture = picture.crop((0, 0,) + box_size)
card.paste(picture, mm_to_px(move / 2, move / 2))
def get_card(self):
self.top = mm_to_px(self.width - 7)
card = super().get_card()
self.box_draw(card, self.DX - .5, color="grey")
self.box_draw(card, 20, self.picture_getter())
self.paste_arrows(card)
return card
def paste_arrows(self, card):
id = f"{self.id}-01.png"
candidates = [file for file in os.listdir(f"help/arrows/{get_language()}") if id == file]
if len(candidates) != 1:
print(f"There is no arrow for {self.title}, id: {id}")
exit(1)
file = candidates[0]
im = Image.open(f"help/arrows/{get_language()}/" + file)
im = resize(im, card.size)
card.paste(im, mask=im)
| 2.828125
| 3
|
B_Python_and_friends/solutions/ex1_5.py
|
oercompbiomed/CBM101
| 7
|
12778448
|
n = 15
for i in range(n):
k = (n-i)//2
print(' '*k, '*'*i)
| 3.078125
| 3
|
raft/server.py
|
kurin/py-raft
| 62
|
12778449
|
from __future__ import print_function
import sys
import time
import uuid
import copy
import random
import logging
import threading
try:
import Queue
except ImportError: # for python3
import queue as Queue
import msgpack
import raft.store as store
import raft.tcp as channel
import raft.log as log
def make_server(port=9289, bootstraps=None):
    queue = Queue.Queue()
    server = Server(queue, port, bootstraps or [])
server.start()
return queue
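# Example (a sketch; peer and bootstrap wiring depends on how raft.tcp and the
# stored state are set up for your deployment):
#   q = make_server(port=9289)
#   # committed client messages arrive on the returned queue as (msgid, data) pairs
#   msgid, data = q.get()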
def iteritems(dictobj):
if sys.version_info[0] == 2:
        return dictobj.iteritems()
else:
return dictobj.items()
class Server(threading.Thread):
def __init__(self, queue, port, bootstraps):
self.port = port
self.load()
self.bootstraps = bootstraps
self.queue = queue
self.role = 'follower'
self.channel = channel.start(port, self.uuid)
self.last_update = time.time()
self.commitidx = 0
self.update_uuid = None
self.leader = None
self.newpeers = None
self.oldpeers = None
threading.Thread.__init__(self)
self.daemon = True
#
## startup and state methods
#
def load(self):
self.term, self.voted, llog, self.peers, \
self.uuid = store.read_state(self.port)
self.log = log.RaftLog(llog)
def save(self):
store.write_state(self.port, self.term, self.voted,
self.log.dump(), self.peers, self.uuid)
def run(self):
self.running = True
while self.running:
for peer in self.peers:
if not peer in self.channel and peer != self.uuid:
self.channel.connect(self.peers[peer])
for addr in self.bootstraps:
self.channel.connectbs(addr, self.bootstrap_cb)
channelans = self.channel.recv(0.15)
if channelans:
for peer, msgs in channelans:
for msg in msgs:
self.handle_message(msg, peer)
else:
self.housekeeping()
#
## message handling
#
def handle_message(self, msg, addr):
# got a new message
# update our term if applicable, and dispatch the message
# to the appropriate handler. finally, if we are still
# (or have become) the leader, send out heartbeats
try:
msg = msgpack.unpackb(msg, use_list=False, encoding='utf-8')
except msgpack.UnpackException:
return
mtype = msg['type']
term = msg.get('term', None)
msg['src'] = addr
uuid = msg.get('id', None)
# no matter what, if our term is old, update and step down
if term and term > self.term and self.valid_peer(uuid):
# okay, well, only if it's from a valid source
self.term = term
self.voted = None
self.role = 'follower'
mname = 'handle_msg_%s_%s' % (self.role, mtype)
if hasattr(self, mname):
getattr(self, mname)(msg)
if self.role == 'leader' and time.time() - self.last_update > 0.3:
self.send_ae()
def handle_msg_candidate_bootstrap(self, msg):
self.handle_msg_follower_bootstrap(msg)
def handle_msg_follower_bootstrap(self, msg):
# bootstrap packets solve the problem of how we find the
# id of our peers. we don't want to have to copy uuids around
# when they could just mail them to each other.
print(msg)
print(self.peers)
def handle_msg_leader_ae_reply(self, msg):
# we are a leader who has received an ae ack
# if the update was rejected, it's because the follower
# has an incorrect log entry, so send an update for that
# log entry as well
# if the update succeeded, record that in the log and,
# if the log has been recorded by enough followers, mark
# it committed.
uuid = msg['id']
if not self.valid_peer(uuid):
return
success = msg['success']
index = msg['index']
if success:
self.next_index[uuid] = index
if self.log.get_commit_index() < index:
self.msg_recorded(msg)
else:
# exponentially reduce the index for peers
# this way if they're only missing a couple log entries,
# we only have to send 2 or 4, but if they're missing
# a couple thousand we'll find out in less than 2k round
# trips
oldidx = self.next_index.get(uuid, 0)
diff = self.log.maxindex() - oldidx
diff = max(diff, 1)
oldidx -= diff
self.next_index[uuid] = max(oldidx, 0)
def handle_msg_follower_ae(self, msg):
# we are a follower who just got an append entries rpc
# reset the timeout counter
uuid = msg['id']
if not self.valid_peer(uuid):
return
term = msg['term']
if term < self.term:
return
self.last_update = time.time()
self.leader = msg['id']
logs = msg['entries']
previdx = msg['previdx']
prevterm = msg['prevterm']
if not self.log.exists(previdx, prevterm):
rpc = self.ae_rpc_reply(previdx, prevterm, False)
self.send_to_peer(rpc, self.leader)
return
cidx = msg['commitidx']
if cidx > self.commitidx: # don't lower the commit index
self.commitidx = cidx
self.log.force_commit(cidx)
if self.update_uuid:
self.check_update_committed()
if not logs:
# heartbeat
return
for ent in sorted(logs):
val = logs[ent]
self.process_possible_update(val)
self.log.add(val)
maxmsg = self.log.get_by_index(self.log.maxindex())
rpc = self.ae_rpc_reply(maxmsg['index'], maxmsg['term'], True)
self.send_to_peer(rpc, self.leader)
def handle_msg_candidate_ae(self, msg):
# someone else was elected during our candidacy
term = msg['term']
uuid = msg['id']
if not self.valid_peer(uuid):
return
if term < self.term:
# illegitimate, toss it
return
self.role = 'follower'
self.handle_msg_follower_ae(msg)
def handle_msg_follower_cq(self, msg):
try:
rpc = self.cr_rdr_rpc(msg['id'])
src = msg['src']
self.send_to_peer(rpc, src)
except:
# we're allowed not to respond at all, in this case,
# so if we crashed for some reason, just ignore it
return
def handle_msg_leader_cq(self, msg):
src = msg['src']
if msg['id'] is None:
msgid = uuid.uuid4().hex
msg['id'] = msgid
self.add_to_log(msg)
rpc = self.cr_rpc_ack(msg['id'])
self.send_to_peer(rpc, src)
def handle_msg_leader_cq_inq(self, msg):
src = msg['src']
msgid = msg['id']
info = {}
inquiry = self.log.get_by_uuid(msgid)
if inquiry is None:
info['status'] = 'unknown'
elif inquiry['index'] > self.commitidx:
info['status'] = 'pending'
else:
info['status'] = 'committed'
rpc = self.cr_rpc_ack(msgid, info)
self.send_to_peer(rpc, src)
def handle_msg_candidate_rv(self, msg):
# don't vote for a different candidate!
uuid = msg['id']
if self.uuid == uuid:
# huh
return
if not self.valid_peer(uuid):
return
rpc = self.rv_rpc_reply(False)
self.send_to_peer(rpc, uuid)
def handle_msg_follower_rv(self, msg):
term = msg['term']
uuid = msg['id']
if not self.valid_peer(uuid):
return
olog = {msg['log_index']: {
'index': msg['log_index'],
'term': msg['log_term'],
'msgid': '',
'msg': {}}}
olog = log.RaftLog(olog)
if term < self.term:
# someone with a smaller term wants to get elected
# as if
rpc = self.rv_rpc_reply(False)
self.send_to_peer(rpc, uuid)
return
if (self.voted is None or self.voted == uuid) and self.log <= olog:
# we can vote for this guy
self.voted = uuid
self.save()
rpc = self.rv_rpc_reply(True)
self.last_update = time.time()
self.send_to_peer(rpc, uuid)
return
# we probably voted for somebody else, or the log is old
rpc = self.rv_rpc_reply(False)
self.send_to_peer(rpc, uuid)
def handle_msg_candidate_rv_reply(self, msg):
uuid = msg['id']
if not self.valid_peer(uuid):
return
voted = msg['voted']
if voted:
self.cronies.add(uuid)
else:
self.refused.add(uuid)
if len(self.cronies) >= self.quorum():
# won the election
self.role = 'leader'
self.next_index = {}
self.commitidx = self.log.get_commit_index()
maxidx = self.log.maxindex()
for uuid in self.all_peers():
# just start by pretending everyone is caught up,
# they'll let us know if not
self.next_index[uuid] = maxidx
def handle_msg_leader_pu(self, msg):
if self.update_uuid:
# we're either already in the middle of this, or we're
# in the middle of something *else*, so piss off
return
uuid = msg['id']
# got a new update request
# it will consist of machines to add and to remove
# here we perform the first phase of the update, by
# telling clients to add the new machines to their
# existing peer set.
msg['phase'] = 1
self.newpeers = msg['config'] # adopt the new config right away
if not self.newpeers:
return
self.update_uuid = uuid
self.add_to_log(msg)
def housekeeping(self):
now = time.time()
if self.role == 'candidate':
elapsed = now - self.election_start
if now - self.last_update > 0.5 and self.role == 'follower':
# got no heartbeats; leader is probably dead
# establish candidacy and run for election
self.call_election()
elif self.role == 'candidate' and elapsed < self.election_timeout:
# we're in an election and haven't won, but the
# timeout isn't expired. repoll peers that haven't
# responded yet
self.campaign()
elif self.role == 'candidate':
# the election timeout *has* expired, and we *still*
# haven't won or lost. call a new election.
self.call_election()
elif self.role == 'leader':
# send a heartbeat
self.send_ae()
#
## convenience methods
#
def send_ae(self):
self.last_update = time.time()
for uuid in self.all_peers():
if uuid == self.uuid: # no selfies
continue
ni = self.next_index.get(uuid, self.log.maxindex())
logs = self.log.logs_after_index(ni)
rpc = self.ae_rpc(uuid, logs)
self.send_to_peer(rpc, uuid)
def call_election(self):
self.term += 1
self.voted = self.uuid
self.save()
self.cronies = set()
self.refused = set()
self.cronies.add(self.uuid)
self.election_start = time.time()
self.election_timeout = 0.5 * random.random() + 0.5
self.role = 'candidate'
self.campaign()
def campaign(self):
voted = self.cronies.union(self.refused) # everyone who voted
voters = set(self.peers)
if self.newpeers:
voters = voters.union(set(self.newpeers))
remaining = voters.difference(voted) # peers who haven't
rpc = self.rv_rpc()
for uuid in remaining:
self.send_to_peer(rpc, uuid)
def check_update_committed(self):
# we (a follower) just learned that one or more
# logs were committed, *and* we are in the middle of an
# update. check to see if that was phase 2 of the update,
# and remove old hosts if so
umsg = self.log.get_by_uuid(self.update_uuid)
if umsg['index'] > self.commitidx:
# isn't yet committed
return
data = umsg['msg']
if data['phase'] == 2:
self.oldpeers = None
self.update_uuid = None
if not self.uuid in self.all_peers():
self.running = False
def process_possible_update(self, msg):
if not 'msg' in msg:
return
data = msg['msg']
if not 'type' in data:
return
if data['type'] != 'pu':
return
phase = data['phase']
uuid = data['id']
if self.update_uuid == uuid:
# we've already done this
return
self.update_uuid = uuid # in case we become leader during this debacle
if phase == 1:
self.newpeers = data['config']
elif phase == 2 and self.newpeers:
self.oldpeers = self.peers
self.peers = self.newpeers
self.newpeers = None
def possible_update_commit(self):
# we're in an update; see if the update msg
# has committed, and go to phase 2 or finish
if not self.log.is_committed_by_uuid(self.update_uuid):
# it hasn't
return
umsg = self.log.get_by_uuid(self.update_uuid)
data = copy.deepcopy(umsg['msg'])
if data['phase'] == 1 and self.newpeers:
# the *first* phase of the update has been committed
# new leaders are guaranteed to be in the union of the
# old and new configs. now update the configuration
# to the new one only.
data['phase'] = 2
newid = uuid.uuid4().hex
self.update_uuid = newid
data['id'] = newid
self.oldpeers = self.peers
self.peers = self.newpeers
self.newpeers = None
logentry = log.logentry(self.term, newid, data)
self.log.add(logentry)
else:
# the *second* phase is now committed. tell all our
# current peers about the successful commit, drop
# the old config entirely and, if necessary, step down
self.send_ae() # send this to peers who might be about to dispeer
self.oldpeers = None
self.update_uuid = None
if not self.uuid in self.peers:
self.running = False
def all_peers(self):
for host in self.peers:
yield host
if self.newpeers:
for host in self.newpeers:
yield host
if self.oldpeers:
for host in self.oldpeers:
yield host
def valid_peer(self, uuid):
if uuid in self.peers:
return True
if self.newpeers and uuid in self.newpeers:
return True
if self.oldpeers and uuid in self.oldpeers:
return True
return False
def get_peer_addr(self, uuid):
if uuid in self.peers:
return self.peers[uuid]
if self.newpeers and uuid in self.newpeers:
return self.newpeers[uuid]
if self.oldpeers and uuid in self.oldpeers:
return self.oldpeers[uuid]
def send_to_peer(self, rpc, uuid):
self.channel.send(rpc, uuid)
def quorum(self):
peers = set(self.peers)
if self.newpeers:
peers.union(set(self.newpeers))
# oldpeers don't get a vote
# use sets because there could be dupes
np = len(peers)
        # integer majority, e.g. 4 peers -> 3, 5 peers -> 3
        return np // 2 + 1
def msg_recorded(self, msg):
# we're a leader and we just got an ack from
# a follower who might have been the one to
# commit an entry
term = msg['term']
index = msg['index']
uuid = msg['id']
self.log.add_ack(index, term, uuid)
if self.log.num_acked(index) >= self.quorum() and term == self.term:
self.log.commit(index, term)
assert index >= self.commitidx
oldidx = self.commitidx
self.commitidx = index
if self.update_uuid:
# if there's an update going on, see if our commit
# is actionable
self.possible_update_commit()
# otherwise just see what messages are now runnable
self.run_committed_messages(oldidx)
def add_to_log(self, msg):
uuid = msg['id']
logentry = log.logentry(self.term, uuid, msg)
index = self.log.add(logentry)
self.save()
self.log.add_ack(index, self.term, self.uuid)
def run_committed_messages(self, oldidx):
committed = self.log.committed_logs_after_index(oldidx)
for _, val in sorted(iteritems(committed)):
msg = val['msg']
msgid = msg['id']
data = msg['data']
self.queue.put((msgid, data))
def bootstrap_cb(self, uuid, addr):
self.bootstraps.remove(addr)
self.peers[uuid] = addr
#
## rpc methods
#
def rv_rpc(self):
log_index, log_term = self.log.get_max_index_term()
rpc = {
'type': 'rv',
'term': self.term,
'id': self.uuid,
'log_index': log_index,
'log_term': log_term,
}
return msgpack.packb(rpc)
def rv_rpc_reply(self, voted):
rpc = {
'type': 'rv_reply',
'id': self.uuid,
'term': self.term,
'voted': voted
}
return msgpack.packb(rpc)
    def ae_rpc(self, peeruuid, append=None):
previdx = self.next_index.get(peeruuid, self.log.maxindex())
rpc = {
'type': 'ae',
'term': self.term,
'id': self.uuid,
'previdx': previdx,
'prevterm': self.log.get_term_of(previdx),
            'entries': append or {},
'commitidx': self.commitidx,
}
return msgpack.packb(rpc)
def ae_rpc_reply(self, index, term, success):
rpc = {
'type': 'ae_reply',
'term': term,
'id': self.uuid,
'index': index,
'success': success
}
return msgpack.packb(rpc)
def cr_rpc(self, qid, ans):
# client response RPC
# qid = query id, ans is arbitrary data
# if the qid is None, we make one up and
# return it when we ack it
rpc = {
'type': 'cr',
'id': qid,
'data': ans
}
return msgpack.packb(rpc)
def cr_rpc_ack(self, qid, info=None):
# client response RPC
# qid = query id, ans is arbitrary data
rpc = {
'type': 'cr_ack',
'id': qid,
'info': info
}
return msgpack.packb(rpc)
def cr_rdr_rpc(self, msgid):
# client response redirect; just point them
# at the master
if not self.leader:
# we don't know where to send them
raise RuntimeError
rpc = {
'type': 'cr_rdr',
'id': msgid,
'addr': self.get_peer_addr(self.leader),
'leader': self.leader
}
return msgpack.packb(rpc)
def bootstrap_rpc(self):
rpc = {
'type': 'bootstrap',
'id': self.uuid
}
return msgpack.packb(rpc)
| 2.34375
| 2
|
filesystem/test/testbasefile.py
|
redmond-penguin/musicplayer
| 0
|
12778450
|
import unittest
from filesystem.basefile import BaseFile
import os
class TestBaseFile(unittest.TestCase):
def setUp(self):
os.mknod("/tmp/testfile")
if not os.path.isfile("/tmp/testfile"):
raise Exception("Cannot create /tmp/testfile")
self.file = BaseFile("/tmp/testfile")
self.updater = None
def update(self, caller):
self.updater = caller
def tearDown(self):
if os.path.isfile("/tmp/testfile"):
os.remove("/tmp/testfile")
def test_get_path(self):
self.assertEqual(self.file.get_path(), "/tmp/testfile")
def test_create(self):
file2 = BaseFile("testname")
self.assertIsInstance(file2, BaseFile)
self.assertEqual(file2.get_path(), "testname")
def test_create_wrong_argument_to_constructor(self):
with self.assertRaises(TypeError):
file2 = BaseFile(self.file)
with self.assertRaises(TypeError):
file2 = BaseFile(None)
with self.assertRaises(TypeError):
file2 = BaseFile(15)
def test_perform_operation(self):
return_value = self.file.perform_operation("/bin/echo")
self.assertEqual(return_value, 0)
def test_perform_operation_before_arg(self):
return_value = self.file.perform_operation("/bin/echo", ["before"])
self.assertEqual(return_value, 0)
def test_perform_operation_after_arg(self):
return_value = self.file.perform_operation("/bin/echo", None, ["after"])
self.assertEqual(return_value, 0)
def test_perform_operation_before_and_after_arg(self):
return_value = self.file.perform_operation("/bin/echo", ["before"], ["after"])
self.assertEqual(return_value, 0)
def test_perform_operation_wrong_arg(self):
return_value = self.file.perform_operation("/bin/sed")
self.assertEqual(return_value, 4)
def test_perform_operation_unknown_command(self):
with self.assertRaises(OSError):
return_value = self.file.perform_operation("dummytest")
def test_delete_file(self):
self.file.delete()
self.assertFalse(os.path.isfile(self.file.get_path()))
def test_delete_nonexistent_file(self):
file2 = BaseFile("dummytest")
with self.assertRaises(OSError) as cm:
file2.delete()
self.assertEqual(cm.exception.filename, "dummytest")
def test_attach(self):
self.file.attach(self)
self.assertEqual(len(self.file._observers), 1)
self.assertEqual(self.file._observers[0], self)
file2 = BaseFile("dummytest")
self.file.attach(file2)
self.assertEqual(len(self.file._observers), 2)
self.assertEqual(self.file._observers[1], file2)
def test_detach(self):
self.file.attach(self)
self.file.detach(self)
self.assertEqual(len(self.file._observers), 0)
def test_notify(self):
self.file.notify()
self.assertIsNone(self.updater)
self.file.attach(self)
self.file.notify()
self.assertEqual(self.updater, self.file)
self.file.detach(self)
self.updater = None
self.file.notify()
self.assertIsNone(self.updater)
if __name__ == '__main__':
unittest.main()
| 3.203125
| 3
|