| column | dtype | values |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 5 to 2.06M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 to 248 |
| max_stars_repo_name | string | length 5 to 125 |
| max_stars_repo_head_hexsha | string | length 40 to 78 |
| max_stars_repo_licenses | list | length 1 to 10 |
| max_stars_count | int64 | 1 to 191k, nullable |
| max_stars_repo_stars_event_min_datetime | string | length 24, nullable |
| max_stars_repo_stars_event_max_datetime | string | length 24, nullable |
| max_issues_repo_path | string | length 3 to 248 |
| max_issues_repo_name | string | length 5 to 125 |
| max_issues_repo_head_hexsha | string | length 40 to 78 |
| max_issues_repo_licenses | list | length 1 to 10 |
| max_issues_count | int64 | 1 to 67k, nullable |
| max_issues_repo_issues_event_min_datetime | string | length 24, nullable |
| max_issues_repo_issues_event_max_datetime | string | length 24, nullable |
| max_forks_repo_path | string | length 3 to 248 |
| max_forks_repo_name | string | length 5 to 125 |
| max_forks_repo_head_hexsha | string | length 40 to 78 |
| max_forks_repo_licenses | list | length 1 to 10 |
| max_forks_count | int64 | 1 to 105k, nullable |
| max_forks_repo_forks_event_min_datetime | string | length 24, nullable |
| max_forks_repo_forks_event_max_datetime | string | length 24, nullable |
| content | string | length 5 to 2.06M |
| avg_line_length | float64 | 1 to 1.02M |
| max_line_length | int64 | 3 to 1.03M |
| alphanum_fraction | float64 | 0 to 1 |
| count_classes | int64 | 0 to 1.6M |
| score_classes | float64 | 0 to 1 |
| count_generators | int64 | 0 to 651k |
| score_generators | float64 | 0 to 1 |
| count_decorators | int64 | 0 to 990k |
| score_decorators | float64 | 0 to 1 |
| count_async_functions | int64 | 0 to 235k |
| score_async_functions | float64 | 0 to 1 |
| count_documentation | int64 | 0 to 1.04M |
| score_documentation | float64 | 0 to 1 |
hexsha: b7f8ec16e2bfb80be5a624728d6c0040fc0bbacb | size: 16,352 | ext: py | lang: Python
max_stars: cpp-linux/Release/envcpp.py | thu-media/Comyco @ 38cc0266b1c0a9f20e48a173d0157452cb411b85 | ["BSD-2-Clause"] | stars: 40 | 2019-08-09T07:33:41.000Z to 2021-11-26T06:58:44.000Z
max_issues: cpp-linux/Release/envcpp.py | ragnarkor/Comyco @ 38cc0266b1c0a9f20e48a173d0157452cb411b85 | ["BSD-2-Clause"] | issues: 9 | 2019-10-09T03:10:46.000Z to 2021-12-26T15:31:15.000Z
max_forks: cpp-linux/Release/envcpp.py | ragnarkor/Comyco @ 38cc0266b1c0a9f20e48a173d0157452cb411b85 | ["BSD-2-Clause"] | forks: 12 | 2019-11-06T08:31:19.000Z to 2021-11-12T09:56:37.000Z
content:
# This file was automatically generated by SWIG (http://www.swig.org).
# Version 4.0.0
#
# Do not make changes to this file unless you know what you are doing--modify
# the SWIG interface file instead.
from sys import version_info as _swig_python_version_info
if _swig_python_version_info < (2, 7, 0):
raise RuntimeError('Python 2.7 or later required')
# Import the low-level C/C++ module
if __package__ or '.' in __name__:
from . import _envcpp
else:
import _envcpp
try:
import builtins as __builtin__
except ImportError:
import __builtin__
def _swig_setattr_nondynamic(self, class_type, name, value, static=1):
if name == "thisown":
return self.this.own(value)
if name == "this":
if type(value).__name__ == 'SwigPyObject':
self.__dict__[name] = value
return
method = class_type.__swig_setmethods__.get(name, None)
if method:
return method(self, value)
if not static:
object.__setattr__(self, name, value)
else:
raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self, class_type, name, value):
return _swig_setattr_nondynamic(self, class_type, name, value, 0)
def _swig_getattr(self, class_type, name):
if name == "thisown":
return self.this.own()
method = class_type.__swig_getmethods__.get(name, None)
if method:
return method(self)
raise AttributeError("'%s' object has no attribute '%s'" % (class_type.__name__, name))
def _swig_repr(self):
try:
strthis = "proxy of " + self.this.__repr__()
except __builtin__.Exception:
strthis = ""
return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
def _swig_setattr_nondynamic_instance_variable(set):
def set_instance_attr(self, name, value):
if name == "thisown":
self.this.own(value)
elif name == "this":
set(self, name, value)
elif hasattr(self, name) and isinstance(getattr(type(self), name), property):
set(self, name, value)
else:
raise AttributeError("You cannot add instance attributes to %s" % self)
return set_instance_attr
def _swig_setattr_nondynamic_class_variable(set):
def set_class_attr(cls, name, value):
if hasattr(cls, name) and not isinstance(getattr(cls, name), property):
set(cls, name, value)
else:
raise AttributeError("You cannot add class attributes to %s" % cls)
return set_class_attr
def _swig_add_metaclass(metaclass):
"""Class decorator for adding a metaclass to a SWIG wrapped class - a slimmed down version of six.add_metaclass"""
def wrapper(cls):
return metaclass(cls.__name__, cls.__bases__, cls.__dict__.copy())
return wrapper
class _SwigNonDynamicMeta(type):
"""Meta class to enforce nondynamic attributes (no new attributes) for a class"""
__setattr__ = _swig_setattr_nondynamic_class_variable(type.__setattr__)
class SwigPyIterator(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
def __init__(self, *args, **kwargs):
raise AttributeError("No constructor defined - class is abstract")
__repr__ = _swig_repr
__swig_destroy__ = _envcpp.delete_SwigPyIterator
def value(self):
return _envcpp.SwigPyIterator_value(self)
def incr(self, n=1):
return _envcpp.SwigPyIterator_incr(self, n)
def decr(self, n=1):
return _envcpp.SwigPyIterator_decr(self, n)
def distance(self, x):
return _envcpp.SwigPyIterator_distance(self, x)
def equal(self, x):
return _envcpp.SwigPyIterator_equal(self, x)
def copy(self):
return _envcpp.SwigPyIterator_copy(self)
def next(self):
return _envcpp.SwigPyIterator_next(self)
def __next__(self):
return _envcpp.SwigPyIterator___next__(self)
def previous(self):
return _envcpp.SwigPyIterator_previous(self)
def advance(self, n):
return _envcpp.SwigPyIterator_advance(self, n)
def __eq__(self, x):
return _envcpp.SwigPyIterator___eq__(self, x)
def __ne__(self, x):
return _envcpp.SwigPyIterator___ne__(self, x)
def __iadd__(self, n):
return _envcpp.SwigPyIterator___iadd__(self, n)
def __isub__(self, n):
return _envcpp.SwigPyIterator___isub__(self, n)
def __add__(self, n):
return _envcpp.SwigPyIterator___add__(self, n)
def __sub__(self, *args):
return _envcpp.SwigPyIterator___sub__(self, *args)
def __iter__(self):
return self
# Register SwigPyIterator in _envcpp:
_envcpp.SwigPyIterator_swigregister(SwigPyIterator)
class vectori(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self):
return _envcpp.vectori_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _envcpp.vectori___nonzero__(self)
def __bool__(self):
return _envcpp.vectori___bool__(self)
def __len__(self):
return _envcpp.vectori___len__(self)
def __getslice__(self, i, j):
return _envcpp.vectori___getslice__(self, i, j)
def __setslice__(self, *args):
return _envcpp.vectori___setslice__(self, *args)
def __delslice__(self, i, j):
return _envcpp.vectori___delslice__(self, i, j)
def __delitem__(self, *args):
return _envcpp.vectori___delitem__(self, *args)
def __getitem__(self, *args):
return _envcpp.vectori___getitem__(self, *args)
def __setitem__(self, *args):
return _envcpp.vectori___setitem__(self, *args)
def pop(self):
return _envcpp.vectori_pop(self)
def append(self, x):
return _envcpp.vectori_append(self, x)
def empty(self):
return _envcpp.vectori_empty(self)
def size(self):
return _envcpp.vectori_size(self)
def swap(self, v):
return _envcpp.vectori_swap(self, v)
def begin(self):
return _envcpp.vectori_begin(self)
def end(self):
return _envcpp.vectori_end(self)
def rbegin(self):
return _envcpp.vectori_rbegin(self)
def rend(self):
return _envcpp.vectori_rend(self)
def clear(self):
return _envcpp.vectori_clear(self)
def get_allocator(self):
return _envcpp.vectori_get_allocator(self)
def pop_back(self):
return _envcpp.vectori_pop_back(self)
def erase(self, *args):
return _envcpp.vectori_erase(self, *args)
def __init__(self, *args):
_envcpp.vectori_swiginit(self, _envcpp.new_vectori(*args))
def push_back(self, x):
return _envcpp.vectori_push_back(self, x)
def front(self):
return _envcpp.vectori_front(self)
def back(self):
return _envcpp.vectori_back(self)
def assign(self, n, x):
return _envcpp.vectori_assign(self, n, x)
def resize(self, *args):
return _envcpp.vectori_resize(self, *args)
def insert(self, *args):
return _envcpp.vectori_insert(self, *args)
def reserve(self, n):
return _envcpp.vectori_reserve(self, n)
def capacity(self):
return _envcpp.vectori_capacity(self)
__swig_destroy__ = _envcpp.delete_vectori
# Register vectori in _envcpp:
_envcpp.vectori_swigregister(vectori)
class vectord(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self):
return _envcpp.vectord_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _envcpp.vectord___nonzero__(self)
def __bool__(self):
return _envcpp.vectord___bool__(self)
def __len__(self):
return _envcpp.vectord___len__(self)
def __getslice__(self, i, j):
return _envcpp.vectord___getslice__(self, i, j)
def __setslice__(self, *args):
return _envcpp.vectord___setslice__(self, *args)
def __delslice__(self, i, j):
return _envcpp.vectord___delslice__(self, i, j)
def __delitem__(self, *args):
return _envcpp.vectord___delitem__(self, *args)
def __getitem__(self, *args):
return _envcpp.vectord___getitem__(self, *args)
def __setitem__(self, *args):
return _envcpp.vectord___setitem__(self, *args)
def pop(self):
return _envcpp.vectord_pop(self)
def append(self, x):
return _envcpp.vectord_append(self, x)
def empty(self):
return _envcpp.vectord_empty(self)
def size(self):
return _envcpp.vectord_size(self)
def swap(self, v):
return _envcpp.vectord_swap(self, v)
def begin(self):
return _envcpp.vectord_begin(self)
def end(self):
return _envcpp.vectord_end(self)
def rbegin(self):
return _envcpp.vectord_rbegin(self)
def rend(self):
return _envcpp.vectord_rend(self)
def clear(self):
return _envcpp.vectord_clear(self)
def get_allocator(self):
return _envcpp.vectord_get_allocator(self)
def pop_back(self):
return _envcpp.vectord_pop_back(self)
def erase(self, *args):
return _envcpp.vectord_erase(self, *args)
def __init__(self, *args):
_envcpp.vectord_swiginit(self, _envcpp.new_vectord(*args))
def push_back(self, x):
return _envcpp.vectord_push_back(self, x)
def front(self):
return _envcpp.vectord_front(self)
def back(self):
return _envcpp.vectord_back(self)
def assign(self, n, x):
return _envcpp.vectord_assign(self, n, x)
def resize(self, *args):
return _envcpp.vectord_resize(self, *args)
def insert(self, *args):
return _envcpp.vectord_insert(self, *args)
def reserve(self, n):
return _envcpp.vectord_reserve(self, n)
def capacity(self):
return _envcpp.vectord_capacity(self)
__swig_destroy__ = _envcpp.delete_vectord
# Register vectord in _envcpp:
_envcpp.vectord_swigregister(vectord)
class vectors(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def iterator(self):
return _envcpp.vectors_iterator(self)
def __iter__(self):
return self.iterator()
def __nonzero__(self):
return _envcpp.vectors___nonzero__(self)
def __bool__(self):
return _envcpp.vectors___bool__(self)
def __len__(self):
return _envcpp.vectors___len__(self)
def __getslice__(self, i, j):
return _envcpp.vectors___getslice__(self, i, j)
def __setslice__(self, *args):
return _envcpp.vectors___setslice__(self, *args)
def __delslice__(self, i, j):
return _envcpp.vectors___delslice__(self, i, j)
def __delitem__(self, *args):
return _envcpp.vectors___delitem__(self, *args)
def __getitem__(self, *args):
return _envcpp.vectors___getitem__(self, *args)
def __setitem__(self, *args):
return _envcpp.vectors___setitem__(self, *args)
def pop(self):
return _envcpp.vectors_pop(self)
def append(self, x):
return _envcpp.vectors_append(self, x)
def empty(self):
return _envcpp.vectors_empty(self)
def size(self):
return _envcpp.vectors_size(self)
def swap(self, v):
return _envcpp.vectors_swap(self, v)
def begin(self):
return _envcpp.vectors_begin(self)
def end(self):
return _envcpp.vectors_end(self)
def rbegin(self):
return _envcpp.vectors_rbegin(self)
def rend(self):
return _envcpp.vectors_rend(self)
def clear(self):
return _envcpp.vectors_clear(self)
def get_allocator(self):
return _envcpp.vectors_get_allocator(self)
def pop_back(self):
return _envcpp.vectors_pop_back(self)
def erase(self, *args):
return _envcpp.vectors_erase(self, *args)
def __init__(self, *args):
_envcpp.vectors_swiginit(self, _envcpp.new_vectors(*args))
def push_back(self, x):
return _envcpp.vectors_push_back(self, x)
def front(self):
return _envcpp.vectors_front(self)
def back(self):
return _envcpp.vectors_back(self)
def assign(self, n, x):
return _envcpp.vectors_assign(self, n, x)
def resize(self, *args):
return _envcpp.vectors_resize(self, *args)
def insert(self, *args):
return _envcpp.vectors_insert(self, *args)
def reserve(self, n):
return _envcpp.vectors_reserve(self, n)
def capacity(self):
return _envcpp.vectors_capacity(self)
__swig_destroy__ = _envcpp.delete_vectors
# Register vectors in _envcpp:
_envcpp.vectors_swigregister(vectors)
class Environment(object):
thisown = property(lambda x: x.this.own(), lambda x, v: x.this.own(v), doc='The membership flag')
__repr__ = _swig_repr
def __init__(self, filedir):
_envcpp.Environment_swiginit(self, _envcpp.new_Environment(filedir))
__swig_destroy__ = _envcpp.delete_Environment
def get_download_time(self, video_chunk_size):
return _envcpp.Environment_get_download_time(self, video_chunk_size)
def reset_download_time(self):
return _envcpp.Environment_reset_download_time(self)
def get_video_chunk(self, quality):
return _envcpp.Environment_get_video_chunk(self, quality)
def get_optimal(self, last_video_vmaf):
return _envcpp.Environment_get_optimal(self, last_video_vmaf)
optimal = property(_envcpp.Environment_optimal_get, _envcpp.Environment_optimal_set)
delay0 = property(_envcpp.Environment_delay0_get, _envcpp.Environment_delay0_set)
sleep_time0 = property(_envcpp.Environment_sleep_time0_get, _envcpp.Environment_sleep_time0_set)
return_buffer_size0 = property(_envcpp.Environment_return_buffer_size0_get, _envcpp.Environment_return_buffer_size0_set)
rebuf0 = property(_envcpp.Environment_rebuf0_get, _envcpp.Environment_rebuf0_set)
video_chunk_size0 = property(_envcpp.Environment_video_chunk_size0_get, _envcpp.Environment_video_chunk_size0_set)
end_of_video0 = property(_envcpp.Environment_end_of_video0_get, _envcpp.Environment_end_of_video0_set)
video_chunk_remain0 = property(_envcpp.Environment_video_chunk_remain0_get, _envcpp.Environment_video_chunk_remain0_set)
video_chunk_vmaf0 = property(_envcpp.Environment_video_chunk_vmaf0_get, _envcpp.Environment_video_chunk_vmaf0_set)
all_cooked_bw = property(_envcpp.Environment_all_cooked_bw_get, _envcpp.Environment_all_cooked_bw_set)
all_cooked_time = property(_envcpp.Environment_all_cooked_time_get, _envcpp.Environment_all_cooked_time_set)
CHUNK_COMBO_OPTIONS = property(_envcpp.Environment_CHUNK_COMBO_OPTIONS_get, _envcpp.Environment_CHUNK_COMBO_OPTIONS_set)
all_file_names = property(_envcpp.Environment_all_file_names_get, _envcpp.Environment_all_file_names_set)
video_chunk_counter = property(_envcpp.Environment_video_chunk_counter_get, _envcpp.Environment_video_chunk_counter_set)
buffer_size = property(_envcpp.Environment_buffer_size_get, _envcpp.Environment_buffer_size_set)
trace_idx = property(_envcpp.Environment_trace_idx_get, _envcpp.Environment_trace_idx_set)
cooked_time = property(_envcpp.Environment_cooked_time_get, _envcpp.Environment_cooked_time_set)
cooked_bw = property(_envcpp.Environment_cooked_bw_get, _envcpp.Environment_cooked_bw_set)
mahimahi_start_ptr = property(_envcpp.Environment_mahimahi_start_ptr_get, _envcpp.Environment_mahimahi_start_ptr_set)
mahimahi_ptr = property(_envcpp.Environment_mahimahi_ptr_get, _envcpp.Environment_mahimahi_ptr_set)
last_mahimahi_time = property(_envcpp.Environment_last_mahimahi_time_get, _envcpp.Environment_last_mahimahi_time_set)
virtual_mahimahi_ptr = property(_envcpp.Environment_virtual_mahimahi_ptr_get, _envcpp.Environment_virtual_mahimahi_ptr_set)
virtual_last_mahimahi_time = property(_envcpp.Environment_virtual_last_mahimahi_time_get, _envcpp.Environment_virtual_last_mahimahi_time_set)
# Register Environment in _envcpp:
_envcpp.Environment_swigregister(Environment)
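The generated module above only defines the SWIG proxy classes. A minimal sketch of how the wrapped `Environment` might be driven from Python is shown below; the trace directory, chunk count, and quality index are illustrative assumptions, not values taken from the repository.

```python
# Hypothetical driver for the SWIG-wrapped simulator; paths and indices are assumptions.
from envcpp import Environment

env = Environment("./traces/")            # assumed directory of cooked bandwidth traces
for _ in range(48):                       # assumed number of chunks in one video
    env.get_video_chunk(2)                # fetch the next chunk at quality level 2
    print(env.delay0, env.rebuf0, env.return_buffer_size0)
    if env.end_of_video0:
        break
```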
avg_line_length: 31.446154 | max_line_length: 145 | alphanum_fraction: 0.707131 | count_classes: 13,136 | score_classes: 0.803327 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 999 | score_documentation: 0.061093

hexsha: b7f8f59fb0fb637edfdf3e834168a1ea050cd659 | size: 3,912 | ext: py | lang: Python
max_stars: eda_rf.py | lel23/Student-Performance-Prediction @ 93f850d299f6e6ad88a90e606f494fcd931e56b6 | ["MIT"] | stars: 1 | 2021-11-27T01:55:44.000Z to 2021-11-27T01:55:44.000Z
max_issues: eda_rf.py | lel23/Student-Performance-Prediction @ 93f850d299f6e6ad88a90e606f494fcd931e56b6 | ["MIT"] | issues: null
max_forks: eda_rf.py | lel23/Student-Performance-Prediction @ 93f850d299f6e6ad88a90e606f494fcd931e56b6 | ["MIT"] | forks: 1 | 2021-12-13T15:46:43.000Z to 2021-12-13T15:46:43.000Z
content:
"""
Final Project
EDA
"""
import pandas as pd
import matplotlib.pyplot as plt
from mlxtend.plotting import scatterplotmatrix
import numpy as np
import seaborn as sns
from imblearn.over_sampling import SMOTE
from sklearn.utils import resample
from mlxtend.plotting import heatmap
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.feature_selection import SelectFromModel
import sys
from sklearn.model_selection import train_test_split
from collections import Counter
df = pd.read_csv('student-mat-edited.csv')
df['school'] = df['school'].replace(['GP', 'MS'], [1, 0])
df['sex'] = df['sex'].replace(['M', 'F'], [1, 0])
df['address'] = df['address'].replace(['U', 'R'], [1, 0])
df['famsize'] = df['famsize'].replace(['GT3', 'LE3'], [1, 0])
df['Pstatus'] = df['Pstatus'].replace(['T', 'A'], [1, 0])
df = df.replace(to_replace={'yes':1, 'no':0})
df = pd.get_dummies(df, prefix= ['Mjob', 'Fjob', 'reason', 'guardian'])
#code from: https://stackoverflow.com/questions/46168450/replace-a-specific-range-of-values-in-a-pandas-dataframe
# Convert the mean score into the integer letter-grade bands specified in the paper; the higher the number, the higher the grade
df['scores'] = df[['G1', 'G2', 'G3']].mean(axis=1)
df['scores'] = np.where(df['scores'].between(0, 10), 0, df['scores'])
df['scores'] = np.where(df['scores'].between(10, 12), 1, df['scores'])
df['scores'] = np.where(df['scores'].between(12, 14), 2, df['scores'])
df['scores'] = np.where(df['scores'].between(14, 16), 3, df['scores'])
df['scores'] = np.where(df['scores'].between(16, 21), 4, df['scores'])
df['scores'] = df['scores'].astype(int)  # use the builtin int; np.int is removed in NumPy >= 1.24
df = df.drop(index=1, columns=['G1', 'G2', 'G3'])
#separate into features and target
X = df[[i for i in list(df.columns) if i != 'scores']]
y = df['scores']
# fixing class imbalance
#https://machinelearningmastery.com/multi-class-imbalanced-classification/
oversample = SMOTE(random_state=0)
X, y = oversample.fit_resample(X, y)
# splitting training and test data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=0, stratify=y)
# min-max scaling
mms = MinMaxScaler()
X_train_norm = mms.fit_transform(X_train)
X_test_norm = mms.transform(X_test)
# standardizing the data
stdsc = StandardScaler()
X_train_std = stdsc.fit_transform(X_train)
X_test_std = stdsc.transform(X_test)
# Random Forest Feature Selection
feat_labels = X.columns
forest = RandomForestClassifier(n_estimators=500, random_state=0)
forest.fit(X_train, y_train)
importances = forest.feature_importances_
indices = np.argsort(importances)[::-1]
for f in range(X_train.shape[1]):
print("%2d) %-*s %f" % (f + 1, 30, feat_labels[indices[f]], importances[indices[f]]))
plt.title('Feature Importance')
plt.bar(range(X_train.shape[1]), importances[indices], align='center')
plt.xticks(range(X_train.shape[1]), feat_labels[indices], rotation=90)
plt.xlim([-1, X_train.shape[1]])
plt.tight_layout()
plt.savefig("rf_selection.png")
plt.show()
sfm = SelectFromModel(forest, threshold=0.04, prefit=True)
X_selected = sfm.transform(X_train)
print('Number of features that meet this threshold', 'criterion:', X_selected.shape[1])
# Print the features that met the feature-selection threshold set above:
cols = []
for f in range(X_selected.shape[1]):
cols.append(feat_labels[indices[f]])
print("%2d) %-*s %f" % (f + 1, 30,
feat_labels[indices[f]],
importances[indices[f]]))
# Correlation heatmap
cols.append("scores")
cm = np.corrcoef(df[cols].values.T)
hm = heatmap(cm, row_names=cols, column_names=cols, figsize=(10, 8))
plt.savefig("corr_matrix.png")
plt.show()
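The script above fits the random forest only to rank features. As a hedged follow-up (not part of the original file), the test accuracy of that same forest could be reported using the objects already defined:

```python
# Evaluate the already-fitted forest on the held-out split (illustrative addition).
from sklearn.metrics import accuracy_score

y_pred = forest.predict(X_test)
print('Random forest test accuracy: %.3f' % accuracy_score(y_test, y_pred))
```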
avg_line_length: 35.563636 | max_line_length: 238 | alphanum_fraction: 0.707311 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,288 | score_documentation: 0.329243

hexsha: b7f97933fc0d2a4db780092adb873088bd108cdc | size: 3,771 | ext: py | lang: Python
max_stars: feastruct/fea/utils.py | geosharma/feastruct @ 67cbf1c07d5f718c5eed4a1ac69e5cf0dc588ca1 | ["MIT"] | stars: 37 | 2018-11-08T12:51:53.000Z to 2022-02-01T19:40:48.000Z
max_issues: feastruct/fea/utils.py | geosharma/feastruct @ 67cbf1c07d5f718c5eed4a1ac69e5cf0dc588ca1 | ["MIT"] | issues: 2 | 2018-11-01T12:39:24.000Z to 2022-01-23T01:26:47.000Z
max_forks: feastruct/fea/utils.py | geosharma/feastruct @ 67cbf1c07d5f718c5eed4a1ac69e5cf0dc588ca1 | ["MIT"] | forks: 12 | 2019-04-09T04:14:02.000Z to 2022-01-08T14:04:32.000Z
content:
import numpy as np
def gauss_points(el_type, n):
"""Returns the Gaussian weights and locations for *n* point Gaussian integration of a finite
element. Refer to xxx for a list of the element types.
:param string el_type: String describing the element type
:param int n: Number of Gauss points
:returns: The integration weights *(n x 1)* and an *(n x i)* matrix consisting of the values of
the *i* shape functions for *n* Gauss points
:rtype: tuple(list[float], :class:`numpy.ndarray`)
"""
if el_type == 'Tri6':
# one point gaussian integration
if n == 1:
weights = [1]
gps = np.array([[1.0 / 3, 1.0 / 3, 1.0 / 3]])
# three point gaussian integration
elif n == 3:
weights = [1.0 / 3, 1.0 / 3, 1.0 / 3]
gps = np.array([
[2.0 / 3, 1.0 / 6, 1.0 / 6],
[1.0 / 6, 2.0 / 3, 1.0 / 6],
[1.0 / 6, 1.0 / 6, 2.0 / 3]
])
# six point gaussian integration
elif n == 6:
g1 = 1.0 / 18 * (8 - np.sqrt(10) + np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))
g2 = 1.0 / 18 * (8 - np.sqrt(10) - np.sqrt(38 - 44 * np.sqrt(2.0 / 5)))
w1 = (620 + np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720
w2 = (620 - np.sqrt(213125 - 53320 * np.sqrt(10))) / 3720
weights = [w2, w2, w2, w1, w1, w1]
gps = np.array([
[1 - 2 * g2, g2, g2],
[g2, 1 - 2 * g2, g2],
[g2, g2, 1 - 2 * g2],
[g1, g1, 1 - 2 * g1],
[1 - 2 * g1, g1, g1],
[g1, 1 - 2 * g1, g1]
])
return (weights, gps)
def shape_function(el_type, coords, gp):
"""Computes shape functions, shape function derivatives and the determinant of the Jacobian
matrix for a number of different finite elements at a given Gauss point. Refer to xxx for a
list of the element types.
:param string el_type: String describing the element type
:param coords: Global coordinates of the element nodes *(n x 3)*, where *n* is the number of
nodes
:type coords: :class:`numpy.ndarray`
:param gp: Isoparametric location of the Gauss point
:type gp: :class:`numpy.ndarray`
:returns: The value of the shape functions *N(i)* at the given Gauss point *(1 x n)*, the
derivative of the shape functions in the j-th global direction *B(i,j)* *(3 x n)* and the
determinant of the Jacobian matrix *j*
:rtype: tuple(:class:`numpy.ndarray`, :class:`numpy.ndarray`, float)
"""
if el_type == 'Tri6':
# location of isoparametric co-ordinates for each Gauss point
eta = gp[0]
xi = gp[1]
zeta = gp[2]
# value of the shape functions
N = np.array([
eta * (2 * eta - 1),
xi * (2 * xi - 1),
zeta * (2 * zeta - 1),
4 * eta * xi,
4 * xi * zeta,
4 * eta * zeta
])
# derivatives of the sf wrt the isoparametric co-ordinates
B_iso = np.array([
[4 * eta - 1, 0, 0, 4 * xi, 0, 4 * zeta],
[0, 4 * xi - 1, 0, 4 * eta, 4 * zeta, 0],
[0, 0, 4 * zeta - 1, 0, 4 * xi, 4 * eta]
])
# form Jacobian matrix
J_upper = np.array([[1, 1, 1]])
J_lower = np.dot(coords, np.transpose(B_iso))
J = np.vstack((J_upper, J_lower))
# calculate the jacobian
j = 0.5 * np.linalg.det(J)
        # calculate the P matrix
P = np.dot(np.linalg.inv(J), np.array([[0, 0], [1, 0], [0, 1]]))
# calculate the B matrix in terms of cartesian co-ordinates
B = np.transpose(np.dot(np.transpose(B_iso), P))
return (N, B, j)
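As an illustrative sanity check (not part of the library), the three-point rule's weights should sum to one and the Tri6 shape functions should form a partition of unity; the 2 x 6 nodal coordinate layout used here is inferred from the matrix algebra in `shape_function`.

```python
import numpy as np

weights, gps = gauss_points('Tri6', 3)
assert np.isclose(sum(weights), 1.0)                   # normalised triangle rule

coords = np.array([[0.0, 1.0, 0.0, 0.5, 0.5, 0.0],     # x: corner nodes then mid-side nodes
                   [0.0, 0.0, 1.0, 0.0, 0.5, 0.5]])    # y
for gp in gps:
    N, B, j = shape_function('Tri6', coords, gp)
    assert np.isclose(N.sum(), 1.0)                    # partition of unity at every Gauss point
```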
avg_line_length: 35.914286 | max_line_length: 99 | alphanum_fraction: 0.515248 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 1,705 | score_documentation: 0.452135

hexsha: b7f9ec5f1030d590ec1e3d249bcbd427149dded0 | size: 1,447 | ext: py | lang: Python
max_stars: webscrap.py | ircykk/webscrap @ b43d2a1075dbe6c6644391c3b79785375b207559 | ["MIT"] | stars: null
max_issues: webscrap.py | ircykk/webscrap @ b43d2a1075dbe6c6644391c3b79785375b207559 | ["MIT"] | issues: 2 | 2021-03-31T19:16:56.000Z to 2021-12-13T20:19:00.000Z
max_forks: webscrap.py | ircykk/webscrap @ b43d2a1075dbe6c6644391c3b79785375b207559 | ["MIT"] | forks: null
content:
import requests
import time
import argparse
import sys
import os
from bs4 import BeautifulSoup
from urllib.parse import urlparse
def is_url(url):
try:
result = urlparse(url)
return all([result.scheme, result.netloc])
except ValueError:
return False
def fetch_urls(page):
r = requests.get(page)
soup = BeautifulSoup(r.text, 'lxml')
for a in soup.find_all('a', href=True):
url = a.get('href')
# http://example.com == http://example.com/
url = url.rstrip('/')
if is_url(url) and url not in urls:
urls.append(url)
def print_progress(iteration, total):
print('\r%s/%s [%s...]' % (iteration, total, urls[-1][:64]), end = '\r')
# Instantiate the parser
parser = argparse.ArgumentParser(description='URL scraper')
parser.add_argument('--url', help='Root URL page')
parser.add_argument('--limit', type=int, default=1000, help='Limit urls to scrape')
parser.add_argument('--output', default='output.csv', help='Path to output file')
args = parser.parse_args()
urls = []
urls_visited = []
if not is_url(args.url):
    print('Invalid root URL [--url]')
    sys.exit(1)
fetch_urls(args.url)
urls_visited.append(args.url)
for url in urls:
if len(urls) > args.limit:
break
print_progress(len(urls), args.limit)
    if url not in urls_visited:
        urls_visited.append(url)
        fetch_urls(url)
# Save output, replacing any previous file
if os.path.exists(args.output):
    os.remove(args.output)
with open(args.output, 'a') as output:
for url in urls:
output.write(url + '\n')
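For reference, the `is_url` helper accepts any string with both a scheme and a network location; a tiny check with placeholder values (only meaningful if the functions are imported rather than run as a script):

```python
assert is_url('https://example.com')      # scheme and netloc present
assert not is_url('not a url')            # no scheme or netloc, rejected
```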
avg_line_length: 22.968254 | max_line_length: 83 | alphanum_fraction: 0.691776 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 262 | score_documentation: 0.181064

hexsha: b7fa464b97651a98f542160b4536fc5d2f36512c | size: 3,035 | ext: py | lang: Python
max_stars: lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift @ effce9bea894f990703cc047157e3f30d53d9365 | ["MIT"] | stars: 2 | 2022-01-19T02:39:43.000Z to 2022-02-07T01:58:17.000Z
max_issues: lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift @ effce9bea894f990703cc047157e3f30d53d9365 | ["MIT"] | issues: null
max_forks: lib/recipetool/shift_oelint_adv/rule_base/rule_var_src_uri_checksum.py | shift-left-test/meta-shift @ effce9bea894f990703cc047157e3f30d53d9365 | ["MIT"] | forks: null
content:
from shift_oelint_parser.cls_item import Variable
from shift_oelint_adv.cls_rule import Rule
from shift_oelint_parser.helper_files import get_scr_components
from shift_oelint_parser.parser import INLINE_BLOCK
class VarSRCUriOptions(Rule):
def __init__(self):
super(VarSRCUriOptions, self).__init__(id="oelint.vars.srcurichecksum",
severity="error",
message="<FOO>")
def check(self, _file, stash):
res = []
items = stash.GetItemsFor(filename=_file, classifier=Variable.CLASSIFIER,
attribute=Variable.ATTR_VAR, attributeValue="SRC_URI")
md5sum = []
sha256sum = []
res_candidate = []
for i in items:
if i.Flag.endswith("md5sum"):
if i.Flag == "md5sum":
md5sum.append("")
else:
md5sum.append(i.Flag.rsplit(".", 1)[0])
elif i.Flag.endswith("sha256sum"):
if i.Flag == "sha256sum":
sha256sum.append("")
else:
sha256sum.append(i.Flag.rsplit(".", 1)[0])
else:
lines = [y.strip('"') for y in i.get_items() if y]
for x in lines:
if x == INLINE_BLOCK:
continue
_url = get_scr_components(x)
if _url["scheme"] in ["http", "https", "ftp", "ftps", "sftp", "s3"]:
name = ""
if "name" in _url["options"]:
name = _url["options"]["name"]
res_candidate.append((name, i.Origin, i.InFileLine + lines.index(x)))
res_candidate.sort(key=lambda tup: tup[0])
no_name_src_uri = False
for (name, filename, filelines) in res_candidate:
message = ""
if name == "":
if no_name_src_uri:
message = "if SRC_URI have multiple URLs, each URL has checksum"
else:
if "" not in md5sum:
message = "SRC_URI[md5sum]"
if "" not in sha256sum:
if len(message) > 0:
message += ", "
message += "SRC_URI[sha256sum]"
if len(message) > 0:
message += " is(are) needed"
no_name_src_uri = True
else:
if name not in md5sum:
message = "SRC_URI[%s.md5sum]" % name
if name not in sha256sum:
if len(message) > 0:
message += ", "
message += "SRC_URI[%s.sha256sum]" % name
if len(message) > 0:
message += " is(are) needed"
if len(message) > 0:
res += self.finding(filename, filelines, message)
return res
avg_line_length: 41.013514 | max_line_length: 93 | alphanum_fraction: 0.45832 | count_classes: 2,823 | score_classes: 0.930148 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 360 | score_documentation: 0.118616

hexsha: b7fab4376dcf24e3dbd079130cdac6cf32133a5b | size: 1,084 | ext: py | lang: Python
max_stars: verba/apps/auth/backends.py | nhsuk/verba @ c0354ae2012a046e7f7cc7482e293737de9d28bc | ["MIT"] | stars: null
max_issues: verba/apps/auth/backends.py | nhsuk/verba @ c0354ae2012a046e7f7cc7482e293737de9d28bc | ["MIT"] | issues: 2 | 2016-08-11T09:30:41.000Z to 2016-08-11T15:04:08.000Z
max_forks: verba/apps/auth/backends.py | nhsuk/verba @ c0354ae2012a046e7f7cc7482e293737de9d28bc | ["MIT"] | forks: 1 | 2021-04-11T07:41:27.000Z to 2021-04-11T07:41:27.000Z
content:
from github import User as GitHubUser
from github.auth import get_token
from github.exceptions import AuthValidationError
from . import get_user_model
class VerbaBackend(object):
"""
Django authentication backend which authenticates against the GitHub API.
"""
def authenticate(self, code=None):
"""
Returns a valid `VerbaUser` if the authentication is successful
or None if the token is invalid.
"""
try:
token = get_token(code)
except AuthValidationError:
return
github_user = GitHubUser.get_logged_in(token)
UserModel = get_user_model() # noqa
return UserModel(
pk=github_user.username,
token=token,
user_data={
'name': github_user.name,
'email': github_user.email,
'avatar_url': github_user.avatar_url
}
)
    def get_user(self, pk, token, user_data=None):
        UserModel = get_user_model()  # noqa
        return UserModel(pk, token, user_data=user_data or {})
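A hedged sketch of how Django might exercise this backend; the OAuth `code` is a placeholder from GitHub's redirect, and it is assumed that the `VerbaUser` model simply stores its constructor arguments as attributes.

```python
# Usage sketch (assumptions noted above); requires the github helper package.
backend = VerbaBackend()
user = backend.authenticate(code='oauth-code-from-github')   # placeholder code
if user is not None:
    print(user.pk, user.user_data.get('name'))
```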
avg_line_length: 28.526316 | max_line_length: 77 | alphanum_fraction: 0.612546 | count_classes: 929 | score_classes: 0.857011 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 254 | score_documentation: 0.234317

hexsha: b7fb6f9d3e04e66224e9cdb811584decc5862d2f | size: 798 | ext: py | lang: Python
max_stars: examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 @ becfa166b91124aa0f2ed1e5bb1ecee7a4d86fab | ["MIT"] | stars: null
max_issues: examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 @ becfa166b91124aa0f2ed1e5bb1ecee7a4d86fab | ["MIT"] | issues: null
max_forks: examples/apds9960_color_simpletest.py | tannewt/Adafruit_CircuitPython_APDS9960 @ becfa166b91124aa0f2ed1e5bb1ecee7a4d86fab | ["MIT"] | forks: null
content:
import time
import board
import busio
import digitalio
from adafruit_apds9960.apds9960 import APDS9960
from adafruit_apds9960 import colorutility
i2c = busio.I2C(board.SCL, board.SDA)
int_pin = digitalio.DigitalInOut(board.A2)
apds = APDS9960(i2c)
apds.enable_color = True
while True:
    # wait for color data to be ready
while not apds.color_data_ready:
time.sleep(0.005)
#get the data and print the different channels
r, g, b, c = apds.color_data
print("red: ", r)
print("green: ", g)
print("blue: ", b)
print("clear: ", c)
print("color temp {}".format(colorutility.calculate_color_temperature(r, g, b)))
print("light lux {}".format(colorutility.calculate_lux(r, g, b)))
time.sleep(0.5)
avg_line_length: 24.9375 | max_line_length: 84 | alphanum_fraction: 0.699248 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 189 | score_documentation: 0.236842

hexsha: b7fbdc11c64c416322347545771908c98a2d730b | size: 158 | ext: py | lang: Python
max_stars: abc/abc205/abc205b.py | c-yan/atcoder @ 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | stars: 1 | 2019-08-21T00:49:34.000Z to 2019-08-21T00:49:34.000Z
max_issues: abc/abc205/abc205b.py | c-yan/atcoder @ 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | issues: null
max_forks: abc/abc205/abc205b.py | c-yan/atcoder @ 940e49d576e6a2d734288fadaf368e486480a948 | ["MIT"] | forks: null
content:
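# Prints 'Yes' if the sequence A read from stdin is a permutation of 1..N, otherwise 'No'.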
N, *A = map(int, open(0).read().split())
A.sort()
for i in range(N):
if i == A[i] - 1:
continue
print('No')
break
else:
print('Yes')
avg_line_length: 14.363636 | max_line_length: 40 | alphanum_fraction: 0.487342 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 9 | score_documentation: 0.056962

hexsha: b7fc5371e78fe759e9cfc9ac2a197cc1a24c7ba9 | size: 1,114 | ext: py | lang: Python
max_stars: CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC @ ce26ba9a38cbd401cd405150eeed23b805007724 | ["BSD-3-Clause"] | stars: 1 | 2021-08-02T23:23:39.000Z to 2021-08-02T23:23:39.000Z
max_issues: CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC @ ce26ba9a38cbd401cd405150eeed23b805007724 | ["BSD-3-Clause"] | issues: null
max_forks: CPAC/cwas/tests/features/steps/base_cwas.py | Lawreros/C-PAC @ ce26ba9a38cbd401cd405150eeed23b805007724 | ["BSD-3-Clause"] | forks: 2 | 2021-08-02T23:23:40.000Z to 2022-02-26T12:39:30.000Z
content:
from behave import *
from hamcrest import assert_that, is_not, greater_than
import numpy as np
import nibabel as nib
import rpy2.robjects as robjects
from rpy2.robjects.numpy2ri import numpy2ri
from rpy2.robjects.packages import importr
robjects.conversion.py2ri = numpy2ri
from os import path as op
import sys
curfile = op.abspath(__file__)
testpath = op.dirname(op.dirname(op.dirname(curfile)))
rpath = op.join(testpath, "R")
pypath = op.dirname(testpath)
sys.path.append(pypath)
from cwas import *
from utils import *
def custom_corrcoef(X, Y=None):
"""Each of the columns in X will be correlated with each of the columns in
Y. Each column represents a variable, with the rows containing the observations."""
if Y is None:
Y = X
if X.shape[0] != Y.shape[0]:
raise Exception("X and Y must have the same number of rows.")
X = X.astype(float)
Y = Y.astype(float)
X -= X.mean(axis=0)[np.newaxis,...]
Y -= Y.mean(axis=0)
xx = np.sum(X**2, axis=0)
yy = np.sum(Y**2, axis=0)
r = np.dot(X.T, Y)/np.sqrt(np.multiply.outer(xx,yy))
return r
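A small self-contained check (illustrative only, not part of the test suite) that `custom_corrcoef` agrees with NumPy's built-in correlation on random data:

```python
import numpy as np

rng = np.random.default_rng(0)
data = rng.standard_normal((100, 3))                  # 100 observations of 3 variables
assert np.allclose(custom_corrcoef(data), np.corrcoef(data, rowvar=False))
```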
avg_line_length: 25.906977 | max_line_length: 87 | alphanum_fraction: 0.684022 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 210 | score_documentation: 0.18851

hexsha: b7fcfd8dcf5ce827a8535f6ece099e74d61fb49d | size: 15,109 | ext: py | lang: Python
max_stars: Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab @ 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | ["MIT"] | stars: null
max_issues: Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab @ 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | ["MIT"] | issues: null
max_forks: Analysis/CardioVascularLab/ExVivo/exvivo.py | sassystacks/TissueMechanicsLab @ 0f881a57ebf7cbadfeb2041daabd4e4b79b25b91 | ["MIT"] | forks: null
content:
import sys
sys.path.append('..')
from Analyzer.TransitionProperties import ProcessTransitionProperties
from tkinter import *
from tkinter import messagebox, ttk, filedialog
# from tkFileDialog import *
import uniaxanalysis.getproperties as getprops
from uniaxanalysis.plotdata import DataPlotter
from uniaxanalysis.saveproperties import write_props_csv
from exvivoframes import *
from matplotlib import pyplot as plt
import time
'''
The GUI for uniax data analysis of soft tissue.
inputs:
- Dimensions file - a file with format: sample name, width, thickness and initial distance
- directory - Folder with raw uniax data files in csv format with format: time, distance, force
To Do:
- polymorphic method for handling input data (variable names to get) <done>
- control when line for manual control shows up <done>
- test rdp for finding linear region - done (check implementation)
- fix point picking on plot so that it can work in descending order of x value - <done>
- tick boxes for properties <done>
- config file
- scroll bar for large data sets <done>
Bugs:
- work out bug in the 2nd order gaussian - done
- work out bug in the display for automatic linear find
- destroy instance of toolbar on graph create
- destroy instance of plot every time
'''
class StartPage:
def __init__(self, master):
# print "Start Page class started"
        # Stress and strain conventions used for the analysis (configurable below)
self.straintype = 'engineering' # can change to engineering, and lamda
self.stresstype = 'cauchy' # can change between cauchy and piola
self.master = master
self.buttonsdict = {}
self.fig = plt.figure(1)
self.transitionProps = ProcessTransitionProperties(eps=0.025)
self.plotter = DataPlotter()
# For Data Extraction
self.specimenHeaders = ["Sample", "Zone", "Region", "Specimen", "Direction"]
self.dimensionHeaders = ["Width","Thickness","Length"]
self.headersOut = ["Sample", "Zone", "Region", "Specimen", "Direction", "PointID","Strength","Stiffness"]
# this is the format of file so
# self.fileform = ["Sample", "_", "Zone", "Region","Specimen", "Direction"] #AAA data
self.fileform = ["Sample", "_","Z", "Zone", "Region","Specimen", "_","Direction"] #NIH BAV data
self.fname = '/Volumes/Biomechanics_LabShare/Abdominal\ Aortic\ Aneurysms\ Ex-vivo\ testing/Mechanical\ Testing/Uniaxial/2016-Jun10/AAA_Dimensions_2016-Jun10.csv'
self.dirname = '/Volumes/Biomechanics_LabShare/Abdominal\ Aortic\ Aneurysms\ Ex-vivo\ testing/Mechanical\ Testing/Uniaxial/2016-Jun10/FAIL'
# test things
self.fnameOut = 'TestOutputs.csv'
'''
#~~~~~~~~~~~~~~~~~~~~~~~~~ Main Layout ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
border = 3
self.frame1 = Frame(self.master, borderwidth=border, relief='raised')
self.frame1.grid(row=0, column=0, sticky='news')
self.frame2 = Frame(self.master, borderwidth=border, relief='raised')
self.frame2.grid(row=1, column=0, sticky='news', ipady=20)
self.frame3 = Frame(self.master, borderwidth=border, relief='raised')
self.frame3.grid(row=2, column=0, sticky='ew', ipady=20)
self.frame4 = Frame(self.master, borderwidth=border, relief='raised')
self.frame4.grid(row=1, column=1, sticky='ew', ipady=20)
self.frame5 = Frame(self.master, borderwidth=border, relief='raised')
self.frame5.grid(row=0, column=1, sticky='nsew', ipady=20)
self.t_frame6 = Frame(self.master, width=200,height=150, relief='raised')
self.frame6 = Frame6.Frame_6(self.t_frame6)
self.t_frame6.grid(row=0, column=2,sticky='news')
self.t_frame7 = Frame(self.master, borderwidth=border, relief='raised')
self.frame7 = Frame7.Frame_7(self.t_frame7,self.plotter)
self.t_frame7.grid(row=1, column=2,sticky='ns', ipady=20)
self.t_frame8 = Frame(self.master, borderwidth=border, relief='raised')
self.frame8 = Frame8.Frame_8(self.t_frame8, self.transitionProps)
self.t_frame8.grid(row=2, column=2,sticky='ns', ipady=20)
'''
~~~~~~~~~~~~~~~~~~~~~~ Frame 1 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
label = Label(self.frame1, text="Start Page")
label.grid(row=0, column=0)
button1 = Button(self.frame1, text="Dimensions File",
command=self.chooseDims)
button1.grid(row=1, column=0)
button2 = Button(self.frame1, text="Top Directory",
command=self.chooseDir)
button2.grid(row=2, column=0)
button3 = Button(self.frame1, text="Run SetupData",
command=self.setupData)
button3.grid(row=3, column=0)
'''
~~~~~~~~~~~~~~~~~~~~~~ Frame 2 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
# self.frame2.grid_rowconfigure(0, weight=1)
# self.frame2.grid_columnconfigure(0, weight=1)
# self.frame2.grid_propagate(False)
self.buttonCanvas = Canvas(self.frame2)
self.xButtonScroller = Scrollbar(self.frame2,orient="horizontal",
command=self.buttonCanvas.xview)
self.yButtonScroller = Scrollbar(self.frame2,
command=self.buttonCanvas.yview)
self.buttonFrame = Frame(self.buttonCanvas)
self.buttonCanvas.create_window((4,10), window=self.buttonFrame, anchor="nw",
tags="self.frame")
self.buttonFrame.bind("<Configure>", self.onFrameConfigure)
self.buttonCanvas.config(yscrollcommand=self.yButtonScroller.set)
self.buttonCanvas.config(xscrollcommand=self.xButtonScroller.set)
self.buttonCanvas.grid(row=0,column=0,sticky='nwse')
self.yButtonScroller.grid(row=0,column=1,sticky='ns')
self.xButtonScroller.grid(row=1,column=0,sticky='ew')
'''
~~~~~~~~~~~~~~~~~~~~~~ Frame 3 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
button4 = Button(self.frame3, text="Good", bg='green',
command=self.write_analysis)
button4.grid(row=0, column=0, sticky='w')
changeLabel = Label(self.frame3, text="Properties to Change")
changeLabel.grid(row=0, column=1)
button5 = Button(self.frame3, text="Ultimate Stress",
command=self.get_uts)
button5.grid(row=1, column=1)
button5 = Button(self.frame3, text="Linear Stiffness",
command=self.get_linear)
button5.grid(row=2, column=1)
'''
~~~~~~~~~~~~~~~~~~~~~~ Frame 4 Widgets ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
canvas = self.plotter.plot_graph(self.frame4, self.frame5, Row=0, Col=0)
'''
~~~~~~~~~~~~~~~ key Bindings ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
self.master.bind('<Escape>', lambda e: self.master.destroy())
self.master.bind('<Return>', self.frame8._UpdateEpsilonCallback())
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 1 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
def chooseDims(self):
self.fname = filedialog.askopenfilename()
def chooseDir(self):
self.dirname = filedialog.askdirectory()
def setupData(self):
# check if there is an filename for dimensions and Directory
# name for the corresponding raw data files
if self.fname and self.dirname:
import uniaxanalysis.parsecsv
# Dictionary to pass to parsecsv for obtaining data on specimen
args_dict = {
'dimsfile': self.fname,
'topdir': self.dirname,
'timestep': 0.05,
'headersOut': self.headersOut,
'specimenHeaders': self.specimenHeaders,
'dimsHeaders': self.dimensionHeaders,
'fileform': self.fileform,
}
# instantiate parsecsv class to get the data to plot and analyze
self.csvDataParser = uniaxanalysis.parsecsv(**args_dict)
# Create the list of specimens to be tested from Dimensions file
self.sampleList = self.csvDataParser.getMatchingData(
self.csvDataParser.dimsFile,
self.csvDataParser.topDir)
self.addButtons()
else:
print("please get a directory and a dimensions file for the analysis")
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 2 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
def addButtons(self):
# place a button for each sample in a panel
import math
# create button names from each sample in the list
buttonnames = [name[0] for name in self.sampleList]
# Make 3 columns of buttons
row = math.ceil(len(buttonnames)/3.0)
col = 3
padlist = [(i, j) for i in range(int(row)) for j in range(col)]
diff = len(padlist) - len(buttonnames)
if diff > 0:
padlist = padlist[:-diff]
# Create a rectangular list of objects to store all of the sample names as
# tk button objects
fullList = zip(buttonnames, padlist)
#
for name, indx in fullList:
self.buttonsdict[name] = Button(self.buttonFrame, text=name)
self.buttonsdict[name]['command'] = lambda sample = name: self.getGraph(sample)
self.buttonsdict[name].grid(row=indx[0], column=indx[1])
def onFrameConfigure(self, event):
'''Reset the scroll region to encompass the inner frame'''
self.buttonCanvas.configure(scrollregion=self.buttonCanvas.bbox("all"))
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 3 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
def get_uts(self):
# get the ultimate stress and strain at ultimate stress on the graph
utstr, uts = self.props.manual_max(self.props.strain, self.props.stress,
self.plotter.xs, self.plotter.ys)
self.plotter.set_max_point(utstr, uts)
def get_linear(self):
modulusElasticity, regionData = self.props.manual_linear(self.props.strain, self.props.stress,
self.plotter.xs, self.plotter.ys)
self.plotter.set_linear_region(regionData[0], regionData[1])
def write_analysis(self):
# import pdb;pdb.set_trace()
# This function writes the value to a csv and destroys the button object in the GUI
# Add stiffness to the list, if not append an empty string
if self.props.stiffness:
self.csvDataParser.outputDict[self.props.sample]['Stiffness'] \
= self.props.stiffness
else:
self.csvDataParser.outputDict[self.props.sample]['Stiffness'] \
= "NaN"
# Add strength to the list, if not append an empty string
if self.props.strength:
self.csvDataParser.outputDict[self.props.sample]['Strength'] \
= self.props.strength
else:
self.csvDataParser.outputDict[self.props.sample]['Strength'] \
= "NaN"
# Add all of the trasition props to the output
transitionProps = self.transitionProps._outputAllValues()
for prop, val in transitionProps.items():
self.csvDataParser.outputDict[self.props.sample][prop] = val
if prop not in self.headersOut:
self.headersOut.append(prop)
# print(self.csvDataParser.outputDict[self.props.sample])
# Write the properties to the csv file specified
write_props_csv(self.fnameOut, self.csvDataParser.outputDict,
self.props.sample, self.headersOut)
# destroy the button
self.buttonsdict[self.props.sample].destroy()
del self.props
        # Workaround: rebuild frame5 so the old matplotlib toolbar is removed
# Destroy frame5 to get rid of the toolbar
self.frame5.destroy()
# Remake the frame to add another toolbar to
self.frame5 = Frame(self.master, borderwidth=5, relief='raised')
self.frame5.grid(row=0, column=1, sticky='nsew', ipady=20)
'''
~~~~~~~~~~~~~~~~~~~~~~~~~~ Frame 4 functions ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
'''
def getTransitionProperties(self):
'''
This sets all the transition properties for plotting
'''
import numpy as np
stress_strain = np.stack((self.props.strain[:self.props.failIndx],
self.props.stress[:self.props.failIndx]),
axis=-1)
stress_strain_norm = np.stack((self.props.strain_norm[:self.props.failIndx],
self.props.stress_norm[:self.props.failIndx]),
axis=-1)
self.transitionProps._setStressStrain(stress_strain,stress_strain_norm)
self.transitionProps._runTransitionProps()
propDict = self.transitionProps._outputAllValues()
propDict['MaxStrain_'] = self.props.strain[self.props.failIndx]
propDict['StartStrain'] = self.props.strain[0]
propDict['StartStress'] = self.props.stress[0]
propDict['HighStiffness'] = self.transitionProps.rdp[-2:, :]
print(propDict['HighStiffness'])
propDict['RDP'] = self.transitionProps.rdp
self.plotter.set_props(propDict)
def getGraph(self, samplename):
self.fig.clear()
# Iterate through sample list to find matching sample
for sample in self.sampleList:
if samplename == sample[0]:
# Get all of the properties for this sample
self.props = getprops(fileDimslist=sample, smooth_width=29,
std=7, chkderivate=0.04,
stresstype=self.stresstype,
straintype=self.straintype)
self.getTransitionProperties()
# create an instance of DataPlotter class and pass instance of
# getproperties
self.plotter.setClass(self.props)
self.plotter.setSample(sample[0])
self.frame7._SetCheckState()
break
else:
print("Couldn't find the file")
canvas = self.plotter.plot_graph(self.frame4, self.frame5, Row=0, Col=0)
def main():
root = Tk()
mainApp = StartPage(root)
root.attributes('-fullscreen', True)
# root.geometry("500x500")
root.mainloop()
if __name__ == '__main__':
main()
avg_line_length: 38.347716 | max_line_length: 170 | alphanum_fraction: 0.581243 | count_classes: 13,601 | score_classes: 0.900192 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 4,964 | score_documentation: 0.328546

hexsha: b7fdfc063cfae7dcf94caa90899dd03c0a4da68d | size: 8,028 | ext: py | lang: Python
max_stars: cats/cats.py | BrandtH22/CAT-admin-tool @ f58f76e5b3af5484089652616c17c669c4adebb7 | ["Apache-2.0"] | stars: 1 | 2022-03-22T21:59:15.000Z to 2022-03-22T21:59:15.000Z
max_issues: cats/cats.py | BrandtH22/CAT-admin-tool @ f58f76e5b3af5484089652616c17c669c4adebb7 | ["Apache-2.0"] | issues: null
max_forks: cats/cats.py | BrandtH22/CAT-admin-tool @ f58f76e5b3af5484089652616c17c669c4adebb7 | ["Apache-2.0"] | forks: null
content:
import click
import aiohttp
import asyncio
import re
import json
from typing import Optional, Tuple, Iterable, Union, List
from blspy import G2Element, AugSchemeMPL
from chia.cmds.wallet_funcs import get_wallet
from chia.rpc.wallet_rpc_client import WalletRpcClient
from chia.util.default_root import DEFAULT_ROOT_PATH
from chia.util.config import load_config
from chia.util.ints import uint16
from chia.util.byte_types import hexstr_to_bytes
from chia.types.blockchain_format.program import Program
from clvm_tools.clvmc import compile_clvm_text
from clvm_tools.binutils import assemble
from chia.types.spend_bundle import SpendBundle
from chia.wallet.cc_wallet.cc_utils import (
construct_cc_puzzle,
CC_MOD,
SpendableCC,
unsigned_spend_bundle_for_spendable_ccs,
)
from chia.util.bech32m import decode_puzzle_hash
# Loading the client requires the standard chia root directory configuration that all of the chia commands rely on
async def get_client() -> Optional[WalletRpcClient]:
try:
config = load_config(DEFAULT_ROOT_PATH, "config.yaml")
self_hostname = config["self_hostname"]
full_node_rpc_port = config["wallet"]["rpc_port"]
full_node_client = await WalletRpcClient.create(
self_hostname, uint16(full_node_rpc_port), DEFAULT_ROOT_PATH, config
)
return full_node_client
except Exception as e:
if isinstance(e, aiohttp.ClientConnectorError):
print(
f"Connection error. Check if full node is running at {full_node_rpc_port}"
)
else:
print(f"Exception from 'harvester' {e}")
return None
async def get_signed_tx(fingerprint, ph, amt, fee):
try:
wallet_client: WalletRpcClient = await get_client()
wallet_client_f, _ = await get_wallet(wallet_client, fingerprint)
return await wallet_client.create_signed_transaction(
[{"puzzle_hash": ph, "amount": amt}], fee=fee
)
finally:
wallet_client.close()
await wallet_client.await_closed()
# The clvm loaders in this library automatically search for includable files in the directory './include'
def append_include(search_paths: Iterable[str]) -> List[str]:
if search_paths:
search_list = list(search_paths)
search_list.append("./include")
return search_list
else:
return ["./include"]
def parse_program(program: Union[str, Program], include: Iterable = []) -> Program:
if isinstance(program, Program):
return program
else:
if "(" in program: # If it's raw clvm
prog = Program.to(assemble(program))
elif "." not in program: # If it's a byte string
prog = Program.from_bytes(hexstr_to_bytes(program))
else: # If it's a file
with open(program, "r") as file:
filestring: str = file.read()
if "(" in filestring: # If it's not compiled
# TODO: This should probably be more robust
if re.compile(r"\(mod\s").search(filestring): # If it's Chialisp
prog = Program.to(
compile_clvm_text(filestring, append_include(include))
)
else: # If it's CLVM
prog = Program.to(assemble(filestring))
else: # If it's serialized CLVM
prog = Program.from_bytes(hexstr_to_bytes(filestring))
return prog
CONTEXT_SETTINGS = dict(help_option_names=["-h", "--help"])
@click.command()
@click.pass_context
@click.option(
"-l",
"--tail",
required=True,
help="The TAIL program to launch this CAT with",
)
@click.option(
"-c",
"--curry",
multiple=True,
help="An argument to curry into the TAIL",
)
@click.option(
"-s",
"--solution",
required=True,
default="()",
show_default=True,
help="The solution to the TAIL program",
)
@click.option(
"-t",
"--send-to",
required=True,
help="The address these CATs will appear at once they are issued",
)
@click.option(
"-a",
"--amount",
required=True,
type=int,
help="The amount to issue in mojos (regular XCH will be used to fund this)",
)
@click.option(
"-m",
"--fee",
required=True,
default=0,
show_default=True,
help="The XCH fee to use for this issuance",
)
@click.option(
"-f",
"--fingerprint",
type=int,
help="The wallet fingerprint to use as funds",
)
@click.option(
"-sig",
"--signature",
multiple=True,
help="A signature to aggregate with the transaction",
)
@click.option(
"-as",
"--spend",
multiple=True,
help="An additional spend to aggregate with the transaction",
)
@click.option(
"-b",
"--as-bytes",
is_flag=True,
help="Output the spend bundle as a sequence of bytes instead of JSON",
)
@click.option(
"-sc",
"--select-coin",
is_flag=True,
help="Stop the process once a coin from the wallet has been selected and return the coin",
)
def cli(
ctx: click.Context,
tail: str,
curry: Tuple[str],
solution: str,
send_to: str,
amount: int,
fee: int,
fingerprint: int,
signature: Tuple[str],
spend: Tuple[str],
as_bytes: bool,
select_coin: bool,
):
ctx.ensure_object(dict)
tail = parse_program(tail)
curried_args = [assemble(arg) for arg in curry]
solution = parse_program(solution)
address = decode_puzzle_hash(send_to)
aggregated_signature = G2Element()
for sig in signature:
aggregated_signature = AugSchemeMPL.aggregate(
[aggregated_signature, G2Element.from_bytes(hexstr_to_bytes(sig))]
)
aggregated_spend = SpendBundle([], G2Element())
for bundle in spend:
aggregated_spend = SpendBundle.aggregate(
[aggregated_spend, SpendBundle.from_bytes(hexstr_to_bytes(bundle))]
)
# Construct the TAIL
if len(curried_args) > 0:
curried_tail = tail.curry(*curried_args)
else:
curried_tail = tail
# Construct the intermediate puzzle
p2_puzzle = Program.to(
(1, [[51, 0, -113, curried_tail, solution], [51, address, amount, [address]]])
)
# Wrap the intermediate puzzle in a CAT wrapper
cat_puzzle = construct_cc_puzzle(CC_MOD, curried_tail.get_tree_hash(), p2_puzzle)
cat_ph = cat_puzzle.get_tree_hash()
# Get a signed transaction from the wallet
signed_tx = asyncio.get_event_loop().run_until_complete(
get_signed_tx(fingerprint, cat_ph, amount, fee)
)
eve_coin = list(
filter(lambda c: c.puzzle_hash == cat_ph, signed_tx.spend_bundle.additions())
)[0]
# This is where we exit if we're only looking for the selected coin
if select_coin:
primary_coin = list(
filter(lambda c: c.name() == eve_coin.parent_coin_info, signed_tx.spend_bundle.removals())
)[0]
print(json.dumps(primary_coin.to_json_dict(), sort_keys=True, indent=4))
print(f"Name: {primary_coin.name()}")
return
# Create the CAT spend
spendable_eve = SpendableCC(
eve_coin,
curried_tail.get_tree_hash(),
p2_puzzle,
Program.to([]),
limitations_solution=solution,
limitations_program_reveal=curried_tail,
)
eve_spend = unsigned_spend_bundle_for_spendable_ccs(CC_MOD, [spendable_eve])
# Aggregate everything together
final_bundle = SpendBundle.aggregate(
[
signed_tx.spend_bundle,
eve_spend,
aggregated_spend,
SpendBundle([], aggregated_signature),
]
)
if as_bytes:
final_bundle = bytes(final_bundle).hex()
else:
final_bundle = json.dumps(final_bundle.to_json_dict(), sort_keys=True, indent=4)
print(f"Asset ID: {curried_tail.get_tree_hash()}")
print(f"Spend Bundle: {final_bundle}")
def main():
cli()
if __name__ == "__main__":
main()
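For orientation, a hypothetical invocation of the issuance tool is sketched below; every flag value is a placeholder, not a working TAIL, address, or fingerprint.

```python
# Example invocation (all values are hypothetical placeholders):
#   python cats.py --tail ./my_tail.clsp --solution '()' \
#       --send-to <xch address> --amount 1000000 --fee 100 \
#       --fingerprint <wallet fingerprint> --select-coin
```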
avg_line_length: 29.733333 | max_line_length: 114 | alphanum_fraction: 0.646736 | count_classes: 0 | score_classes: 0 | count_generators: 0 | score_generators: 0 | count_decorators: 4,385 | score_decorators: 0.546213 | count_async_functions: 1,114 | score_async_functions: 0.138764 | count_documentation: 1,749 | score_documentation: 0.217862

hexsha: b7ff6526e37679ba17f2e315aceade4303222790 | size: 1,997 | ext: py | lang: Python
max_stars: tagging/tag_net.py | zhuzhutingru123/Semantics-AssistedVideoCaptioning @ 28c7b3fa57964f734f0fb38ecb89c9e8e21e5aaf | ["MIT"] | stars: 55 | 2019-09-23T12:21:47.000Z to 2022-03-29T19:50:57.000Z
max_issues: tagging/tag_net.py | zhuzhutingru123/Semantics-AssistedVideoCaptioning @ 28c7b3fa57964f734f0fb38ecb89c9e8e21e5aaf | ["MIT"] | issues: 13 | 2019-10-02T05:10:03.000Z to 2021-11-03T11:33:32.000Z
max_forks: tagging/tag_net.py | WingsBrokenAngel/Semantics-AssistedVideoCaptioning @ 409ca8b5be336d8957f3345825c8815a3070af19 | ["MIT"] | forks: 15 | 2019-09-20T07:10:47.000Z to 2022-03-11T09:05:18.000Z
content:
# -*- coding: utf-8 -*-
# Author: Haoran Chen
# Date: 2019-4-28
import tensorflow as tf
from tensorflow import placeholder, glorot_normal_initializer, zeros_initializer
from tensorflow.nn import dropout
import numpy as np
n_z = 3584
n_y = 300
MSVD_PATH = None
MSRVTT_PATH = None
MSVD_GT_PATH = None
MSRVTT_GT_PATH = None
max_epochs = 1000
lr = 0.0002
keep_prob = 1.0
batch_size = 64
class TagNet():
def __init__(self):
self.graph = tf.Graph()
with self.graph.as_default():
self.y = placeholder(tf.float32, [None, n_y])
self.z = placeholder(tf.float32, [None, n_z])
self.keep_prob = placeholder(tf.float32, [])
self.Wy1 = tf.get_variable('Wy1', [n_z, 512], tf.float32, glorot_normal_initializer())
self.by1 = tf.get_variable('by1', [512], tf.float32, zeros_initializer())
self.Wy2 = tf.get_variable('Wy2', [512, 512], tf.float32, glorot_normal_initializer())
self.by2 = tf.get_variable('by2', [512], tf.float32, zeros_initializer())
self.Wy3 = tf.get_variable('Wy3', [512, n_y], tf.float32, glorot_normal_initializer())
self.by3 = tf.get_variable('by3', [n_y], tf.float32, zeros_initializer())
z = dropout(self.z, self.keep_prob)
h = tf.nn.relu(tf.matmul(z, self.Wy1) + self.by1)
h = dropout(h, self.keep_prob)
h = tf.nn.relu(tf.matmul(h, self.Wy2) + self.by2)
h = dropout(h, self.keep_prob)
self.pred = tf.sigmoid(tf.matmul(h, self.Wy3) + self.by3)
cost = -self.y * tf.log(self.pred + 1e-6) - (1. - self.y) * tf.log(1. - self.pred + 1e-6)
self.cost = tf.reduce_mean(tf.reduce_sum(cost, 1))
self.pred_mask = tf.cast(self.pred >= 0.5, tf.int32)
self.tmp = tf.cast(self.y, tf.int32)
self.acc_mask = tf.cast(tf.equal(self.tmp, self.pred_mask), tf.float32)
self.acc = tf.reduce_mean(self.acc_mask)
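`TagNet` only builds the graph; a hedged TF1-style sketch of a single training step follows. The Adam optimizer and the random stand-in batches are assumptions, since the file defines no training loop.

```python
# Minimal TF1-style training step for TagNet (optimizer and data are stand-ins).
import numpy as np

net = TagNet()
with net.graph.as_default():
    train_op = tf.train.AdamOptimizer(lr).minimize(net.cost)
    with tf.Session(graph=net.graph) as sess:
        sess.run(tf.global_variables_initializer())
        z_batch = np.random.rand(batch_size, n_z).astype(np.float32)          # stand-in features
        y_batch = (np.random.rand(batch_size, n_y) > 0.5).astype(np.float32)  # stand-in tag labels
        _, cost = sess.run([train_op, net.cost],
                           {net.z: z_batch, net.y: y_batch, net.keep_prob: keep_prob})
        print(cost)
```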
avg_line_length: 33.847458 | max_line_length: 101 | alphanum_fraction: 0.613921 | count_classes: 1,585 | score_classes: 0.793691 | count_generators: 0 | score_generators: 0 | count_decorators: 0 | score_decorators: 0 | count_async_functions: 0 | score_async_functions: 0 | count_documentation: 91 | score_documentation: 0.045568

hexsha: b7ffe90a656352b24d635be78e2f3b9924c3cd33 | size: 1,625 | ext: py | lang: Python
max_stars: example/keraslogistic/cloudmesh_ai/logistic_regression.py | cloudmesh-community/fa19-516-174 @ 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | ["Apache-2.0"] | stars: null
max_issues: example/keraslogistic/cloudmesh_ai/logistic_regression.py | cloudmesh-community/fa19-516-174 @ 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | ["Apache-2.0"] | issues: null
max_forks: example/keraslogistic/cloudmesh_ai/logistic_regression.py | cloudmesh-community/fa19-516-174 @ 1b1aed0dcb4aa2fbe70de86a281c089a75f7aa72 | ["Apache-2.0"] | forks: null
content:
import pandas as pd
from cloudmesh import mongo
from flask import request
from flask_pymongo import PyMongo
from sklearn.feature_selection import SelectKBest, chi2
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from .file import upload
def fit(body):
# Put input file in dataframe
train = pd.read_csv(upload.trainset, index_col=0)
test = pd.read_csv(upload.testset, index_col=0)
ytrain = train['labels']
ytest = test['labels']
xtrain = train.drop(['labels'], axis=1, )
xtest = test.drop(['labels'], axis=1)
# fe = SelectKBest(chi2, k=15)
# xnew = fe.fit_transform(xtrain, ytrain.values.reshape(-1, ))
# xnew = pd.DataFrame(xnew)
# cols = xtrain.columns.values[fe.get_support()]
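    # The hard-coded columns below appear to be the 15 features previously selected by the chi2 filter sketched above.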
xtrain_final = xtrain[['feat_48', 'feat_64', 'feat_105', 'feat_136', 'feat_153',
'feat_241', 'feat_336', 'feat_338', 'feat_378', 'feat_411',
'feat_442', 'feat_453', 'feat_472', 'feat_475', 'feat_493']]
xtest_final = xtest[['feat_48', 'feat_64', 'feat_105', 'feat_136', 'feat_153',
'feat_241', 'feat_336', 'feat_338', 'feat_378', 'feat_411',
'feat_442', 'feat_453', 'feat_472', 'feat_475', 'feat_493']]
lg = LogisticRegression()
lg.fit(xtrain_final, ytrain.values.reshape(-1, ))
return lg, xtest_final, ytest
def predict(body, file=None):
    # Train the model first, then score it on the held-out test set.
    lg, xtest_final, ytest = fit(body)
    ypred = lg.predict(xtest_final)
    acc = accuracy_score(ytest, ypred)
    print("The test accuracy for this logistic regression model is", acc)
    return acc
| 36.931818
| 87
| 0.647385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 593
| 0.364923
|
4d0003163267427736e0367162b90a4c31a4952a
| 18,450
|
py
|
Python
|
Scripts/plot_ObservationsPrediction_RawHiatus_OHClevels-lag-EDA_v2.py
|
zmlabe/predictGMSTrate
|
2bde4a106de1988d772f15a52d283d23bb7128f4
|
[
"MIT"
] | 2
|
2022-01-20T20:20:04.000Z
|
2022-02-21T12:33:37.000Z
|
Dark_Scripts/plot_ObservationsPrediction_RawHiatus_OHClevels-lag-EDA_v2.py
|
zmlabe/predictGMSTrate
|
2bde4a106de1988d772f15a52d283d23bb7128f4
|
[
"MIT"
] | null | null | null |
Dark_Scripts/plot_ObservationsPrediction_RawHiatus_OHClevels-lag-EDA_v2.py
|
zmlabe/predictGMSTrate
|
2bde4a106de1988d772f15a52d283d23bb7128f4
|
[
"MIT"
] | 3
|
2022-01-19T16:25:37.000Z
|
2022-03-22T13:25:00.000Z
|
"""
Explore raw composites based on indices from predicted testing data and
showing all the difference OHC levels for OBSERVATIONS
Author : Zachary M. Labe
Date : 21 September 2021
Version : 2 (mostly for testing)
"""
### Import packages
import sys
import matplotlib.pyplot as plt
import numpy as np
import calc_Utilities as UT
from mpl_toolkits.basemap import Basemap, addcyclic, shiftgrid
import palettable.cubehelix as cm
import cmocean as cmocean
import calc_dataFunctions as df
import calc_Stats as dSS
from netCDF4 import Dataset
### Plotting defaults
plt.rc('text',usetex=True)
plt.rc('font',**{'family':'sans-serif','sans-serif':['Avant Garde']})
###############################################################################
###############################################################################
###############################################################################
### Data preliminaries
modelGCMs = ['CESM2le']
dataset_obs = 'ERA5'
allDataLabels = modelGCMs
monthlychoiceq = ['annual']
variables = ['T2M']
vari_predict = ['SST','OHC100','OHC300','OHC700']
reg_name = 'SMILEGlobe'
level = 'surface'
###############################################################################
###############################################################################
randomalso = False
timeper = 'hiatus'
shuffletype = 'GAUSS'
###############################################################################
###############################################################################
land_only = False
ocean_only = False
###############################################################################
###############################################################################
baseline = np.arange(1951,1980+1,1)
###############################################################################
###############################################################################
window = 0
if window == 0:
rm_standard_dev = False
ravel_modelens = False
ravelmodeltime = False
else:
rm_standard_dev = True
ravelmodeltime = False
ravel_modelens = True
yearsall = np.arange(1979+window,2099+1,1)
yearsobs = np.arange(1979+window,2020+1,1)
###############################################################################
###############################################################################
numOfEns = 40
lentime = len(yearsall)
###############################################################################
###############################################################################
lat_bounds,lon_bounds = UT.regions(reg_name)
###############################################################################
###############################################################################
ravelyearsbinary = False
ravelbinary = False
lensalso = True
###############################################################################
###############################################################################
### Remove ensemble mean
rm_ensemble_mean = True
###############################################################################
###############################################################################
### Accuracy for composites
accurate = True
if accurate == True:
typemodel = 'correcthiatus_obs'
elif accurate == False:
typemodel = 'extrahiatus_obs'
elif accurate == 'WRONG':
typemodel = 'wronghiatus_obs'
elif accurate == 'HIATUS':
typemodel = 'allhiatus_obs'
###############################################################################
###############################################################################
### Call functions
trendlength = 10
AGWstart = 1990
years_newmodel = np.arange(AGWstart,yearsall[-1]-8,1)
years_newobs = np.arange(AGWstart,yearsobs[-1]-8,1)
vv = 0
mo = 0
variq = variables[vv]
monthlychoice = monthlychoiceq[mo]
directoryfigure = '/Users/zlabe/Desktop/GmstTrendPrediction/ANN_v2/Obs/'
saveData = monthlychoice + '_' + variq + '_' + reg_name + '_' + dataset_obs
print('*Filename == < %s >' % saveData)
###############################################################################
###############################################################################
### Function to read in predictor variables (SST/OHC)
def read_primary_dataset(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data,lats,lons = df.readFiles(variq,dataset,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
datar,lats,lons = df.getRegion(data,lats,lons,lat_bounds,lon_bounds)
print('\nOur dataset: ',dataset,' is shaped',data.shape)
return datar,lats,lons
def read_obs_dataset(variq,dataset_obs,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds):
data_obs,lats_obs,lons_obs = df.readFiles(variq,dataset_obs,monthlychoice,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,timeper)
data_obs,lats_obs,lons_obs = df.getRegion(data_obs,lats_obs,lons_obs,lat_bounds,lon_bounds)
print('our OBS dataset: ',dataset_obs,' is shaped',data_obs.shape)
return data_obs,lats_obs,lons_obs
###############################################################################
###############################################################################
### Loop through to read all the variables
ohcHIATUS = np.empty((len(vari_predict),92,144))
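### For each predictor (SST, OHC100, OHC300, OHC700), composite the observed maps over the years flagged as hiatus onsets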
for vvv in range(len(vari_predict)):
### Function to read in predictor variables (SST/OHC)
models_var = []
for i in range(len(modelGCMs)):
if vari_predict[vvv][:3] == 'OHC':
obs_predict = 'OHC'
else:
obs_predict = 'ERA5'
obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
### Save predictor
models_var.append(obsq_var)
models_var = np.asarray(models_var).squeeze()
### Remove ensemble mean
if rm_ensemble_mean == True:
models_var = dSS.remove_trend_obs(models_var,'surface')
print('\n*Removed observational linear trend*')
### Standardize
models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
meanvar = np.nanmean(models_varravel,axis=0)
stdvar = np.nanstd(models_varravel,axis=0)
modelsstd_varravel = (models_varravel-meanvar)/stdvar
models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
### Slice for number of years
yearsq_m = np.where((yearsobs >= AGWstart))[0]
models_slice = models_var[yearsq_m,:,:]
if rm_ensemble_mean == False:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [20,20]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.05
actFun = 'relu'
fractWeight = 0.5
elif rm_ensemble_mean == True:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = 0.5
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
### Directories to save files
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
###############################################################################
###############################################################################
###############################################################################
### Read in data for testing predictions and actual hiatuses
actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
### Reshape arrays for [ensemble,year]
act_re = actual_test
pre_re = predict_test
### Slice ensembles for testing data
ohcready = models_slice[:,:,:].squeeze()
### Pick all hiatuses
if accurate == True: ### correct predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 1):
ohc_allenscomp.append(ohcready[yr,:,:])
elif accurate == False: ### picks all hiatus predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if pre_re[yr] == 1:
ohc_allenscomp.append(ohcready[yr,:,:])
elif accurate == 'WRONG': ### picks hiatus but is wrong
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 0):
ohc_allenscomp.append(ohcready[yr,:,:])
elif accurate == 'HIATUS': ### accurate climate change
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (act_re[yr] == 1):
ohc_allenscomp.append(ohcready[yr,:,:])
else:
print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
sys.exit()
### Composite across all years to get hiatuses
ohcHIATUS[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
###############################################################################
###############################################################################
### Loop through to read all the variables
lag1 = 3
lag2 = 7
lag = lag2-lag1
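### The lagged composite below averages years +lag1 through +lag2-1 after each onset (a 4-year window), in contrast to the onset-year composite above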
ohcHIATUSlag = np.empty((len(vari_predict),92,144))
for vvv in range(len(vari_predict)):
### Function to read in predictor variables (SST/OHC)
models_var = []
for i in range(len(modelGCMs)):
if vari_predict[vvv][:3] == 'OHC':
obs_predict = 'OHC'
else:
obs_predict = 'ERA5'
obsq_var,lats,lons = read_obs_dataset(vari_predict[vvv],obs_predict,numOfEns,lensalso,randomalso,ravelyearsbinary,ravelbinary,shuffletype,lat_bounds=lat_bounds,lon_bounds=lon_bounds)
### Save predictor
models_var.append(obsq_var)
models_var = np.asarray(models_var).squeeze()
### Remove ensemble mean
if rm_ensemble_mean == True:
models_var = dSS.remove_trend_obs(models_var,'surface')
print('\n*Removed observational linear trend*')
### Standardize
models_varravel = models_var.squeeze().reshape(yearsobs.shape[0],lats.shape[0]*lons.shape[0])
meanvar = np.nanmean(models_varravel,axis=0)
stdvar = np.nanstd(models_varravel,axis=0)
modelsstd_varravel = (models_varravel-meanvar)/stdvar
models_var = modelsstd_varravel.reshape(yearsobs.shape[0],lats.shape[0],lons.shape[0])
### Slice for number of years
yearsq_m = np.where((yearsobs >= AGWstart))[0]
models_slice = models_var[yearsq_m,:,:]
if rm_ensemble_mean == False:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [20,20]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.05
actFun = 'relu'
fractWeight = 0.5
elif rm_ensemble_mean == True:
variq = 'T2M'
fac = 0.7
random_segment_seed = int(np.genfromtxt('/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/SelectedSegmentSeed.txt',unpack=True))
random_network_seed = 87750
hidden = [30,30]
n_epochs = 500
batch_size = 128
lr_here = 0.001
ridgePenalty = 0.5
actFun = 'relu'
fractWeight = 0.5
else:
print(ValueError('SOMETHING IS WRONG WITH DATA PROCESSING!'))
sys.exit()
### Naming conventions for files
directorymodel = '/Users/zlabe/Documents/Research/GmstTrendPrediction/SavedModels/'
savename = 'ANNv2_'+'OHC100'+'_hiatus_' + actFun + '_L2_'+ str(ridgePenalty)+ '_LR_' + str(lr_here)+ '_Batch'+ str(batch_size)+ '_Iters' + str(n_epochs) + '_' + str(len(hidden)) + 'x' + str(hidden[0]) + '_SegSeed' + str(random_segment_seed) + '_NetSeed'+ str(random_network_seed)
if(rm_ensemble_mean==True):
savename = savename + '_EnsembleMeanRemoved'
### Directories to save files
directorydata = '/Users/zlabe/Documents/Research/GmstTrendPrediction/Data/'
###############################################################################
###############################################################################
###############################################################################
### Read in data for testing predictions and actual hiatuses
actual_test = np.genfromtxt(directorydata + 'obsActualLabels_' + savename + '.txt')
predict_test = np.genfromtxt(directorydata + 'obsLabels_' + savename+ '.txt')
### Reshape arrays for [ensemble,year]
act_re = actual_test
pre_re = predict_test
### Slice ensembles for testing data
ohcready = models_slice[:,:,:].squeeze()
### Pick all hiatuses
if accurate == True: ### correct predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 1):
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
elif accurate == False: ### picks all hiatus predictions
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if pre_re[yr] == 1:
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
elif accurate == 'WRONG': ### picks hiatus but is wrong
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (pre_re[yr]) == 1 and (act_re[yr] == 0):
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
elif accurate == 'HIATUS': ### accurate climate change
ohc_allenscomp = []
for yr in range(ohcready.shape[0]):
if (act_re[yr] == 1):
ohc_allenscomp.append(np.nanmean(ohcready[yr+lag1:yr+lag2,:,:],axis=0))
else:
print(ValueError('SOMETHING IS WRONG WITH ACCURACY COMPOSITES!'))
sys.exit()
### Composite across all years to get hiatuses
ohcHIATUSlag[vvv,:,:] = np.nanmean(np.asarray(ohc_allenscomp),axis=0)
### Composite all for plotting
ohc_allcomp = np.append(ohcHIATUS,ohcHIATUSlag,axis=0)
###############################################################################
###############################################################################
### Plot subplot of observations
letters = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n"]
plotloc = [1,3,5,7,2,4,6,8]
if rm_ensemble_mean == False:
limit = np.arange(-1.5,1.51,0.02)
barlim = np.round(np.arange(-1.5,1.6,0.5),2)
elif rm_ensemble_mean == True:
limit = np.arange(-1.5,1.6,0.02)
barlim = np.round(np.arange(-1.5,1.6,0.5),2)
cmap = cmocean.cm.balance
label = r'\textbf{[ HIATUS COMPOSITE ]}'
fig = plt.figure(figsize=(8,10))
###############################################################################
for ppp in range(ohc_allcomp.shape[0]):
ax1 = plt.subplot(ohc_allcomp.shape[0]//2,2,plotloc[ppp])
m = Basemap(projection='robin',lon_0=-180,resolution='l',area_thresh=10000)
m.drawcoastlines(color='darkgrey',linewidth=0.27)
### Variable
varn = ohc_allcomp[ppp]
if ppp == 0:
lons = np.where(lons >180,lons-360,lons)
x, y = np.meshgrid(lons,lats)
circle = m.drawmapboundary(fill_color='dimgrey',color='dimgray',
linewidth=0.7)
circle.set_clip_on(False)
cs1 = m.contourf(x,y,varn,limit,extend='both',latlon=True)
cs1.set_cmap(cmap)
m.fillcontinents(color='dimgrey',lake_color='dimgrey')
ax1.annotate(r'\textbf{[%s]}' % letters[ppp],xy=(0,0),xytext=(0.95,0.93),
textcoords='axes fraction',color='k',fontsize=10,
rotation=0,ha='center',va='center')
if ppp < 4:
ax1.annotate(r'\textbf{%s}' % vari_predict[ppp],xy=(0,0),xytext=(-0.08,0.5),
textcoords='axes fraction',color='dimgrey',fontsize=20,
rotation=90,ha='center',va='center')
if ppp == 0:
plt.title(r'\textbf{Onset}',fontsize=15,color='k')
if ppp == 4:
plt.title(r'\textbf{%s-Year Composite}' % lag,fontsize=15,color='k')
###############################################################################
cbar_ax1 = fig.add_axes([0.38,0.05,0.3,0.02])
cbar1 = fig.colorbar(cs1,cax=cbar_ax1,orientation='horizontal',
extend='both',extendfrac=0.07,drawedges=False)
cbar1.set_label(label,fontsize=6,color='dimgrey',labelpad=1.4)
cbar1.set_ticks(barlim)
cbar1.set_ticklabels(list(map(str,barlim)))
cbar1.ax.tick_params(axis='x', size=.01,labelsize=4)
cbar1.outline.set_edgecolor('dimgrey')
plt.tight_layout()
plt.subplots_adjust(bottom=0.08,wspace=0.01)
if rm_ensemble_mean == True:
plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s_rmENSEMBLEmean.png' % (lag,accurate,accurate),dpi=300)
else:
plt.savefig(directoryfigure + 'RawCompositesHiatus_OBSERVATIONS_OHClevels-lag%s_v2_AccH-%s_AccR-%s.png' % (lag,accurate,accurate),dpi=300)
| 44.244604
| 284
| 0.547805
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 6,811
| 0.36916
|
4d0095e3df86b0354c6a7f3fe8432d1caf5ff121
| 3,807
|
py
|
Python
|
osnexus_flocker_driver/osnexusdriver.py
|
OSNEXUS/flocker-driver
|
22a6ecf57c6841359df82657659f8e945b206f1b
|
[
"Apache-2.0"
] | 2
|
2016-04-29T22:38:05.000Z
|
2016-04-29T22:39:06.000Z
|
osnexus_flocker_driver/osnexusdriver.py
|
OSNEXUS/flocker-driver
|
22a6ecf57c6841359df82657659f8e945b206f1b
|
[
"Apache-2.0"
] | null | null | null |
osnexus_flocker_driver/osnexusdriver.py
|
OSNEXUS/flocker-driver
|
22a6ecf57c6841359df82657659f8e945b206f1b
|
[
"Apache-2.0"
] | 2
|
2016-05-08T07:39:12.000Z
|
2019-07-05T18:35:12.000Z
|
# Copyright 2016 OSNEXUS Corporation
"""
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import socket
from zope.interface import implementer
from flocker.node.agents.blockdevice import (
AlreadyAttachedVolume, IBlockDeviceAPI, IProfiledBlockDeviceAPI,
BlockDeviceVolume, UnknownVolume, UnattachedVolume
)
from osnexusutil import osnexusAPI
import logging
from eliot import Message, Logger
#_logger = Logger()
@implementer(IProfiledBlockDeviceAPI)
@implementer(IBlockDeviceAPI)
class OsnexusBlockDeviceAPI(object):
defaultVolumeBlkSize_ = 4096
defaultCreatedBy_ = "osnexus_flocker_driver"
defaultExportedBlkSize_ = 4096
def __init__(self, ipaddress, username, password, gold_tier, silver_tier, bronze_tier, default_pool):
"""
:returns: A ``BlockDeviceVolume``.
"""
logging.basicConfig(filename='/var/log/flocker/qs_flocker.log', format='%(asctime)s : %(message)s', level=logging.ERROR)
self._logger = logging.getLogger("QuantastorLogger")
self._instance_id = self.compute_instance_id()
self._osnexusApi = osnexusAPI(ipaddress, username, password, gold_tier, silver_tier, bronze_tier, default_pool, self._logger)
def compute_instance_id(self):
"""
Return current node's hostname
"""
#socket.getfqdn - Return a fully qualified domain name for name. If name is omitted or empty, it is interpreted
#as the local host. In case no fully qualified domain name is available, the hostname as returned by
# gethostname() is returned.
#socket.gethostbyname - Translate a host name to IPv4 address format.
return unicode(socket.gethostbyname(socket.getfqdn()))
def allocation_unit(self):
"""
        Return the allocation unit in bytes (1 GiB).
"""
return 1024*1024*1024
def _cleanup(self):
"""
Remove all volumes
"""
volumes = self.list_volumes()
for volume in volumes:
self._logger.debug("Deleting volume '%s'", volume.blockdevice_id)
self.destroy_volume(volume.blockdevice_id)
def list_volumes(self):
"""
List all the block devices available via the back end API.
:returns: A ``list`` of ``BlockDeviceVolume``s.
"""
return self._osnexusApi.listOsnexusVolumes()
def create_volume(self, dataset_id, size):
return self._osnexusApi.createOsnexusVolume(dataset_id, size)
def create_volume_with_profile(self, dataset_id, size, profile_name):
return self._osnexusApi.createOsnexusVolumeWithProfile(dataset_id, size, profile_name.lower())
def destroy_volume(self, blockdevice_id):
return self._osnexusApi.deleteOsnexusVolume(blockdevice_id)
def attach_volume(self, blockdevice_id, attach_to):
return self._osnexusApi.attachOsnexusVolume(blockdevice_id, attach_to)
def detach_volume(self, blockdevice_id):
return self._osnexusApi.detachOsnexusvolume(blockdevice_id)
def get_device_path(self, blockdevice_id):
return self._osnexusApi.getOsNexusDevicePath(blockdevice_id)
def GetOsnexusStorageApi(ipaddress, username, password, gold_tier, silver_tier, bronze_tier, default_pool ):
return OsnexusBlockDeviceAPI(ipaddress, username, password, gold_tier, silver_tier, bronze_tier, default_pool)
| 36.961165
| 133
| 0.727607
| 2,614
| 0.68663
| 0
| 0
| 2,682
| 0.704492
| 0
| 0
| 1,345
| 0.353297
|
4d009e96e973b11eba741f0ee1dbc7d7ed84b7ed
| 2,629
|
py
|
Python
|
rescan-script.py
|
fivepiece/electrum-personal-server
|
dae6eb3954f3916e13aa88969a5b6ac65a488a13
|
[
"MIT"
] | null | null | null |
rescan-script.py
|
fivepiece/electrum-personal-server
|
dae6eb3954f3916e13aa88969a5b6ac65a488a13
|
[
"MIT"
] | null | null | null |
rescan-script.py
|
fivepiece/electrum-personal-server
|
dae6eb3954f3916e13aa88969a5b6ac65a488a13
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python3
from configparser import ConfigParser, NoSectionError, NoOptionError
from electrumpersonalserver.jsonrpc import JsonRpc, JsonRpcError
from datetime import datetime
import server
def search_for_block_height_of_date(datestr, rpc):
target_time = datetime.strptime(datestr, "%d/%m/%Y")
bestblockhash = rpc.call("getbestblockhash", [])
best_head = rpc.call("getblockheader", [bestblockhash])
if target_time > datetime.fromtimestamp(best_head["time"]):
print("ERROR: date in the future")
return -1
genesis_block = rpc.call("getblockheader", [rpc.call("getblockhash", [0])])
if target_time < datetime.fromtimestamp(genesis_block["time"]):
print("WARNING: date is before the creation of bitcoin")
return 0
first_height = 0
last_height = best_head["height"]
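    # Binary search over block headers: bisect [first_height, last_height] until a block within ~2 hours of the target date is found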
while True:
m = (first_height + last_height) // 2
m_header = rpc.call("getblockheader", [rpc.call("getblockhash", [m])])
m_header_time = datetime.fromtimestamp(m_header["time"])
m_time_diff = (m_header_time - target_time).total_seconds()
if abs(m_time_diff) < 60*60*2: #2 hours
return m_header["height"]
elif m_time_diff < 0:
first_height = m
elif m_time_diff > 0:
last_height = m
else:
return -1
def main():
try:
config = ConfigParser()
config.read(["config.cfg"])
config.options("master-public-keys")
except NoSectionError:
print("Non-existant configuration file `config.cfg`")
return
try:
rpc_u = config.get("bitcoin-rpc", "rpc_user")
rpc_p = config.get("bitcoin-rpc", "rpc_password")
except NoOptionError:
rpc_u, rpc_p = server.obtain_rpc_username_password(config.get(
"bitcoin-rpc", "datadir"))
    if rpc_u is None:
return
rpc = JsonRpc(host = config.get("bitcoin-rpc", "host"),
port = int(config.get("bitcoin-rpc", "port")),
user = rpc_u, password = rpc_p,
wallet_filename=config.get("bitcoin-rpc", "wallet_filename").strip())
user_input = input("Enter earliest wallet creation date (DD/MM/YYYY) "
"or block height to rescan from: ")
try:
height = int(user_input)
except ValueError:
height = search_for_block_height_of_date(user_input, rpc)
if height == -1:
return
height -= 2016 #go back two weeks for safety
if input("Rescan from block height " + str(height) + " ? (y/n):") != 'y':
return
rpc.call("rescanblockchain", [height])
print("end")
main()
| 36.013699
| 79
| 0.63218
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 637
| 0.242297
|
4d01262d0ab1840560717880a8567c3e85b8f930
| 1,082
|
py
|
Python
|
tests/application/register/test_views.py
|
AlexKouzy/ethnicity-facts-and-figures-publisher
|
18ab2495a8633f585e18e607c7f75daa564a053d
|
[
"MIT"
] | 1
|
2021-10-06T13:48:36.000Z
|
2021-10-06T13:48:36.000Z
|
tests/application/register/test_views.py
|
AlexKouzy/ethnicity-facts-and-figures-publisher
|
18ab2495a8633f585e18e607c7f75daa564a053d
|
[
"MIT"
] | 116
|
2018-11-02T17:20:47.000Z
|
2022-02-09T11:06:22.000Z
|
tests/application/register/test_views.py
|
racedisparityaudit/rd_cms
|
a12f0e3f5461cc41eed0077ed02e11efafc5dd76
|
[
"MIT"
] | 2
|
2018-11-09T16:47:35.000Z
|
2020-04-09T13:06:48.000Z
|
from bs4 import BeautifulSoup
from flask import url_for
from application.utils import generate_token
from application.auth.models import TypeOfUser
from tests.models import UserFactory
def test_confirm_account_rejects_easy_password(app, test_app_client):
rdu_user = UserFactory(user_type=TypeOfUser.RDU_USER, active=False)
token = generate_token(rdu_user.email, app)
confirmation_url = url_for("register.confirm_account", token=token, _external=True)
rdu_user.active = False
user_details = {"password": "long-enough-but-too-easy", "confirm_password": "long-enough-but-too-easy"}
resp = test_app_client.post(confirmation_url, data=user_details, follow_redirects=True)
page = BeautifulSoup(resp.data.decode("utf-8"), "html.parser")
assert (
page.find("div", class_="eff-flash-message__body").text.strip()
== """Your password is too weak. It has to be at least 10 characters long and use a mix of numbers, special
characters as well as upper and lowercase letters. Avoid using common patterns and repeated characters."""
)
| 38.642857
| 115
| 0.756932
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 368
| 0.340111
|
4d014fe4ec193e53774cf70e289d81ecdf7c7e43
| 1,205
|
py
|
Python
|
setup.py
|
OriHoch/ckan-cloud-operator
|
125c3eb10f843ac62fc85659e756bd1d9620eae7
|
[
"MIT"
] | null | null | null |
setup.py
|
OriHoch/ckan-cloud-operator
|
125c3eb10f843ac62fc85659e756bd1d9620eae7
|
[
"MIT"
] | null | null | null |
setup.py
|
OriHoch/ckan-cloud-operator
|
125c3eb10f843ac62fc85659e756bd1d9620eae7
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
from os import path
from time import time
here = path.abspath(path.dirname(__file__))
if path.exists("VERSION.txt"):
# this file can be written by CI tools (e.g. Travis)
with open("VERSION.txt") as version_file:
version = version_file.read().strip().strip("v")
else:
version = str(time())
setup(
name='ckan_cloud_operator',
version=version,
description='''CKAN Cloud Kubernetes operator''',
url='https://github.com/datopian/ckan-cloud-operator',
author='''Viderum''',
license='MIT',
packages=find_packages(exclude=['examples', 'tests', '.tox']),
install_requires=[
'httpagentparser',
'boto3',
'coverage',
'psycopg2',
# 'pyyaml<5.2,>=3.10',
'kubernetes',
'click',
'toml',
# 'dataflows>=0.0.37',
# 'dataflows-shell>=0.0.8',
# 'jupyterlab',
'awscli',
'urllib3<1.25',
'ruamel.yaml<1',
'requests==2.21',
# 'python-dateutil<2.8.1',
'botocore',
],
entry_points={
'console_scripts': [
'ckan-cloud-operator = ckan_cloud_operator.cli:main',
]
},
)
| 25.638298
| 66
| 0.575104
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 541
| 0.448963
|
4d01db8b99d5d581962d295f65f32a07a2a32b59
| 652
|
py
|
Python
|
extension/magic/activate.py
|
ianpreston/oh-my-py
|
17e37974c203cb28aa2de340c6ac66143c16bd4e
|
[
"Unlicense",
"MIT"
] | 3
|
2016-04-10T20:08:57.000Z
|
2021-12-05T19:03:37.000Z
|
extension/magic/activate.py
|
ianpreston/oh-my-py
|
17e37974c203cb28aa2de340c6ac66143c16bd4e
|
[
"Unlicense",
"MIT"
] | null | null | null |
extension/magic/activate.py
|
ianpreston/oh-my-py
|
17e37974c203cb28aa2de340c6ac66143c16bd4e
|
[
"Unlicense",
"MIT"
] | null | null | null |
import os
import os.path
def activate(ipython, venv):
"""
Shortcut to run execfile() on `venv`/bin/activate_this.py
"""
venv = os.path.abspath(venv)
venv_activate = os.path.join(venv, 'bin', 'activate_this.py')
if not os.path.exists(venv_activate):
print('Not a virtualenv: {}'.format(venv))
return
# activate_this.py doesn't set VIRTUAL_ENV, so we must set it here
os.environ['VIRTUAL_ENV'] = venv
os.putenv('VIRTUAL_ENV', venv)
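    # NOTE: execfile() only exists on Python 2; on Python 3 the equivalent would be exec(open(venv_activate).read(), {'__file__': venv_activate})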
execfile(venv_activate, {'__file__': venv_activate})
print('Activated: {}'.format(venv))
def load(ipython):
ipython.define_magic('activate', activate)
| 25.076923
| 70
| 0.662577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 245
| 0.375767
|
4d034751cf7a5ae250a1f9a85e64ff78986aa837
| 4,201
|
py
|
Python
|
storage/__init__.py
|
daqbroker/daqbrokerServer
|
e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33
|
[
"MIT"
] | null | null | null |
storage/__init__.py
|
daqbroker/daqbrokerServer
|
e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33
|
[
"MIT"
] | null | null | null |
storage/__init__.py
|
daqbroker/daqbrokerServer
|
e8d2b72b4e3ab12c26dfa7b52e9d77097ede3f33
|
[
"MIT"
] | null | null | null |
import base64
import os
import threading
from pathlib import Path
#from sqlitedict import SqliteDict
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, scoped_session
from daqbrokerServer.web.utils import hash_password
from daqbrokerServer.storage.server_schema import ServerBase, User, Connection
from daqbrokerServer.storage.contextual_session import session_open
# ###### THIS CREATES THE LOCAL STRUCTURE NECESSARY TO HOLD LOCAL DATABASES #######
# if not os.path.isdir(db_folder):
# os.mkdir(db_folder)
# # Initialise the local settings database
# local_url = "sqlite+pysqlite:///" + str(db_folder / "settings.sqlite")
# local_engine = create_engine(local_url)
# #################################################################################
# # This should create the mappings necessary on the local database
# Base.metadata.reflect(local_engine, extend_existing= True, autoload_replace= False)
# Base.metadata.create_all(local_engine, checkfirst= True)
# #This starts a session - probably not ideal, should consider using scoped session
# #LocalSession = scoped_session(sessionmaker(bind=local_engine))
# Session = sessionmaker(bind=local_engine)
# session = Session()
# Experimental class that handles the database folder definition and session setup for the server class
class LocalSession:
def __init__(self, db_folder= None, empty_connections= False):
        self.db_folder = None if db_folder is None else Path(db_folder)
self.url = "sqlite+pysqlite:///" + str( ( self.db_folder if self.db_folder else Path(__file__).parent / "databases" ) / "settings.sqlite")
self.engine = create_engine(self.url)
self.session = scoped_session(sessionmaker(bind=self.engine))
ServerBase.metadata.reflect(self.engine, extend_existing= True, autoload_replace= False)
ServerBase.metadata.create_all(self.engine, checkfirst= True)
Connection.set_db_folder(self.db_folder)
self.setup(empty_connections)
def setup(self, empty_connections= False):
test_session = self.session()
######## THIS IS VERY DANGEROUS - IT SHOULD BE A PROMPT CREATED WHEN INSTALLING THE LIBRARY
query = test_session.query(User).filter(User.id == 0)
if not query.count() > 0:
pwd = "admin"
password = hash_password(pwd)
user = User(id= 0, type= 3, email= "mail", username= "admin", password= password)
if not query.count() > 0:
test_session.add(user)
##########################################################################################
if not empty_connections:
##### THIS SHOULD LOOK FOR RECORDS OF LOCAL DATABASE, CREATES IF IT DOES NOT EXIST #######
query2 = test_session.query(Connection).filter(Connection.id == 0)
if not query2.count() > 0:
connection = Connection(id= 0, type= "sqlite+pysqlite", hostname= "local", username= "admin", password= base64.b64encode(b"admin"), port=0)
if not query2.count() > 0:
test_session.add(connection)
##########################################################################################
#Actually adding the object(s)
test_session.commit()
def teardown(self):
self.engine.dispose()
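# A minimal usage sketch (the db_folder path is an assumed example; the class
# creates/opens settings.sqlite there, seeds the default admin user and local
# connection, and exposes a scoped session factory):
#
#   local = LocalSession(db_folder="/tmp/daqbroker")
#   session = local.session()
#   # ... query User / Connection records through `session` ...
#   local.teardown()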
# ######## THIS IS VERY DANGEROUS - IT SHOULD BE A PROMPT CREATED WHEN INSTALLING THE LIBRARY
# query = session.query(User).filter(User.id == 0)
# if not query.count() > 0:
# pwd = "admin"
# password = hash_password(pwd)
# user = User(id= 0, type= 3, email= "mail", username= "admin", password= password)
# ##########################################################################################
# ##### THIS SHOULD LOOK FOR RECORDS OF LOCAL DATABASE, CREATES IF IT DOES NOT EXIST #######
# query2 = session.query(Connection).filter(Connection.id == 0)
# if not query2.count() > 0:
# connection = Connection(id= 0, type= "sqlite+pysqlite", hostname= "local", username= "admin", password= base64.b64encode(b"admin"), port=0)
# ##########################################################################################
# #Actually adding the objects - if one does not exist the other will most likely not exist too
# if (not query.count() > 0) or (not query2.count() > 0):
# connection.users.append(user)
# session.add(user)
# session.add(connection)
# session.commit()
| 40.009524
| 143
| 0.653416
| 1,802
| 0.428945
| 0
| 0
| 0
| 0
| 0
| 0
| 2,490
| 0.592716
|
4d03f7e180eeb633a961138f2a85fdbfb2a84df1
| 1,786
|
py
|
Python
|
tempest/api/queuing/test_queues.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/queuing/test_queues.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
tempest/api/queuing/test_queues.py
|
NetApp/tempest
|
dd86b1517ec5ac16c26975ed0ce0d8b7ddcac6cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from tempest.api.queuing import base
from tempest.common.utils import data_utils
from tempest import test
LOG = logging.getLogger(__name__)
class TestQueues(base.BaseQueuingTest):
@test.attr(type='smoke')
def test_create_queue(self):
# Create Queue
queue_name = data_utils.rand_name('test-')
resp, body = self.create_queue(queue_name)
self.addCleanup(self.client.delete_queue, queue_name)
self.assertEqual('201', resp['status'])
self.assertEqual('', body)
class TestManageQueue(base.BaseQueuingTest):
_interface = 'json'
@classmethod
def setUpClass(cls):
super(TestManageQueue, cls).setUpClass()
cls.queue_name = data_utils.rand_name('Queues-Test')
# Create Queue
cls.client.create_queue(cls.queue_name)
@test.attr(type='smoke')
def test_delete_queue(self):
# Delete Queue
resp, body = self.delete_queue(self.queue_name)
self.assertEqual('204', resp['status'])
self.assertEqual('', body)
@classmethod
def tearDownClass(cls):
cls.client.delete_queue(cls.queue_name)
super(TestManageQueue, cls).tearDownClass()
| 29.278689
| 69
| 0.702688
| 1,037
| 0.580627
| 0
| 0
| 906
| 0.507279
| 0
| 0
| 682
| 0.381859
|
4d04229e05bd8f6f6995b6ba536b1ed9096df15a
| 478
|
py
|
Python
|
checkin/tests.py
|
MAKENTNU/web
|
7a5b512bf4c087d1561cdb623d7df4b3d04811a2
|
[
"MIT"
] | 10
|
2017-11-25T01:47:20.000Z
|
2020-03-24T18:28:24.000Z
|
checkin/tests.py
|
MAKENTNU/web
|
7a5b512bf4c087d1561cdb623d7df4b3d04811a2
|
[
"MIT"
] | 319
|
2017-11-16T09:56:03.000Z
|
2022-03-28T00:24:37.000Z
|
checkin/tests.py
|
MAKENTNU/web
|
7a5b512bf4c087d1561cdb623d7df4b3d04811a2
|
[
"MIT"
] | 6
|
2017-11-12T14:04:08.000Z
|
2021-03-10T09:41:18.000Z
|
from django.test import TestCase
from django_hosts import reverse
from util.test_utils import Get, assert_requesting_paths_succeeds
class UrlTests(TestCase):
def test_all_get_request_paths_succeed(self):
path_predicates = [
Get(reverse('skills_present_list'), public=True),
Get(reverse('profile'), public=False),
Get(reverse('suggest'), public=False),
]
assert_requesting_paths_succeeds(self, path_predicates)
| 29.875
| 65
| 0.709205
| 342
| 0.715481
| 0
| 0
| 0
| 0
| 0
| 0
| 39
| 0.08159
|
4d04bfd380e253ed326e19219946bfffe57dc0dc
| 10,757
|
py
|
Python
|
tests/gdata_tests/live_client_test.py
|
lqc/google-data-api
|
b720582a472d627a0853d02e51e13dbce4cfe6ae
|
[
"Apache-2.0"
] | null | null | null |
tests/gdata_tests/live_client_test.py
|
lqc/google-data-api
|
b720582a472d627a0853d02e51e13dbce4cfe6ae
|
[
"Apache-2.0"
] | null | null | null |
tests/gdata_tests/live_client_test.py
|
lqc/google-data-api
|
b720582a472d627a0853d02e51e13dbce4cfe6ae
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
#
# Copyright (C) 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# This module is used for version 2 of the Google Data APIs.
# These tests attempt to connect to Google servers.
__author__ = 'j.s@google.com (Jeff Scudder)'
import os
import unittest
import gdata.gauth
import gdata.client
import atom.http_core
import atom.mock_http_core
import atom.core
import gdata.data
# TODO: switch to using v2 atom data once it is available.
import atom
import gdata.test_config as conf
conf.options.register_option(conf.BLOG_ID_OPTION)
class BloggerTest(unittest.TestCase):
def setUp(self):
self.client = None
if conf.options.get_value('runlive') == 'true':
self.client = gdata.client.GDClient()
conf.configure_client(self.client, 'BloggerTest', 'blogger')
def tearDown(self):
conf.close_client(self.client)
def test_create_update_delete(self):
if not conf.options.get_value('runlive') == 'true':
return
# Either load the recording or prepare to make a live request.
conf.configure_cache(self.client, 'test_create_update_delete')
blog_post = atom.Entry(
title=atom.Title(text='test from python BloggerTest'),
content=atom.Content(text='This is only a test.'))
http_request = atom.http_core.HttpRequest()
http_request.add_body_part(str(blog_post), 'application/atom+xml')
def entry_from_string_wrapper(response):
return atom.EntryFromString(response.read())
entry = self.client.request('POST',
'http://www.blogger.com/feeds/%s/posts/default' % (
conf.options.get_value('blogid')),
converter=entry_from_string_wrapper, http_request=http_request)
self.assertEqual(entry.title.text, 'test from python BloggerTest')
self.assertEqual(entry.content.text, 'This is only a test.')
# Edit the test entry.
edit_link = None
for link in entry.link:
# Find the edit link for this entry.
if link.rel == 'edit':
edit_link = link.href
entry.title.text = 'Edited'
http_request = atom.http_core.HttpRequest()
http_request.add_body_part(str(entry), 'application/atom+xml')
edited_entry = self.client.request('PUT', edit_link,
converter=entry_from_string_wrapper, http_request=http_request)
self.assertEqual(edited_entry.title.text, 'Edited')
self.assertEqual(edited_entry.content.text, entry.content.text)
# Delete the test entry from the blog.
edit_link = None
for link in edited_entry.link:
if link.rel == 'edit':
edit_link = link.href
response = self.client.request('DELETE', edit_link)
self.assertEqual(response.status, 200)
def test_use_version_two(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_use_version_two')
# Use version 2 of the Blogger API.
self.client.api_version = '2'
# Create a v2 blog post entry to post on the blog.
entry = create_element('entry')
entry._other_elements.append(
create_element('title', text='Marriage!',
attributes={'type': 'text'}))
entry._other_elements.append(
create_element('content', attributes={'type': 'text'},
text='Mr. Darcy has proposed marriage to me!'))
entry._other_elements.append(
create_element('category',
attributes={'scheme': TAG, 'term': 'marriage'}))
entry._other_elements.append(
create_element('category',
attributes={'scheme': TAG, 'term': 'Mr. Darcy'}))
http_request = atom.http_core.HttpRequest()
http_request.add_body_part(entry.to_string(), 'application/atom+xml')
posted = self.client.request('POST',
'http://www.blogger.com/feeds/%s/posts/default' % (
conf.options.get_value('blogid')),
converter=element_from_string, http_request=http_request)
# Verify that the blog post content is correct.
self.assertEqual(posted.get_elements('title', ATOM)[0].text, 'Marriage!')
# TODO: uncomment once server bug is fixed.
#self.assertEqual(posted.get_elements('content', ATOM)[0].text,
# 'Mr. Darcy has proposed marriage to me!')
found_tags = [False, False]
categories = posted.get_elements('category', ATOM)
self.assertEqual(len(categories), 2)
for category in categories:
if category.get_attributes('term')[0].value == 'marriage':
found_tags[0] = True
elif category.get_attributes('term')[0].value == 'Mr. Darcy':
found_tags[1] = True
self.assert_(found_tags[0])
self.assert_(found_tags[1])
# Find the blog post on the blog.
self_link = None
edit_link = None
for link in posted.get_elements('link', ATOM):
if link.get_attributes('rel')[0].value == 'self':
self_link = link.get_attributes('href')[0].value
elif link.get_attributes('rel')[0].value == 'edit':
edit_link = link.get_attributes('href')[0].value
self.assert_(self_link is not None)
self.assert_(edit_link is not None)
queried = self.client.request('GET', self_link,
converter=element_from_string)
# TODO: add additional asserts to check content and etags.
# Test queries using ETags.
entry = self.client.get_entry(self_link)
self.assert_(entry.etag is not None)
self.assertRaises(gdata.client.NotModified, self.client.get_entry,
self_link, etag=entry.etag)
# Delete the test blog post.
self.client.request('DELETE', edit_link)
class ContactsTest(unittest.TestCase):
def setUp(self):
self.client = None
if conf.options.get_value('runlive') == 'true':
self.client = gdata.client.GDClient()
conf.configure_client(self.client, 'ContactsTest', 'cp')
def tearDown(self):
conf.close_client(self.client)
def test_crud_version_two(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_crud_version_two')
self.client.api_version = '2'
entry = create_element('entry')
entry._other_elements.append(
create_element('title', ATOM, 'Jeff', {'type': 'text'}))
entry._other_elements.append(
create_element('email', GD,
attributes={'address': 'j.s@google.com', 'rel': WORK_REL}))
http_request = atom.http_core.HttpRequest()
http_request.add_body_part(entry.to_string(), 'application/atom+xml')
posted = self.client.request('POST',
'http://www.google.com/m8/feeds/contacts/default/full',
converter=element_from_string, http_request=http_request)
self_link = None
edit_link = None
for link in posted.get_elements('link', ATOM):
if link.get_attributes('rel')[0].value == 'self':
self_link = link.get_attributes('href')[0].value
elif link.get_attributes('rel')[0].value == 'edit':
edit_link = link.get_attributes('href')[0].value
self.assert_(self_link is not None)
self.assert_(edit_link is not None)
etag = posted.get_attributes('etag')[0].value
self.assert_(etag is not None)
self.assert_(len(etag) > 0)
# Delete the test contact.
http_request = atom.http_core.HttpRequest()
http_request.headers['If-Match'] = etag
self.client.request('DELETE', edit_link, http_request=http_request)
class VersionTwoClientContactsTest(unittest.TestCase):
def setUp(self):
self.client = None
if conf.options.get_value('runlive') == 'true':
self.client = gdata.client.GDClient()
self.client.api_version = '2'
conf.configure_client(self.client, 'VersionTwoClientContactsTest', 'cp')
self.old_proxy = os.environ.get('https_proxy')
def tearDown(self):
if self.old_proxy:
os.environ['https_proxy'] = self.old_proxy
elif 'https_proxy' in os.environ:
del os.environ['https_proxy']
conf.close_client(self.client)
def test_version_two_client(self):
if not conf.options.get_value('runlive') == 'true':
return
conf.configure_cache(self.client, 'test_version_two_client')
entry = gdata.data.GDEntry()
entry._other_elements.append(
create_element('title', ATOM, 'Test', {'type': 'text'}))
entry._other_elements.append(
create_element('email', GD,
attributes={'address': 'test@example.com', 'rel': WORK_REL}))
# Create the test contact.
posted = self.client.post(entry,
'https://www.google.com/m8/feeds/contacts/default/full')
self.assert_(isinstance(posted, gdata.data.GDEntry))
self.assertEqual(posted.get_elements('title')[0].text, 'Test')
self.assertEqual(posted.get_elements('email')[0].get_attributes(
'address')[0].value, 'test@example.com')
posted.get_elements('title')[0].text = 'Doug'
edited = self.client.update(posted)
self.assert_(isinstance(edited, gdata.data.GDEntry))
self.assertEqual(edited.get_elements('title')[0].text, 'Doug')
self.assertEqual(edited.get_elements('email')[0].get_attributes(
'address')[0].value, 'test@example.com')
# Delete the test contact.
self.client.delete(edited)
def test_crud_over_https_proxy(self):
os.environ['https_proxy'] = '98.192.125.23'
# Perform the CRUD test above, this time over a proxy.
self.test_version_two_client()
# Utility methods.
# The Atom XML namespace.
ATOM = 'http://www.w3.org/2005/Atom'
# URL used as the scheme for a blog post tag.
TAG = 'http://www.blogger.com/atom/ns#'
# Namespace for Google Data API elements.
GD = 'http://schemas.google.com/g/2005'
WORK_REL = 'http://schemas.google.com/g/2005#work'
def create_element(tag, namespace=ATOM, text=None, attributes=None):
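  # Build a bare XmlElement whose qname is '{namespace}tag', with optional text and attributes.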
element = atom.core.XmlElement()
element._qname = '{%s}%s' % (namespace, tag)
if text is not None:
element.text = text
if attributes is not None:
element._other_attributes = attributes.copy()
return element
def element_from_string(response):
return atom.core.xml_element_from_string(response.read(),
atom.core.XmlElement)
def suite():
return conf.build_suite([BloggerTest, ContactsTest,
VersionTwoClientContactsTest])
if __name__ == '__main__':
unittest.TextTestRunner().run(suite())
| 35.50165
| 78
| 0.685972
| 8,716
| 0.810263
| 0
| 0
| 0
| 0
| 0
| 0
| 3,213
| 0.298689
|
4d054d1c9024db142794eb18e583cbea3e61dd43
| 125
|
py
|
Python
|
apps/work_order/admin.py
|
joewen85/devops_study
|
6bbfbac7e70f295ef6068393bd9cf7d418ab4417
|
[
"Apache-2.0"
] | null | null | null |
apps/work_order/admin.py
|
joewen85/devops_study
|
6bbfbac7e70f295ef6068393bd9cf7d418ab4417
|
[
"Apache-2.0"
] | null | null | null |
apps/work_order/admin.py
|
joewen85/devops_study
|
6bbfbac7e70f295ef6068393bd9cf7d418ab4417
|
[
"Apache-2.0"
] | 1
|
2020-10-28T09:12:47.000Z
|
2020-10-28T09:12:47.000Z
|
from django.contrib import admin
# Register your models here.
from .models import WorkOrder
admin.site.register(WorkOrder)
| 17.857143
| 32
| 0.808
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 28
| 0.224
|
4d06738fefc51f3cbb68390fbd2da42ba7057869
| 262
|
py
|
Python
|
src/season/framework/status.py
|
season-framework/season-flask-wiz
|
95d75758a6036d387c1b803bd6a68f238ec430e0
|
[
"MIT"
] | 6
|
2021-12-09T05:06:49.000Z
|
2022-01-18T02:38:03.000Z
|
src/season/framework/status.py
|
season-framework/season-flask-wiz
|
95d75758a6036d387c1b803bd6a68f238ec430e0
|
[
"MIT"
] | 2
|
2022-02-18T02:00:36.000Z
|
2022-03-22T05:18:30.000Z
|
src/season/framework/status.py
|
season-framework/season-flask-wiz
|
95d75758a6036d387c1b803bd6a68f238ec430e0
|
[
"MIT"
] | 2
|
2022-01-07T00:26:00.000Z
|
2022-03-07T06:24:27.000Z
|
class status(Exception):
def __init__(self, code=200, response=None):
super().__init__("season.core.CLASS.RESPONSE.STATUS")
self.code = code
self.response = response
def get_response(self):
return self.response, self.code
| 32.75
| 61
| 0.660305
| 262
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 35
| 0.133588
|
4d0941aea75adaa006d884337e5c4d550547f131
| 6,030
|
py
|
Python
|
updates.py
|
knowledgetechnologyuhh/hipss
|
518bf3e6a4d02e234cbe29506b9afda0a6ccb187
|
[
"MIT"
] | null | null | null |
updates.py
|
knowledgetechnologyuhh/hipss
|
518bf3e6a4d02e234cbe29506b9afda0a6ccb187
|
[
"MIT"
] | null | null | null |
updates.py
|
knowledgetechnologyuhh/hipss
|
518bf3e6a4d02e234cbe29506b9afda0a6ccb187
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import torch.nn.functional as F
from torch.nn.utils.clip_grad import clip_grad_norm_
from mpi_utils.mpi_utils import sync_grads
def update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg):
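    # SAC-style automatic temperature tuning: alpha is pushed up when the policy entropy falls below target_entropy and down otherwise.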
if cfg.automatic_entropy_tuning:
alpha_loss = -(log_alpha * (log_pi + target_entropy).detach()).mean()
alpha_optim.zero_grad()
alpha_loss.backward()
alpha_optim.step()
alpha = log_alpha.exp()
alpha_tlogs = alpha.clone()
else:
alpha_loss = torch.tensor(0.)
alpha_tlogs = torch.tensor(alpha)
return alpha_loss, alpha_tlogs
def update_flat(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
target_entropy, alpha_optim, obs_norm, ag_norm, g_norm, obs_next_norm, actions, rewards, cfg):
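    # Soft actor-critic update on flat goal-conditioned input: observation, achieved goal and desired goal are concatenated into one vector.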
inputs_norm = np.concatenate([obs_norm, ag_norm, g_norm], axis=1)
inputs_next_norm = np.concatenate([obs_next_norm, ag_norm, g_norm], axis=1)
inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
actions_tensor = torch.tensor(actions, dtype=torch.float32)
r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
if cfg.cuda:
inputs_norm_tensor = inputs_norm_tensor.cuda()
inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
actions_tensor = actions_tensor.cuda()
r_tensor = r_tensor.cuda()
with torch.no_grad():
actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor)
qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next)
min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
next_q_value = r_tensor + cfg.gamma * min_qf_next_target
# the q loss
qf = critic_network(inputs_norm_tensor, actions_tensor)
qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
# the actor loss
pi, log_pi, _ = actor_network.sample(inputs_norm_tensor)
qf_pi = critic_network(inputs_norm_tensor, pi)
min_qf_pi = torch.min(qf_pi, dim=0).values
policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
# update actor network
policy_optim.zero_grad()
policy_loss.backward()
sync_grads(actor_network)
policy_optim.step()
# update the critic_network
critic_optim.zero_grad()
qf_loss.backward()
if cfg.clip_grad_norm:
clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
sync_grads(critic_network)
critic_optim.step()
alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
train_metrics = dict(q_loss=qf_loss.item(),
next_q=next_q_value.mean().item(),
policy_loss=policy_loss.item(),
alpha_loss=alpha_loss.item(),
alpha_tlogs=alpha_tlogs.item())
for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
train_metrics[f'q_{idx}'] = _qf.mean().item()
train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
return train_metrics
def update_language(actor_network, critic_network, critic_target_network, policy_optim, critic_optim, alpha, log_alpha,
target_entropy, alpha_optim, obs_norm, instruction, obs_next_norm, actions, rewards, cfg):
inputs_norm = obs_norm
inputs_next_norm = obs_next_norm
inputs_norm_tensor = torch.tensor(inputs_norm, dtype=torch.float32)
inputs_next_norm_tensor = torch.tensor(inputs_next_norm, dtype=torch.float32)
actions_tensor = torch.tensor(actions, dtype=torch.float32)
r_tensor = torch.tensor(rewards, dtype=torch.float32).reshape(rewards.shape[0], 1)
instruction_tensor = torch.tensor(instruction, dtype=torch.long)
if cfg.cuda:
inputs_norm_tensor = inputs_norm_tensor.cuda()
inputs_next_norm_tensor = inputs_next_norm_tensor.cuda()
actions_tensor = actions_tensor.cuda()
r_tensor = r_tensor.cuda()
instruction_tensor = instruction_tensor.cuda()
with torch.no_grad():
actions_next, log_pi_next, _ = actor_network.sample(inputs_next_norm_tensor, instruction_tensor)
qf_next_target = critic_target_network(inputs_next_norm_tensor, actions_next, instruction_tensor)
min_qf_next_target = torch.min(qf_next_target, dim=0).values - alpha * log_pi_next
next_q_value = r_tensor + cfg.gamma * min_qf_next_target
# the q loss
qf = critic_network(inputs_norm_tensor, actions_tensor, instruction_tensor)
qf_loss = torch.stack([F.mse_loss(_qf, next_q_value) for _qf in qf]).mean()
# the actor loss
pi, log_pi, _ = actor_network.sample(inputs_norm_tensor, instruction_tensor)
qf_pi = critic_network(inputs_norm_tensor, pi, instruction_tensor)
min_qf_pi = torch.min(qf_pi, dim=0).values
policy_loss = ((alpha * log_pi) - min_qf_pi).mean()
# update actor network
policy_optim.zero_grad()
policy_loss.backward()
sync_grads(actor_network)
policy_optim.step()
# update the critic_network
critic_optim.zero_grad()
qf_loss.backward()
if cfg.clip_grad_norm:
clip_grad_norm_(critic_network.parameters(), cfg.max_norm)
sync_grads(critic_network)
critic_optim.step()
alpha_loss, alpha_tlogs = update_entropy(alpha, log_alpha, target_entropy, log_pi, alpha_optim, cfg)
train_metrics = dict(q_loss=qf_loss.item(),
next_q=next_q_value.mean().item(),
policy_loss=policy_loss.item(),
alpha_loss=alpha_loss.item(),
alpha_tlogs=alpha_tlogs.item())
for idx, (_qf, _qtarget) in enumerate(zip(qf, qf_next_target)):
train_metrics[f'q_{idx}'] = _qf.mean().item()
train_metrics[f'q_target_{idx}'] = _qtarget.mean().item()
return train_metrics
| 42.167832
| 119
| 0.703814
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 208
| 0.034494
|
4d098b2bde7f0fef38c7be3632c1ac962fd07aad
| 125,107
|
py
|
Python
|
spaghetti/network.py
|
gegen07/spaghetti
|
f10f9d016deeb8d4cdd63377304fc8e3b8492a0f
|
[
"BSD-3-Clause"
] | 182
|
2018-07-23T20:17:32.000Z
|
2022-03-28T07:08:43.000Z
|
spaghetti/network.py
|
gegen07/spaghetti
|
f10f9d016deeb8d4cdd63377304fc8e3b8492a0f
|
[
"BSD-3-Clause"
] | 563
|
2017-04-14T23:39:21.000Z
|
2022-02-12T20:34:21.000Z
|
spaghetti/network.py
|
gegen07/spaghetti
|
f10f9d016deeb8d4cdd63377304fc8e3b8492a0f
|
[
"BSD-3-Clause"
] | 51
|
2017-04-14T23:40:31.000Z
|
2022-03-31T01:41:56.000Z
|
from collections import defaultdict, OrderedDict
from itertools import islice
import copy, os, pickle, warnings
import esda
import numpy
from .analysis import GlobalAutoK
from . import util
from libpysal import cg, examples, weights
from libpysal.common import requires
try:
from libpysal import open
except ImportError:
import libpysal
open = libpysal.io.open
__all__ = ["Network", "PointPattern", "GlobalAutoK"]
SAME_SEGMENT = (-0.1, -0.1)
dep_msg = (
"The next major release of pysal/spaghetti (2.0.0) will "
"drop support for all ``libpysal.cg`` geometries. This change "
"is a first step in refactoring ``spaghetti`` that is "
"expected to result in dramatically reduced runtimes for "
"network instantiation and operations. Users currently "
"requiring network and point pattern input as ``libpysal.cg`` "
"geometries should prepare for this simply by converting "
"to ``shapely`` geometries."
)
warnings.warn(f"{dep_msg}", FutureWarning)
class Network:
"""Spatially-constrained network representation and analytical
functionality. Naming conventions are as follows, (1) arcs and
vertices for the full network object, and (2) edges and nodes for
the simplified graph-theoretic object. The term 'link' is used to
refer to a network arc or a graph edge.
Parameters
----------
in_data : {str, iterable (list, tuple, numpy.ndarray), libpysal.cg.Chain, geopandas.GeoDataFrame}
The input geographic data. Either (1) a path to a shapefile
(str); (2) an iterable containing ``libpysal.cg.Chain``
objects; (3) a single ``libpysal.cg.Chain``; or
(4) a ``geopandas.GeoDataFrame``.
vertex_sig : int
Round the x and y coordinates of all vertices to ``vertex_sig``
significant digits (combined significant digits on the left and
right of the decimal place). Default is 11. Set to ``None`` for
no rounding.
unique_arcs : bool
If ``True`` (default), keep only unique arcs (i.e., prune
out any duplicated arcs). If ``False`` keep all segments.
extractgraph : bool
If ``True``, extract a graph-theoretic object with no degree 2
nodes. Default is ``True``.
w_components : bool
Set to ``False`` to not record connected components from a
``libpysal.weights.W`` object. Default is ``True``.
weightings : {dict, bool}
If dict, lists of weightings for each arc. If bool,
``True`` flags ``self.arc_lengths`` as the weightings,
``False`` sets no weightings. Default is ``False``.
weights_kws : dict
Keyword arguments for ``libpysal.weights.W``.
vertex_atol : {int, None}
Precision for vertex absolute tolerance. Round vertex coordinates to
``vertex_atol`` decimal places. Default is ``None``. **ONLY** change
the default when there are known issues with digitization.
Attributes
----------
adjacencylist : list
List of lists storing vertex adjacency.
vertex_coords : dict
Keys are vertex IDs and values are :math:`(x,y)` coordinates of the vertices.
vertex_list : list
List of vertex IDs.
vertices : dict
Keys are tuples of vertex coords and values are the vertex ID.
arcs : list
List of arcs, where each arc is a sorted tuple
of vertex IDs.
arc_lengths : dict
Keys are tuples of sorted vertex IDs representing an arc and
values are the length.
pointpatterns : dict
Keys are a string name of the pattern and values are
``PointPattern`` class instances.
distance_matrix : numpy.ndarray
All network vertices (non-observations) distance matrix. Distances
between vertices in disparate components are recorded as ``inf``
by default.
network_trees : dict
Keys are the vertex IDs (``int``). Values are dictionaries
with the keys being the IDs of the destination vertex
and values being lists of vertices along the shortest path.
If the destination vertex is a) the origin or b)
unreachable (disparate component) it is listed as itself being the
neighbor.
edges : list
Tuples of graph edge IDs.
edge_lengths : dict
Keys are the graph edge IDs (``tuple``). Values are the
graph edge length (``float``).
non_articulation_points : list
All vertices with degree 2 that are not in an isolated
island ring (loop) component.
w_network : libpysal.weights.W
Weights object created from the network arcs.
network_n_components : int
Count of connected components in the network.
network_fully_connected : bool
``True`` if the network representation is a single connected
component, otherwise ``False``.
network_component_labels : numpy.ndarray
Component labels for network arcs.
network_component2arc : dict
Lookup in the form {int: list} for arcs comprising network
connected components keyed by component labels with arcs in
a list as values.
network_component_lengths : dict
Length of each network component (keyed by component label).
network_longest_component : int
The ID of the longest component in the network. This is not
necessarily equal to ``network_largest_component``.
network_component_vertices : dict
Lookup in the form {int: list} for vertices comprising network
connected components keyed by component labels with vertices in
a list as values.
network_component_vertex_count : dict
The number of vertices in each network component
(keyed by component label).
network_largest_component : int
The ID of the largest component in the network. Within ``spaghetti``
the largest component is the one with the most vertices. This is not
necessarily equal to ``network_longest_component``.
network_component_is_ring : dict
Lookup in the form {int: bool} keyed by component labels with values
as ``True`` if the component is a closed ring, otherwise ``False``.
w_graph : libpysal.weights.W
Weights object created from the graph edges.
graph_n_components : int
Count of connected components in the graph.
graph_fully_connected : bool
``True`` if the graph representation is a single connected
component, otherwise ``False``.
graph_component_labels : numpy.ndarray
Component labels for graph edges.
graph_component2edge : dict
Lookup in the form {int: list} for edges comprising graph connected
components keyed by component labels with edges in a list
as values.
graph_component_lengths : dict
Length of each graph component (keyed by component label).
graph_longest_component : int
The ID of the longest component in the graph. This is not
necessarily equal to ``graph_largest_component``.
graph_component_vertices : dict
Lookup in the form {int: list} for vertices comprising graph
connected components keyed by component labels with vertices in
a list as values.
graph_component_vertex_count : dict
The number of vertices in each graph component
(keyed by component label).
graph_largest_component : int
The ID of the largest component in the graph. Within ``spaghetti``
the largest component is the one with the most vertices. This is not
necessarily equal to ``graph_longest_component``.
graph_component_is_ring : dict
Lookup in the form {int: bool} keyed by component labels with values as
``True`` if the component is a closed ring, otherwise ``False``.
Notes
-----
**Important**: The core procedure for generating network representations is
performed within the ``_extractnetwork()`` method. Here it is important to note
that a ``spaghetti.Network`` instance is built up from the individual,
constituent euclidean units of each line segment object. Therefore, the resulting
network structure will generally have (1) more vertices and links than may be expected
and (2) many degree-2 vertices, which differs from a truly graph-theoretic object.
This is demonstrated in the
`Caveats Tutorial <https://pysal.org/spaghetti/notebooks/caveats.html#4.-Understanding-network-generation>`_.
See :cite:`Cliff1981`, :cite:`Tansel1983a`,
:cite:`AhujaRavindraK`, :cite:`Labbe1995`,
:cite:`Kuby2009`, :cite:`Barthelemy2011`,
:cite:`daskin2013`, :cite:`Okabe2012`,
:cite:`Ducruet2014`, :cite:`Weber2016`, for more in-depth discussion on
spatial networks, graph theory, and location along networks.
For related network-centric software see
`Snkit <https://github.com/tomalrussell/snkit>`_ :cite:`tom_russell_2019_3379659`,
`SANET <http://sanet.csis.u-tokyo.ac.jp>`_ :cite:`Okabe2006a`,
`NetworkX <https://networkx.github.io>`_ :cite:`Hagberg2008`,
`Pandana <http://udst.github.io/pandana/>`_ :cite:`Foti2012`,
and `OSMnx <https://osmnx.readthedocs.io/en/stable/>`_ :cite:`Boeing2017`.
Examples
--------
Create an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Fetch the number of connected components in the network.
>>> ntw.network_n_components
1
Unique component labels in the network.
>>> import numpy
>>> list(numpy.unique(ntw.network_component_labels))
[0]
Show whether each component of the network is an isolated ring (or not).
>>> ntw.network_component_is_ring
{0: False}
Show how many network arcs are associated with the component.
>>> arcs = len(ntw.network_component2arc[ntw.network_component_labels[0]])
>>> arcs
303
Do the same as above, but for the graph-theoretic representation
of the network object.
>>> ntw.graph_n_components
1
>>> list(numpy.unique(ntw.graph_component_labels))
[0]
>>> ntw.graph_component_is_ring
{0: False}
>>> edges = len(ntw.graph_component2edge[ntw.graph_component_labels[0]])
>>> edges
179
The number of arcs in the network is always greater than or equal
to the number of edges in the graph-theoretic representation.
>>> arcs >= edges
True
Snap point observations to the network with attribute information.
>>> crimes_file = examples.get_path("crimes.shp")
>>> ntw.snapobservations(crimes_file, "crimes", attribute=True)
And without attribute information.
>>> schools_file = examples.get_path("schools.shp")
>>> ntw.snapobservations(schools_file, "schools", attribute=False)
Show the point patterns associated with the network.
>>> ntw.pointpatterns.keys()
dict_keys(['crimes', 'schools'])
"""
def __init__(
self,
in_data=None,
vertex_sig=11,
unique_arcs=True,
extractgraph=True,
w_components=True,
weightings=False,
weights_kws=dict(),
vertex_atol=None,
):
# do this when creating a clean network instance from a
# shapefile or a geopandas.GeoDataFrame, otherwise a shell
# network instance is created (see `split_arcs()` method)
if in_data is not None:
# set parameters as attributes
self.in_data = in_data
self.vertex_sig = vertex_sig
self.vertex_atol = vertex_atol
self.unique_arcs = unique_arcs
self.adjacencylist = defaultdict(list)
self.vertices = {}
# initialize network arcs and arc_lengths
self.arcs = []
self.arc_lengths = {}
# initialize pointpatterns
self.pointpatterns = {}
# spatial representation of the network
self._extractnetwork()
self.arcs = sorted(self.arcs)
self.vertex_coords = dict((v, k) for k, v in self.vertices.items())
# extract connected components
if w_components:
as_graph = False
network_weightings = False
if weightings == True:
# set network arc weights to length if weights are
# desired, but no other input is given
weightings = self.arc_lengths
network_weightings = True
# extract contiguity weights from libpysal
self.w_network = self.contiguityweights(
graph=as_graph,
weightings=weightings,
weights_kws=weights_kws,
)
# identify connected components from the `w_network`
self.identify_components(self.w_network, graph=as_graph)
# extract the graph -- repeat similar as above
# for extracting the network
if extractgraph:
self.extractgraph()
if w_components:
as_graph = True
if network_weightings:
weightings = self.edge_lengths
self.w_graph = self.contiguityweights(
graph=as_graph,
weightings=weightings,
weights_kws=weights_kws,
)
self.identify_components(self.w_graph, graph=as_graph)
# sorted list of vertex IDs
self.vertex_list = sorted(self.vertices.values())
def _round_sig(self, v):
"""Used internally to round the vertex to a set number of
significant digits. If ``sig`` is set to 4, then the following
are some possible results for a coordinate.
(1) 0.0xxxx, (2) 0.xxxx, (3) x.xxx, (4) xx.xx,
(5) xxx.x, (6) xxxx.0, (7) xxxx0.0
Parameters
----------
v : tuple
Coordinate (x,y) of the vertex.
"""
# set the number of significant digits
sig = self.vertex_sig
# simply return vertex (x,y) coordinates
if sig is None:
return v
# for each coordinate in a coordinate pair
# if the coordinate location is (0.0) simply return zero
# else -- (1) take the absolute value of `val`; (2) take the
# base 10 log for [1]; (3) take the floor of [2]; (4) convert
# [3] into a negative integer; (5) add `sig - 1` to [4];
# (6) round `val` by [5]
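# illustrative worked values (not from the original source), with sig = 4:
#   123.456   -> round(123.456, -2 + 3) = round(123.456, 1)   = 123.5
#   0.0123456 -> round(0.0123456, 2 + 3) = round(0.0123456, 5) = 0.01235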
out_v = [
val
if val == 0
else round(val, -int(numpy.floor(numpy.log10(numpy.fabs(val)))) + (sig - 1))
for val in v
]
if self.vertex_atol:
out_v = [round(v, self.vertex_atol) for v in out_v]
return tuple(out_v)
def identify_components(self, w, graph=False):
"""Identify connected component information from a
``libpysal.weights.W`` object
Parameters
----------
w : libpysal.weights.W
Weights object created from the network segments (either
raw or graph-theoretic).
graph : bool
Flag for a raw network (``False``) or graph-theoretic network
(``True``). Default is ``False``.
"""
# flag network (arcs) or graph (edges)
if graph:
links = self.edges
obj_type = "graph_"
else:
links = self.arcs
obj_type = "network_"
# connected component count and labels
n_components = w.n_components
component_labels = w.component_labels
# is the network a single, fully-connected component?
if n_components == 1:
fully_connected = True
else:
fully_connected = False
# link to component lookup
link2component = dict(zip(links, component_labels))
# component ID lookups: links, lengths, vertices, vertex counts
component2link = {}
component_lengths = {}
component_vertices = {}
component_vertex_count = {}
cp_labs_ = set(w.component_labels)
l2c_ = link2component.items()
for cpl in cp_labs_:
component2link[cpl] = sorted([k for k, v in l2c_ if v == cpl])
c2l_ = component2link[cpl]
arclens_ = self.arc_lengths.items()
component_lengths[cpl] = sum([v for k, v in arclens_ if k in c2l_])
component_vertices[cpl] = list(set([v for l in c2l_ for v in l]))
component_vertex_count[cpl] = len(component_vertices[cpl])
# longest and largest components
longest_component = max(component_lengths, key=component_lengths.get)
largest_component = max(component_vertex_count, key=component_vertex_count.get)
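# hypothetical illustration (values not from the original source): with
# component_lengths = {0: 12.5, 1: 3.0} and component_vertex_count = {0: 4, 1: 6},
# the longest component is 0 while the largest component is 1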
# component to ring lookup
component_is_ring = {}
adj_ = self.adjacencylist.items()
for comp, verts in component_vertices.items():
component_is_ring[comp] = False
_2neighs = [len(neighs) == 2 for v, neighs in adj_ if v in verts]
if all(_2neighs):
component_is_ring[comp] = True
# attribute label name depends on object type
if graph:
c2l_attr_name = "component2edge"
else:
c2l_attr_name = "component2arc"
# set all new variables into list
extracted_attrs = [
["fully_connected", fully_connected],
["n_components", n_components],
["component_labels", component_labels],
[c2l_attr_name, component2link],
["component_lengths", component_lengths],
["component_vertices", component_vertices],
["component_vertex_count", component_vertex_count],
["longest_component", longest_component],
["largest_component", largest_component],
["component_is_ring", component_is_ring],
]
# iterate over list and set attribute with
# either "network" or "graph" extension
for (attr_str, attr) in extracted_attrs:
setattr(self, obj_type + attr_str, attr)
def _extractnetwork(self):
"""Used internally to extract a network."""
# initialize vertex count
vertex_count = 0
# determine input network data type
in_dtype = str(type(self.in_data)).split("'")[1]
is_libpysal_chains = False
supported_iterables = ["list", "tuple", "numpy.ndarray"]
# type error message
msg = "'%s' not supported for network instantiation."
# set appropriate geometries
if in_dtype == "str":
shps = open(self.in_data)
elif in_dtype in supported_iterables:
shps = self.in_data
shp_type = str(type(shps[0])).split("'")[1]
if shp_type == "libpysal.cg.shapes.Chain":
is_libpysal_chains = True
else:
raise TypeError(msg % shp_type)
elif in_dtype == "libpysal.cg.shapes.Chain":
shps = [self.in_data]
is_libpysal_chains = True
elif in_dtype == "geopandas.geodataframe.GeoDataFrame":
shps = self.in_data.geometry
else:
raise TypeError(msg % in_dtype)
# iterate over each record of the network lines
for shp in shps:
# if the segments are native pysal geometries
if is_libpysal_chains:
vertices = shp.vertices
else:
# fetch all vertices between euclidean segments
# in the line record -- these vertices are
# coordinates in an (x, y) tuple.
vertices = weights._contW_lists._get_verts(shp)
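# illustrative example (coordinates are hypothetical): a single line record
# digitized as ((0.0, 0.0), (1.0, 0.0), (1.0, 1.0)) yields three vertices and,
# in the loop below, two euclidean arcs keyed by sorted vertex IDs,
# e.g. (0, 1) and (1, 2), each with its own recorded length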
# iterate over each vertex (v)
for i, v in enumerate(vertices[:-1]):
# -- For vertex 1
# adjust precision -- this was originally
# implemented to handle high-precision
# network vertices
v = self._round_sig(v)
# when the vertex already exists in lookup
# set it as the current `vid`
try:
vid = self.vertices[v]
# when the vertex is not present in the lookup
# add it and adjust vertex count
except KeyError:
self.vertices[v] = vid = vertex_count
vertex_count += 1
# -- For vertex 2
# repeat the steps above for vertex 1
v2 = self._round_sig(vertices[i + 1])
try:
nvid = self.vertices[v2]
except KeyError:
self.vertices[v2] = nvid = vertex_count
vertex_count += 1
# records vertex 1 and vertex 2 adjacency
self.adjacencylist[vid].append(nvid)
self.adjacencylist[nvid].append(vid)
# Sort the edges so that mono-directional
# keys can be stored.
arc_vertices = sorted([vid, nvid])
arc = tuple(arc_vertices)
# record the euclidean arc within the network
self.arcs.append(arc)
# record length
length = util.compute_length(v, vertices[i + 1])
self.arc_lengths[arc] = length
if self.unique_arcs:
# Remove duplicate edges and duplicate adjacent nodes.
self.arcs = list(set(self.arcs))
for k, v in self.adjacencylist.items():
self.adjacencylist[k] = list(set(v))
def extractgraph(self):
"""Using the existing network representation, create a
graph-theoretic representation by removing all vertices with a
neighbor incidence of two (non-articulation points). That is, we
assume these vertices are bridges between vertices with higher
or lower incidence.
"""
# initialize edges and edge_lengths
self.edges = []
self.edge_lengths = {}
# find all vertices with degree 2 that are not in an isolated
# island ring (loop) component. These are non-articulation
# points on the graph representation
non_articulation_points = self._yield_napts()
# retain non_articulation_points as an attribute
self.non_articulation_points = list(non_articulation_points)
# start with a copy of the spatial representation and
# iteratively remove edges deemed to be segments
self.edges = copy.deepcopy(self.arcs)
self.edge_lengths = copy.deepcopy(self.arc_lengths)
# mapping all the 'network arcs' contained within a single
# 'graph represented' edge
self.arcs_to_edges = {}
# build up bridges "rooted" on the initial
# non-articulation points
bridge_roots = []
# iterate over all vertices that are not contained within
# isolated loops that have a degree of 2
for s in non_articulation_points:
# initialize the bridge with a non-articulation point
bridge = [s]
# fetch all vertices adjacent to point `s`
# that are also degree 2
neighbors = self._yieldneighbor(s, non_articulation_points, bridge)
while neighbors:
# extract the current node in `neighbors`
cnode = neighbors.pop()
# remove it from `non_articulation_points`
non_articulation_points.remove(cnode)
# add it to bridge
bridge.append(cnode)
# fetch neighbors for the current node
newneighbors = self._yieldneighbor(
cnode, non_articulation_points, bridge
)
# add the new neighbors back into `neighbors`
neighbors += newneighbors
# once all potential neighbors are exhausted add the
# current bridge of non-articulation points to the
# list of rooted bridges
bridge_roots.append(bridge)
# iterate over the list of newly created rooted bridges
for bridge in bridge_roots:
# if there is only one non-articulation
# point in the bridge
if len(bridge) == 1:
# fetch the vertices adjacent to the singular element of the bridge
n = self.adjacencylist[bridge[0]]
# and create a new graph edge from it
new_edge = tuple(sorted([n[0], n[1]]))
# identify the arcs to be removed
e1 = tuple(sorted([bridge[0], n[0]]))
e2 = tuple(sorted([bridge[0], n[1]]))
# remove the network arcs (spatial) from the
# graph-theoretic representation
self.edges.remove(e1)
self.edges.remove(e2)
# remove the former network arc lengths from the
# graph edge lengths lookup
length_e1 = self.edge_lengths[e1]
length_e2 = self.edge_lengths[e2]
self.edge_lengths.pop(e1, None)
self.edge_lengths.pop(e2, None)
# and add the new edge length in their place
self.edge_lengths[new_edge] = length_e1 + length_e2
# update the pointers
self.arcs_to_edges[e1] = new_edge
self.arcs_to_edges[e2] = new_edge
# if there is more than one vertex in the bridge
else:
cumulative_length = 0
start_end = {}
# initialize a redundant set of bridge edges
redundant = set([])
# iterate over the current bridge
for b in bridge:
# iterate over each neighbor of the bridge vertex
for n in self.adjacencylist[b]:
# start the bridge with this node
if n not in bridge:
start_end[b] = n
# or create a redundant edge with the current
# node and `b`
else:
redundant.add(tuple(sorted([b, n])))
# initialize a new graph edge
new_edge = tuple(sorted(start_end.values()))
# add start_end redundant edge
for k, v in start_end.items():
redundant.add(tuple(sorted([k, v])))
# remove all redundant network arcs while
# adjusting the graph edge lengths lookup
# and the edges_to_arcs lookup
for r in redundant:
self.edges.remove(r)
cumulative_length += self.edge_lengths[r]
self.edge_lengths.pop(r, None)
self.arcs_to_edges[r] = new_edge
# finally, add the new cumulative edge length
self.edge_lengths[new_edge] = cumulative_length
# add the updated graph edge
self.edges.append(new_edge)
# convert the graph edges into a sorted list of unique edges, pruning out the
# duplicate graph edges created during simplification
self.edges = sorted(set(self.edges))
def _yield_napts(self):
"""Find all nodes with degree 2 that are not in an isolated
island ring (loop) component. These are non-articulation
points on the graph representation.
Returns
-------
napts : list
Non-articulation points on a graph representation.
"""
# non-articulation points
napts = set()
# network vertices remaining to evaluate
unvisted = set(self.vertices.values())
while unvisted:
# iterate over each component
for component_id, ring in self.network_component_is_ring.items():
# evaluate for non-articulation points
napts, unvisted = self._evaluate_napts(
napts, unvisted, component_id, ring
)
# convert set of non-articulation points into list
napts = list(napts)
return napts
def _evaluate_napts(self, napts, unvisited, component_id, ring):
"""Evaluate one connected component in a network for
non-articulation points (``napts``) and return an updated set of
``napts`` and unvisited vertices.
Parameters
----------
napts : set
Non-articulation points (``napts``) in the network. The
``napts`` here do not include those within an isolated
loop island.
unvisited : set
Vertices left to evaluate in the network.
component_id : int
ID for the network connected component for the
current iteration of the algorithm.
ring : bool
Network component is isolated island loop ``True`` or
not ``False``.
Returns
-------
napts : set
Updated ``napts`` object.
unvisited : set
Updated ``unvisited`` object.
"""
# iterate over each `edge` of the `component`
for component in self.network_component2arc[component_id]:
# each `component` has two vertices
for vertex in component:
# if `component` is not an isolated island
# and `vertex` has exactly 2 neighbors,
# add `vertex` to `napts`
if not ring:
if len(self.adjacencylist[vertex]) == 2:
napts.add(vertex)
# remove `vertex` from `unvisited` if
# it is still in the set else move along to
# the next iteration
try:
unvisited.remove(vertex)
except KeyError:
pass
return napts, unvisited
def _yieldneighbor(self, vtx, arc_vertices, bridge):
"""Used internally, this method traverses a bridge arc
to find the source and destination nodes.
Parameters
----------
vtx : int
The vertex ID.
arc_vertices : list
All non-articulation points (``napts``) in the network.
These are referred to as degree-2 vertices.
bridge : list
Initial bridge list containing only ``vtx``.
Returns
-------
nodes : list
Vertices to keep (articulation points). These elements are
referred to as nodes.
"""
# instantiate an empty list to fill with network articulation
# points (nodes with a degree of 1 [endpoints] or greater
# than 2 [intersections])
nodes = []
# get all nodes adjacent to `vtx` that are not in the
# set of 'bridge' vertices
for i in self.adjacencylist[vtx]:
if i in arc_vertices and i not in bridge:
nodes.append(i)
return nodes
def contiguityweights(
self, graph=True, weightings=None, from_split=False, weights_kws=dict()
):
"""Create a contiguity-based ``libpysal.weights.W`` object.
Parameters
----------
graph : bool
Controls whether the ``libpysal.weights.W`` is generated
using the spatial representation (``False``) or the graph
representation (``True``). Default is ``True``.
weightings : {dict, None}
Dictionary of lists of weightings for each arc/edge. Default is ``None``.
from_split : bool
Flag for whether the method is being called from within
``split_arcs()`` (``True``) or not (``False``). Default is ``False``.
weights_kws : dict
Keyword arguments for ``libpysal.weights.W``.
Returns
-------
W : libpysal.weights.W
A ``W`` representing the binary adjacency of the network.
See also
--------
libpysal.weights.W
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> import numpy
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap point observations to the network with attribute information.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Find counts per network arc.
>>> counts = ntw.count_per_link(
... ntw.pointpatterns["crimes"].obs_to_arc, graph=False
... )
>>> counts[(50, 165)]
4
Create a contiguity-based ``W`` object.
>>> w = ntw.contiguityweights(graph=False)
>>> w.n, w.n_components
(303, 1)
Notes
-----
See :cite:`pysal2007` for more details.
"""
# instantiate OrderedDict to record network link
# adjacency which will be keyed by the link ID (a tuple)
# with values being lists of tuples (contiguous links)
neighbors = OrderedDict()
# flag network (arcs) or graph (edges)
if graph:
links = self.edges
else:
links = self.arcs
# if weightings are desired instantiate a dictionary
# otherwise ignore weightings
if weightings:
_weights = {}
else:
_weights = None
# iterate over all links until all possibilities
# for network link adjacency are exhausted
working = True
while working:
# for each network link (1)
for key in links:
# instantiate a slot in the OrderedDict
neighbors[key] = []
if weightings:
_weights[key] = []
# for each network link (2)
for neigh in links:
# skip if comparing link to itself
if key == neigh:
continue
# if link(1) and link(2) share any vertex
# update neighbors adjacency
if (
key[0] == neigh[0]
or key[0] == neigh[1]
or key[1] == neigh[0]
or key[1] == neigh[1]
):
neighbors[key].append(neigh)
# and add weights if desired
if weightings:
_weights[key].append(weightings[neigh])
# break condition
# -- everything is sorted, so we know when we have
# stepped beyond a possible neighbor
if key[1] > neigh[1]:
working = False
if len(links) == 1 or from_split:
working = False
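# at this point `neighbors` is keyed by link and lists every link sharing a
# vertex with it, e.g. (hypothetical IDs) neighbors[(0, 1)] = [(0, 2), (1, 3)];
# `_weights`, when requested, holds the corresponding weighting lists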
# call libpysal for `W` instance
weights_kws["weights"] = _weights
w = weights.W(neighbors, **weights_kws)
return w
def distancebandweights(self, threshold, n_processes=1, gen_tree=False):
"""Create distance-based weights.
Parameters
----------
threshold : float
Distance threshold value.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path with ``True``, or skip with ``False``.
Default is ``False``.
Returns
-------
w : libpysal.weights.W
A ``W`` object representing the binary adjacency of
the network.
Notes
-----
See :cite:`AnselinRey2014` and :cite:`rey_open_2015` for more details
regarding spatial weights.
See also
--------
libpysal.weights.W
Examples
--------
Instantiate an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Create a contiguity-based ``W`` object based on network distance, ``500``
`US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_.
>>> w = ntw.distancebandweights(threshold=500)
Show the number of units in the ``W`` object.
>>> w.n
230
There are ``8`` units with ``3`` neighbors in the ``W`` object.
>>> w.histogram[-1]
(8, 3)
"""
# if a vertex-to-vertex network distance matrix is
# not present in the `network.Network` object; calculate
# one at this point
if not hasattr(self, "distance_matrix"):
self.full_distance_matrix(n_processes, gen_tree=gen_tree)
# identify all network vertices which are within the
# `threshold` parameter
neighbor_query = numpy.where(self.distance_matrix < threshold)
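# `numpy.where` on the 2-D boolean mask returns two aligned index arrays
# (row indices, column indices); each aligned pair (i, j) below is a pair of
# network vertices whose shortest-path distance falls under `threshold`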
# create an instance for recording neighbors which
# inserts a new key if not present in object
neighbors = defaultdict(list)
# iterate over neighbors within the `threshold`
# and record all network vertices as neighbors
# if the vertex is not being compared to itself
for i, n in enumerate(neighbor_query[0]):
neigh = neighbor_query[1][i]
if n != neigh:
neighbors[n].append(neigh)
# call libpysal for `W` instance
w = weights.W(neighbors)
return w
def snapobservations(self, in_data, name, idvariable=None, attribute=False):
"""Snap a point pattern shapefile to a network object. The
point pattern is stored in the ``network.pointpattern``
attribute of the network object.
Parameters
----------
in_data : {geopandas.GeoDataFrame, str}
The input geographic data. Either (1) a path to a
shapefile (str); or (2) a ``geopandas.GeoDataFrame``.
name : str
Name to be assigned to the point dataset.
idvariable : str
Column name to be used as the ID variable.
attribute : bool
Defines whether attributes should be extracted. ``True`` for
attribute extraction. ``False`` for no attribute extraction.
Default is ``False``.
Notes
-----
See :cite:`doi:10.1111/gean.12211` for a detailed discussion on
the modeling consequences of snapping points to spatial networks.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> streets_file = examples.get_path("streets.shp")
>>> ntw = spaghetti.Network(in_data=streets_file)
Snap observations to the network.
>>> pt_str = "crimes"
>>> in_data = examples.get_path(pt_str+".shp")
>>> ntw.snapobservations(in_data, pt_str, attribute=True)
Isolate the number of points in the dataset.
>>> ntw.pointpatterns[pt_str].npoints
287
"""
# create an attribute of `pointpattern` by instantiating a
# `network.PointPattern` class
self.pointpatterns[name] = PointPattern(
in_data=in_data, idvariable=idvariable, attribute=attribute
)
# allocate the point observations to the network
self._snap_to_link(self.pointpatterns[name])
def compute_distance_to_vertices(self, x, y, arc):
"""Given an observation on a network arc, return the distance
to the two vertices that bound the arc.
Parameters
----------
x : float
The x-coordinate of the snapped point.
y : float
The y-coordinate of the snapped point.
arc : tuple
The (vtx0, vtx1) representation of the network arc.
Returns
-------
d1 : float
The distance to vtx0. Always the vertex with the lesser ID.
d2 : float
The distance to vtx1. Always the vertex with the greater ID.
"""
# distance to vertex 1
d1 = util.compute_length((x, y), self.vertex_coords[arc[0]])
# distance to vertex 2
d2 = util.compute_length((x, y), self.vertex_coords[arc[1]])
return d1, d2
def compute_snap_dist(self, pattern, idx):
"""Given an observation snapped to a network arc, calculate the
distance from the original location to the snapped location.
Parameters
-----------
pattern : spaghetti.PointPattern
The point pattern object.
idx : int
The point ID.
Returns
-------
dist : float
The euclidean distance from original location to the snapped
location.
"""
# set of original (x,y) point coordinates
loc = pattern.points[idx]["coordinates"]
# set of snapped (x,y) point coordinate
snp = pattern.snapped_coordinates[idx]
# distance from the original location to
# the snapped location along the network
dist = util.compute_length(loc, snp)
return dist
def _snap_to_link(self, pointpattern):
"""Used internally to snap point observations to network arcs.
Parameters
-----------
pointpattern : spaghetti.PointPattern
The point pattern object.
Returns
-------
obs_to_arc : dict
Dictionary with arcs as keys and lists of points as values.
arc_to_obs : dict
Dictionary with point IDs as keys and arc tuples as values.
dist_to_vertex : dict
Dictionary with point IDs as keys and values as dictionaries
with keys for vertex IDs and values as distances from point
to vertex.
dist_snapped : dict
Dictionary with point IDs as keys and distance from point
to the network arc that it is snapped.
"""
# instantiate observations snapped coordinates lookup
pointpattern.snapped_coordinates = {}
# record throw-away arcs (pysal.cg.Chain) enumerator
arcs_ = []
# snapped(point)-to-arc lookup
s2a = {}
# iterate over network arc IDs
for arc in self.arcs:
# record the start and end of the arc
head = self.vertex_coords[arc[0]]
tail = self.vertex_coords[arc[1]]
# create a pysal.cg.Chain object of the arc
# and add it to the arcs enumerator
arcs_.append(util._chain_constr(None, [head, tail]))
# add the arc into the snapped(point)-to-arc lookup
s2a[(head, tail)] = arc
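# e.g. (hypothetical coordinates): s2a[((725.0, 1600.0), (810.0, 1600.0))] = (0, 1)
# maps an arc's endpoint coordinates back to its vertex-ID tuple so the snapped
# results below can be related to network arcs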
# instantiate crosswalks
points = {} # point ID to coordinates lookup
obs_to_arc = {} # observations to arcs lookup
dist_to_vertex = {} # distance to vertices lookup
dist_snapped = {} # snapped distance lookup
# fetch and record point coordinates keyed by ID
for point_idx, point in pointpattern.points.items():
points[point_idx] = point["coordinates"]
# snap point observations to the network
snapped = util.snap_points_to_links(points, arcs_)
# record obs_to_arc, dist_to_vertex, and dist_snapped
# -- iterate over the snapped observation points
for point_idx, snap_info in snapped.items():
# fetch the x and y coordinate
x, y = snap_info[1].tolist()
# look up the arc from snapped(point)-to-arc
arc = s2a[tuple(snap_info[0])]
# add the arc key to observations to arcs lookup
if arc not in obs_to_arc:
obs_to_arc[arc] = {}
# add the (x,y) coordinates of the original observation
# point location to the observations to arcs lookup
obs_to_arc[arc][point_idx] = (x, y)
# add the (x,y) coordinates of the snapped observation
# point location to the snapped coordinates lookup
pointpattern.snapped_coordinates[point_idx] = (x, y)
# calculate the distance to the left and right vertex
# along the network link from the snapped point location
d1, d2 = self.compute_distance_to_vertices(x, y, arc)
# record the distances in the distance to vertices lookup
dist_to_vertex[point_idx] = {arc[0]: d1, arc[1]: d2}
# record the snapped distance
dist_snapped[point_idx] = self.compute_snap_dist(pointpattern, point_idx)
# instantiate observations to network vertex lookup
obs_to_vertex = defaultdict(list)
# iterate over the observations to arcs lookup
for k, v in obs_to_arc.items():
# record the left and right vertex ids
keys = v.keys()
obs_to_vertex[k[0]] = keys
obs_to_vertex[k[1]] = keys
# iterate over components and assign observations
component_to_obs = {}
for comp, _arcids in self.network_component2arc.items():
component_to_obs[comp] = []
for lk, odict in obs_to_arc.items():
if lk in _arcids:
component_to_obs[comp].extend(list(odict.keys()))
# set crosswalks as attributes of the `pointpattern` class
pointpattern.obs_to_arc = obs_to_arc
pointpattern.component_to_obs = component_to_obs
pointpattern.dist_to_vertex = dist_to_vertex
pointpattern.dist_snapped = dist_snapped
pointpattern.obs_to_vertex = list(obs_to_vertex)
def count_per_link(self, obs_on, graph=False):
"""Compute the counts per arc or edge (link).
Parameters
----------
obs_on : dict
Dictionary of observations on the network.
Either in the form ``{(<LINK>):{<POINT_ID>:(<COORDS>)}}``
or ``{<LINK>:[(<COORD>),(<COORD>)]}``.
graph : bool
Count observations on graph edges (``True``) or
network arcs (``False``). Default is ``False``.
Returns
-------
counts : dict
Counts per network link in the form ``{(<LINK>):<COUNT>}``.
Examples
--------
Note that this passes the ``obs_to_arc`` or ``obs_to_edge`` attribute
of a point pattern snapped to the network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
>>> counts = ntw.count_per_link(
... ntw.pointpatterns["crimes"].obs_to_arc, graph=False
... )
>>> counts[(140, 142)]
10
>>> s = sum([v for v in list(counts.values())])
>>> s
287
"""
# instantiate observation counts by link lookup
counts = {}
# graph-theoretic object of nodes and edges
if graph:
# iterate the links-to-observations lookup
for key, observations in obs_on.items():
# isolate observation count for the link
cnt = len(observations)
# extract link (edges) key
if key in self.arcs_to_edges.keys():
key = self.arcs_to_edges[key]
# either add to the current count for the existing
# dictionary entry or create a new dictionary entry
try:
counts[key] += cnt
except KeyError:
counts[key] = cnt
# network object of arcs and vertices
else:
# simplified version of the above process
for key in obs_on.keys():
counts[key] = len(obs_on[key])
return counts
def _newpoint_coords(self, arc, distance):
"""Used internally to compute new point coordinates during snapping."""
# extract coordinates for vertex 1 of arc
x1 = self.vertex_coords[arc[0]][0]
y1 = self.vertex_coords[arc[0]][1]
# extract coordinates for vertex 2 of arc
x2 = self.vertex_coords[arc[1]][0]
y2 = self.vertex_coords[arc[1]][1]
# if the network arc is vertical set the (x) coordinate
# and proceed to calculating the (y) coordinate
if x1 == x2:
x0 = x1
# if the vertical direction is positive from
# vertex 1 to vertex 2 on the euclidean plane
if y1 < y2:
y0 = y1 + distance
# if the vertical direction is negative from
# vertex 1 to vertex 2 on the euclidean plane
# -- this shouldn't happen due to vertex sorting in
# -- self._extractnetwork() and self.extractgraph()
elif y1 > y2:
y0 = y2 + distance
# otherwise the link is zero-length
# -- this should never happen
else:
y0 = y1
return x0, y0
# calculate the slope of the arc, `m`
m = (y2 - y1) / (x2 - x1)
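# geometric note (illustrative, not from the original source): moving a
# euclidean distance `d` along a line with slope `m` changes x by
# d / sqrt(1 + m**2), because the unit direction vector along the line is
# (1, m) / sqrt(1 + m**2); y then follows from the point-slope form below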
# if the horizontal direction is negative from
# vertex 1 to vertex 2 on the euclidean plane
if x1 > x2:
x0 = x1 - distance / numpy.sqrt(1 + m ** 2)
# if the horizontal direction is positive from
# vertex 1 to vertex 2 on the euclidean plane
elif x1 < x2:
x0 = x1 + distance / numpy.sqrt(1 + m ** 2)
# calculate the (y) coordinate
y0 = m * (x0 - x1) + y1
# the new (x,y) coordinates for the snapped observation
return x0, y0
def simulate_observations(self, count, distribution="uniform"):
"""Generate a simulated point pattern on the network.
Parameters
----------
count : int
The number of points to create.
distribution : str
A distribution of random points. Currently, the only
supported distribution is uniform.
Returns
-------
simpts : spaghetti.SimulatedPointPattern
The simulated point pattern snapped to the network.
See also
--------
numpy.random.Generator.uniform
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Isolate the number of points in the dataset.
>>> npts = ntw.pointpatterns["crimes"].npoints
>>> npts
287
Simulate ``npts`` number of points along the network
in a `uniform` distribution.
>>> sim = ntw.simulate_observations(npts)
>>> isinstance(sim, spaghetti.network.SimulatedPointPattern)
True
>>> sim.npoints
287
"""
# instantiate an empty `SimulatedPointPattern()`
simpts = SimulatedPointPattern()
# record throw-away arcs enumerator
arcs_ = []
# create array and fill each entry as length of network arc
lengths = numpy.zeros(len(self.arc_lengths))
for i, key in enumerate(self.arc_lengths.keys()):
arcs_.append(key)
lengths[i] = self.arc_lengths[key]
# cumulative network length
stops = numpy.cumsum(lengths)
cumlen = stops[-1]
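# illustrative example (hypothetical lengths): arc lengths [2.0, 3.0, 5.0]
# give stops = [2.0, 5.0, 10.0] and cumlen = 10.0; a uniform draw r = 4.2
# selects index 1 below, i.e. the first arc whose cumulative stop exceeds r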
# create lengths with a uniform distribution
if distribution.lower() == "uniform":
nrandompts = numpy.random.uniform(0, cumlen, size=(count,))
else:
msg = "%s distribution not currently supported." % distribution
raise RuntimeError(msg)
# iterate over random distances created above
for i, r in enumerate(nrandompts):
# take the first index (arc ID) where the cumulative network
# length in `stops` exceeds the random distance
idx = numpy.where(r < stops)[0][0]
# assign the simulated point to the arc
assignment_arc = arcs_[idx]
# calculate and set the distance from the arc start
distance_from_start = stops[idx] - r
# populate the coordinates dict
x0, y0 = self._newpoint_coords(assignment_arc, distance_from_start)
# record the snapped coordinates and associated vertices
simpts.snapped_coordinates[i] = (x0, y0)
simpts.obs_to_vertex[assignment_arc[0]].append(i)
simpts.obs_to_vertex[assignment_arc[1]].append(i)
# calculate and set the distance from the arc end
distance_from_end = self.arc_lengths[arcs_[idx]] - distance_from_start
# populate the distances to vertices
simpts.dist_to_vertex[i] = {
assignment_arc[0]: distance_from_start,
assignment_arc[1]: distance_from_end,
}
# set snapped coordinates and point count attributes
simpts.points = simpts.snapped_coordinates
simpts.npoints = len(simpts.points)
return simpts
def enum_links_vertex(self, v0):
"""Returns the arcs (links) adjacent to vertices.
Parameters
-----------
v0 : int
The vertex ID.
Returns
-------
links : list
List of tuple arcs adjacent to the vertex.
Examples
--------
Create an instance of a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Enumerate the links/arcs that are adjacent to vertex ``24``.
>>> ntw.enum_links_vertex(24)
[(24, 48), (24, 25), (24, 26)]
"""
# instantiate links list
links = []
neighbor_vertices = self.adjacencylist[v0]
# enumerate links associated with the current vertex
for n in neighbor_vertices:
links.append(tuple(sorted([n, v0])))
return links
def full_distance_matrix(self, n_processes, gen_tree=False):
"""All vertex-to-vertex distances on a network. This method
is called from within ``allneighbordistances()``,
``nearestneighbordistances()``, and ``distancebandweights()``.
Parameters
-----------
n_processes : int
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
Notes
-----
Based on :cite:`Dijkstra1959a` and :cite:`doi:10.1002/9781119967101.ch3`.
"""
# create an empty matrix which will store shortest path distance
nvtx = len(self.vertex_list)
self.distance_matrix = numpy.empty((nvtx, nvtx))
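# row `distance_matrix[vtx]` is filled below with the shortest-path distance
# from `vtx` to every other network vertex; per the class documentation,
# vertices in disparate components end up with infinite distances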
# create `network_trees` attribute that stores
# all network path trees (if desired)
self.network_trees = {}
# single-core processing
if n_processes == 1:
# iterate over each network vertex
for vtx in self.vertex_list:
# calculate the shortest path and preceding
# vertices for traversal route
distance, pred = util.dijkstra(self, vtx)
pred = numpy.array(pred)
# generate the shortest path tree
if gen_tree:
tree = util.generatetree(pred)
else:
tree = None
# populate distances and paths
self.distance_matrix[vtx] = distance
self.network_trees[vtx] = tree
# multiprocessing
else:
# set up multiprocessing schema
import multiprocessing as mp
from itertools import repeat
if n_processes == "all":
cores = mp.cpu_count()
else:
cores = n_processes
p = mp.Pool(processes=cores)
# calculate the shortest path and preceding
# vertices for traversal route by mapping each process
distance_pred = p.map(util.dijkstra_mp, zip(repeat(self), self.vertex_list))
# set range of iterations
iterations = range(len(distance_pred))
# fill shortest paths
distance = [distance_pred[itr][0] for itr in iterations]
# fill preceding vertices
pred = numpy.array([distance_pred[itr][1] for itr in iterations])
# iterate of network vertices and generate
# the shortest path tree for each
for vtx in self.vertex_list:
if gen_tree:
tree = util.generatetree(pred[vtx])
else:
tree = None
# populate distances and paths
self.distance_matrix[vtx] = distance[vtx]
self.network_trees[vtx] = tree
def allneighbordistances(
self,
sourcepattern,
destpattern=None,
fill_diagonal=None,
n_processes=1,
gen_tree=False,
snap_dist=False,
):
"""Compute either all distances between :math:`i` and :math:`j` in a
single point pattern or all distances between each :math:`i` from a
source pattern and all :math:`j` from a destination pattern.
Parameters
----------
sourcepattern : {str, spaghetti.PointPattern}
The key of a point pattern snapped to the network or
the full ``spaghetti.PointPattern`` object.
destpattern : str
(Optional) The key of a point pattern snapped to the network
or the full ``spaghetti.PointPattern`` object.
fill_diagonal : {float, int}
(Optional) Fill the diagonal of the cost matrix. Default is
``None`` and will populate the diagonal with ``numpy.nan``.
Do not declare a ``destpattern`` for a custom
``fill_diagonal``.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
Returns
-------
nearest : numpy.ndarray
An array of shape (n,m) storing distances between all
source and destination points.
tree_nearest : dict
Nearest network node to point pattern vertex shortest
path lookup. The values of the dictionary are a tuple
of the nearest source vertex and the nearest destination
vertex to query the lookup tree. If two observations are
snapped to the same network arc a flag of -.1 is set for
both the source and destination network vertex
indicating the same arc is used while also raising an
``IndexError`` when rebuilding the path.
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> import numpy
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(
... examples.get_path("crimes.shp"), "crimes", attribute=True
... )
Calculate all distances between observations in the ``crimes`` dataset.
>>> s2s_dist = ntw.allneighbordistances("crimes")
If calculating a ``type-a`` to ``type-a`` distance matrix
the distance between an observation and itself is ``nan`` and
the distance between one observation and another will be a positive value.
>>> s2s_dist[0,0], s2s_dist[1,0]
(nan, 3105.189475447081)
If calculating a ``type-a`` to ``type-b`` distance matrix
the distance between all observations will likely be positive
values, may be zero (or approximately zero), but will never be negative.
>>> ntw.snapobservations(
... examples.get_path("schools.shp"), "schools", attribute=False
... )
>>> s2d_dist = ntw.allneighbordistances("crimes", destpattern="schools")
>>> numpy.round((s2d_dist[0,0], s2d_dist[1,0]), 5)
array([4520.72354, 6340.42297])
Shortest paths can also be reconstructed when desired by
setting the ``gen_tree`` keyword argument to ``True``. Here
it is shown that the shortest path between school ``6`` and
school ``7`` flows along network arcs through network
vertices ``173`` and ``64``. The ``ntw.network_trees`` attribute
may then be queried for the network elements comprising that path.
>>> d2d_dist, tree = ntw.allneighbordistances("schools", gen_tree=True)
>>> tree[(6, 7)]
(173, 64)
"""
# calculate the network vertex to vertex distance matrix
# if it is not already an attribute
if not hasattr(self, "distance_matrix"):
self.full_distance_matrix(n_processes, gen_tree=gen_tree)
# set the source and destination observation point patterns
if type(sourcepattern) is str:
sourcepattern = self.pointpatterns[sourcepattern]
if destpattern:
destpattern = self.pointpatterns[destpattern]
# source pattern setup
# set local copy of source pattern index
src_indices = list(sourcepattern.points.keys())
# set local copy of source distance to vertex lookup
src_d2v = copy.deepcopy(sourcepattern.dist_to_vertex)
# source point count
nsource_pts = len(src_indices)
# create source point to network vertex lookup
src_vertices = {}
for s in src_indices:
v1, v2 = src_d2v[s].keys()
src_vertices[s] = (v1, v2)
# destination pattern setup
# if only a source pattern is specified, also set it as
# the destination pattern
symmetric = False
if destpattern is None:
symmetric = True
destpattern = sourcepattern
# set local copy of destination pattern index
dest_indices = list(destpattern.points.keys())
# set local copy of destination distance to vertex lookup
dst_d2v = copy.deepcopy(destpattern.dist_to_vertex)
# destination point count
ndest_pts = len(dest_indices)
# create `deepcopy` of destination points to
# consider for searching
dest_searchpts = copy.deepcopy(dest_indices)
# create destination point to network vertex lookup
dest_vertices = {}
for s in dest_indices:
v1, v2 = dst_d2v[s].keys()
dest_vertices[s] = (v1, v2)
# add snapping distance to each pointpattern
if snap_dist:
# declare both point patterns and both
# distance to vertex lookup in single lists
patterns = [sourcepattern, destpattern]
dist_copies = [src_d2v, dst_d2v]
# iterate over each point pattern
for elm, pp in enumerate(patterns):
# extract associated vertex distances
for pidx, dists_dict in dist_copies[elm].items():
# add snapped distance to each point
for vidx, vdist in dists_dict.items():
dists_dict[vidx] = vdist + pp.dist_snapped[pidx]
# output setup
# create empty source x destination array
# and fill with infinity values
nearest = numpy.empty((nsource_pts, ndest_pts))
nearest[:] = numpy.inf
# create empty dictionary to store path trees
tree_nearest = {}
# iterate over each point in sources
for p1 in src_indices:
# get the source vertices and dist to source vertices
source1, source2 = src_vertices[p1]
set1 = set(src_vertices[p1])
# distance from source vertex1 to point and
# distance from source vertex2 to point
sdist1, sdist2 = src_d2v[p1].values()
if symmetric:
# only compute the upper triangle if symmetric
dest_searchpts.remove(p1)
# iterate over each point remaining in destinations
for p2 in dest_searchpts:
# get the destination vertices and
# dist to destination vertices
dest1, dest2 = dest_vertices[p2]
set2 = set(dest_vertices[p2])
# when the observations are snapped to the same arc
if set1 == set2:
# calculate only the length between points along
# that arc
x1, y1 = sourcepattern.snapped_coordinates[p1]
x2, y2 = destpattern.snapped_coordinates[p2]
computed_length = util.compute_length((x1, y1), (x2, y2))
nearest[p1, p2] = computed_length
# set the nearest network vertices to a flag of -.1
# indicating the same arc is used while also raising
# an indexing error when rebuilding the path
tree_nearest[p1, p2] = SAME_SEGMENT
# otherwise lookup distance between the source and
# destination vertex
else:
# distance from destination vertex1 to point and
# distance from destination vertex2 to point
ddist1, ddist2 = dst_d2v[p2].values()
# set the four possible combinations of
# source to destination shortest path traversal
d11 = self.distance_matrix[source1][dest1]
d21 = self.distance_matrix[source2][dest1]
d12 = self.distance_matrix[source1][dest2]
d22 = self.distance_matrix[source2][dest2]
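# illustrative breakdown (not from the original source): the candidate cost
# of routing via (source1, dest1) is sdist1 + d11 + ddist1; the code below
# first picks the cheaper entry vertex on the source arc for each destination
# vertex, then keeps the cheaper of the two destination-side totals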
# find the shortest distance from the path passing
# through each of the two origin vertices to the
# first destination vertex
sd_1 = d11 + sdist1
sd_21 = d21 + sdist2
sp_combo1 = source1, dest1
if sd_1 > sd_21:
sd_1 = sd_21
sp_combo1 = source2, dest1
# now add the point to vertex1 distance on
# the destination arc
len_1 = sd_1 + ddist1
# repeat the prior but now for the paths entering
# at the second vertex of the second arc
sd_2 = d12 + sdist1
sd_22 = d22 + sdist2
sp_combo2 = source1, dest2
if sd_2 > sd_22:
sd_2 = sd_22
sp_combo2 = source2, dest2
len_2 = sd_2 + ddist2
# now find the shortest distance path between point
# 1 on arc 1 and point 2 on arc 2, and assign
sp_12 = len_1
s_vertex, d_vertex = sp_combo1
if len_1 > len_2:
sp_12 = len_2
s_vertex, d_vertex = sp_combo2
# set distance and path tree
nearest[p1, p2] = sp_12
tree_nearest[p1, p2] = (s_vertex, d_vertex)
if symmetric:
# mirror the upper and lower triangle
# when symmetric
nearest[p2, p1] = nearest[p1, p2]
# populate the main diagonal when symmetric
if symmetric:
# fill the matrix diagonal with NaN values if no fill
# value is specified
if fill_diagonal is None:
numpy.fill_diagonal(nearest, numpy.nan)
# otherwise fill with specified value
else:
numpy.fill_diagonal(nearest, fill_diagonal)
# if the nearest path tree is desired return it along
# with the cost matrix
if gen_tree:
return nearest, tree_nearest
else:
return nearest
def nearestneighbordistances(
self,
sourcepattern,
destpattern=None,
n_processes=1,
gen_tree=False,
all_dists=None,
snap_dist=False,
keep_zero_dist=True,
):
"""Compute the interpattern nearest neighbor distances or the
intrapattern nearest neighbor distances between a source
pattern and a destination pattern.
Parameters
----------
sourcepattern : str
The key of a point pattern snapped to the network.
destpattern : str
(Optional) The key of a point pattern snapped to the
network.
n_processes : {int, str}
Specify the number of cores to utilize. Default is 1 core.
Use ``"all"`` to request all available cores.
Specify the exact number of cores with an integer.
gen_tree : bool
Rebuild shortest path ``True``, or skip ``False``.
Default is ``False``.
all_dists : numpy.ndarray
An array of shape :math:`(n,n)` storing distances between all
points.
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
keep_zero_dist : bool
Include zero values in minimum distance ``True`` or exclude
``False``. Default is ``True``. If the source pattern is the
same as the destination pattern the diagonal is filled with
``numpy.nan``.
Returns
-------
nearest : dict
Nearest neighbor distances keyed by the source point ID with
the value as a tuple containing a list of the
nearest destination point ID(s) and the distance.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(examples.get_path("crimes.shp"), "crimes")
Fetch nearest neighbor distances while (potentially)
keeping neighbors that have been geocoded directly on top of
each other. Here it is demonstrated that observation ``11``
has two neighbors (``18`` and ``19``) at an exactly equal distance.
However, observation ``18`` is shown to have only one neighbor
(``19``) with no distance between them.
>>> nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=True)
>>> nn[11], nn[18]
(([18, 19], 165.33982412719126), ([19], 0.0))
This may be remedied by setting the ``keep_zero_dist`` keyword
argument to ``False``. With this parameter set, observation ``11``
still has the same neighbor/distance values, but
observation ``18`` now has a single nearest neighbor (``11``)
with a non-zero, positive distance.
>>> nn = ntw.nearestneighbordistances("crimes", keep_zero_dist=False)
>>> nn[11], nn[18]
(([18, 19], 165.33982412719126), ([11], 165.33982412719126))
There are valid reasons for both retaining or masking zero distance
neighbors. When conducting analysis, thought must be given as to
which model more accurately represents the specific scenario.
"""
# raise an exception if the specified point pattern does not exist
if sourcepattern not in self.pointpatterns.keys():
err_msg = "Available point patterns are {}"
raise KeyError(err_msg.format(self.pointpatterns.keys()))
# calculate the network vertex to vertex distance matrix
# if it is not already an attribute
if not hasattr(self, "distance_matrix"):
self.full_distance_matrix(n_processes, gen_tree=gen_tree)
# determine if the source and destination patterns are equal
symmetric = sourcepattern != destpattern
# (for source-to-source patterns) if zero-distance neighbors are
# desired, keep the diagonal as NaN and take the minimum
# distance neighbor(s), which may include zero distance
# neighbors.
fill_diagonal = None
if not keep_zero_dist and symmetric:
# (for source-to-source patterns) if zero-distance neighbors
# should be ignored, convert the diagonal to 0.0 and take
# the minimum distance neighbor(s) that is/are not 0.0
# distance.
fill_diagonal = 0.0
# set the source and destination observation point patterns
sourcepattern = self.pointpatterns[sourcepattern]
if destpattern:
destpattern = self.pointpatterns[destpattern]
# if the full source to destination is not calculated,
# do that at this time
if all_dists is None:
all_dists = self.allneighbordistances(
sourcepattern,
destpattern=destpattern,
fill_diagonal=fill_diagonal,
n_processes=n_processes,
gen_tree=gen_tree,
snap_dist=snap_dist,
)
# create empty nearest neighbors lookup
nearest = {}
# iterate over each source point
for source_index in sourcepattern.points.keys():
# this considers all zero-distance neighbors
if keep_zero_dist and symmetric:
val = numpy.nanmin(all_dists[source_index, :])
# this does not consider zero-distance neighbors
else:
val = numpy.min(
all_dists[source_index, :][
numpy.nonzero(all_dists[source_index, :])
]
)
# nearest destination (may be more than one if
# observations are equal distances away)
dest_idxs = numpy.where(all_dists[source_index, :] == val)[0].tolist()
# set nearest destination point(s) and distance
nearest[source_index] = (dest_idxs, val)
return nearest
def shortest_paths(self, tree, pp_orig, pp_dest=None, n_processes=1):
"""Return the shortest paths between observation points as
``libpysal.cg.Chain`` objects.
Parameters
----------
tree : dict
See ``tree_nearest`` in
``spaghetti.Network.allneighbordistances()``.
pp_orig : str
Origin point pattern for shortest paths.
See ``name`` in ``spaghetti.Network.snapobservations()``.
pp_dest : str
Destination point pattern for shortest paths.
See ``name`` in ``spaghetti.Network.snapobservations()``.
            Defaults to ``pp_orig`` if not declared.
n_processes : int
See ``n_processes`` in ``spaghetti.Network.full_distance_matrix()``.
Returns
-------
paths : list
The shortest paths between observations as geometric objects.
Each element of the list is a list where the first element
is an origin-destination pair tuple and the second
element is a ``libpysal.cg.Chain``.
Raises
------
AttributeError
This exception is raised when an attempt to extract shortest
            path geometries is being made but the ``network_trees``
attribute does not exist within the network object.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Snap observations to the network.
>>> ntw.snapobservations(examples.get_path("schools.shp"), "schools")
Create shortest path trees between observations.
>>> _, tree = ntw.allneighbordistances("schools", gen_tree=True)
Generate geometric objects from trees.
>>> paths = ntw.shortest_paths(tree, "schools")
Extract the first path, which is between observations
``0`` and ``1``.
>>> path = paths[0]
>>> path[0]
(0, 1)
        There are ``n`` vertices in the path between observations
``0`` and ``1``.
>>> n = len(path[1].vertices)
>>> n
10
"""
# build the network trees object if it is not already an attribute
if not hasattr(self, "network_trees"):
msg = "The 'network_trees' attribute has not been created. "
msg += "Rerun 'spaghetti.Network.allneighbordistances()' "
msg += "with the 'gen_tree' parameter set to 'True'."
raise AttributeError(msg)
# isolate network attributes
pp_orig = self.pointpatterns[pp_orig]
if pp_dest:
pp_dest = self.pointpatterns[pp_dest]
else:
pp_dest = pp_orig
vtx_coords = self.vertex_coords
net_trees = self.network_trees
# instantiate a list to store paths
paths = []
# iterate over each path in the tree
for idx, ((obs0, obs1), (v0, v1)) in enumerate(tree.items()):
# if the observations share the same segment
# create a partial segment path
if (v0, v1) == SAME_SEGMENT:
# isolate the snapped coordinates and put in a list
partial_segment_verts = [
cg.Point(pp_orig.snapped_coordinates[obs0]),
cg.Point(pp_dest.snapped_coordinates[obs1]),
]
path = partial_segment_verts
else:
# source and destination network vertices
svtx, dvtx = tree[obs0, obs1]
# path passes through these nodes
# (source and destination inclusive)
thru_nodes = net_trees[svtx][dvtx][::-1] + [dvtx]
# full-length network segments along path
full_segs_path = []
iter_limit = len(thru_nodes) - 1
for _idx, item in enumerate(islice(thru_nodes, iter_limit)):
full_segs_path.append((item, thru_nodes[_idx + 1]))
                # construct the full-length network segments along the path
full_segments = []
for fsp in full_segs_path:
full_segments.append(util._chain_constr(vtx_coords, fsp))
# unpack the vertices containers
segm_verts = [v for fs in full_segments for v in fs.vertices]
# remove duplicate vertices
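                # (consecutive duplicates occur where successive full-length
                # segments share an endpoint vertex)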
for idx, v in enumerate(segm_verts):
try:
if v == segm_verts[idx + 1]:
segm_verts.remove(v)
except IndexError as e:
if e.args[0] == "list index out of range":
continue
else:
raise
# partial-length network segments along path
partial_segment_verts = [
cg.Point(pp_orig.snapped_coordinates[obs0]),
cg.Point(pp_dest.snapped_coordinates[obs1]),
]
# combine the full and partial segments into a single list
first_vtx, last_vtx = partial_segment_verts
path = [first_vtx] + segm_verts + [last_vtx]
            # populate the ``paths`` list
paths.append([(obs0, obs1), util._chain_constr(None, path)])
return paths
def split_arcs(self, split_param, split_by="distance", w_components=True):
"""Split all network arcs at either a fixed distance or fixed count.
Parameters
-----------
split_param : {int, float}
Either the number of desired resultant split arcs or
the distance at which arcs are split.
split_by : str
Either ``'distance'`` or ``'count'``. Default is ``'distance'``.
w_components : bool
Set to ``False`` to not record connected components from a
``libpysal.weights.W`` object. Default is ``True``.
Returns
-------
split_network : spaghetti.Network
A newly instantiated ``spaghetti.Network`` object.
Examples
--------
Instantiate a network.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
        Split the network into segments of 200 distance units in length
(`US feet in this case <https://github.com/pysal/libpysal/blob/master/libpysal/examples/geodanet/streets.prj>`_.).
This will include "remainder" segments unless the network is
comprised of arcs with lengths exactly divisible by ``distance``.
>>> n200 = ntw.split_arcs(200.0)
>>> len(n200.arcs)
688
The number of arcs within the new object can be accessed via the
weights object, as well. These counts will be equal.
>>> len(n200.arcs) == n200.w_network.n
True
Neighboring arcs can also be queried through the weight object.
>>> n200.w_network.neighbors[72,392]
[(71, 72), (72, 252), (72, 391), (392, 393)]
Network arcs can also be split by a specified number of divisions with
the ``split_by`` keyword set to ``'count'``, which is ``'distance'`` by
default. For example, each arc can be split into 2 equal parts.
>>> n2 = ntw.split_arcs(2, split_by="count")
>>> len(n2.arcs)
606
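        Here each original arc is split into two, doubling the arc count.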
"""
# catch invalid split types
split_by = split_by.lower()
valid_split_types = ["distance", "count"]
if split_by not in valid_split_types:
msg = f"'{split_by}' is not a valid value for 'split_by'. "
msg += f"Valid arguments include: {valid_split_types}."
raise ValueError(msg)
# catch invalid count params
if split_by == "count":
if split_param <= 1:
msg = "Splitting arcs by 1 or less is not possible. "
msg += f"Currently 'split_param' is set to {split_param}."
raise ValueError(msg)
split_integer = int(split_param)
if split_param != split_integer:
msg = "Network arcs must split by an integer. "
msg += f"Currently 'split_param' is set to {split_param}."
raise TypeError(msg)
        # convert coordinates to integers if possible
# e.g., (1.0, 0.5) --> (1, 0.5)
int_coord = lambda c: int(c) if (type(c) == float and c.is_integer()) else c
# create new shell network instance
split_network = Network()
# duplicate input network attributes
split_network.adjacencylist = copy.deepcopy(self.adjacencylist)
split_network.arc_lengths = copy.deepcopy(self.arc_lengths)
split_network.arcs = copy.deepcopy(self.arcs)
split_network.vertex_coords = copy.deepcopy(self.vertex_coords)
split_network.vertex_list = copy.deepcopy(self.vertex_list)
split_network.vertices = copy.deepcopy(self.vertices)
split_network.pointpatterns = copy.deepcopy(self.pointpatterns)
split_network.in_data = self.in_data
# set vertex ID to start iterations
current_vertex_id = max(self.vertices.values())
# instantiate sets for newly created network arcs and
# input network arcs to remove
new_arcs = set()
remove_arcs = set()
# iterate over all network arcs
for arc in split_network.arcs:
# fetch network arc length
length = split_network.arc_lengths[arc]
# set initial segmentation interval
if split_by == "distance":
interval = split_param
else:
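                # e.g., an arc of length 100 split with ``split_param=4``
                # (by count) yields an interval of 25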
interval = length / float(split_param)
            # initialize the new arc length at zero
totallength = 0
# initialize the current vertex and ending vertex
currentstart, end_vertex = arc[0], arc[1]
# determine direction of arc vertices
csx, csy = split_network.vertex_coords[currentstart]
evx, evy = split_network.vertex_coords[end_vertex]
if csy > evy and csx == evx:
currentstart, end_vertex = end_vertex, currentstart
# if the arc will be split remove the current
# arc from the adjacency list
if interval < length:
# remove old arc adjacency information
split_network.adjacencylist[currentstart].remove(end_vertex)
split_network.adjacencylist[end_vertex].remove(currentstart)
# remove old arc length information
split_network.arc_lengths.pop(arc, None)
# add old arc to set of arcs to remove
remove_arcs.add(arc)
# if the arc will not be split, do nothing and continue
else:
continue
# traverse the length of the arc
while totallength < length:
# once an arc can not be split further
if totallength + interval >= length:
# record the ending vertex
currentstop = end_vertex
# set the length remainder
interval = length - totallength
# full old length reached
totallength = length
else:
# set the current vertex ID
current_vertex_id += 1
# set the current stopping ID
currentstop = current_vertex_id
# add the interval distance to the traversed length
totallength += interval
# compute the new vertex coordinate
newx, newy = self._newpoint_coords(arc, totallength)
new_vertex = (int_coord(newx), int_coord(newy))
# update the vertex and coordinate info if needed
if new_vertex not in split_network.vertices.keys():
split_network.vertices[new_vertex] = currentstop
split_network.vertex_coords[currentstop] = new_vertex
split_network.vertex_list.append(currentstop)
else:
# retrieve vertex ID if coordinate already exists
current_vertex_id -= 1
currentstop = split_network.vertices[new_vertex]
# update the new network adjacency list
split_network.adjacencylist[currentstart].append(currentstop)
split_network.adjacencylist[currentstop].append(currentstart)
                # add the new arc to the arc dictionary; since we are
                # iterating over the arcs, it is added to the network
                # after the loop completes
_new_arc = tuple(sorted([currentstart, currentstop]))
new_arcs.add(_new_arc)
# set the length of the arc
split_network.arc_lengths[_new_arc] = interval
# increment the starting vertex to the stopping vertex
currentstart = currentstop
# add the newly created arcs to the network and remove the old arcs
split_network.arcs = set(split_network.arcs)
split_network.arcs.update(new_arcs)
split_network.arcs.difference_update(remove_arcs)
split_network.arcs = sorted(list(split_network.arcs))
# extract connected components
if w_components:
# extract contiguity weights from libpysal
split_network.w_network = split_network.contiguityweights(
graph=False, from_split=True
)
# identify connected components from the `w_network`
split_network.identify_components(split_network.w_network, graph=False)
# update the snapped point pattern
for instance in split_network.pointpatterns.values():
split_network._snap_to_link(instance)
return split_network
def GlobalAutoK(
self,
pointpattern,
nsteps=10,
permutations=99,
threshold=0.5,
distribution="uniform",
upperbound=None,
):
r"""Compute a global auto :math:`K`-function based on a network constrained
cost matrix through `Monte Carlo simulation <https://en.wikipedia.org/wiki/Monte_Carlo_method>`_
according to the formulation adapted from
:cite:`doi:10.1002/9780470549094.ch5`. See the **Notes**
section for further description.
Parameters
----------
pointpattern : spaghetti.PointPattern
A ``spaghetti`` point pattern object.
nsteps : int
The number of steps at which the count of the nearest
neighbors is computed. Default is ``10``.
permutations : int
The number of permutations to perform. Default is ``99``.
threshold : float
The level at which significance is computed.
(0.5 would be 97.5% and 2.5%). Default is ``0.5``.
distribution : str
The distribution from which random points are sampled.
Currently, the only supported distribution is ``'uniform'``.
upperbound : float
The upper bound at which the :math:`K`-function is computed.
Defaults to the maximum observed nearest neighbor distance.
Returns
-------
GlobalAutoK : spaghetti.analysis.GlobalAutoK
The global auto :math:`K`-function class instance.
Notes
-----
The :math:`K`-function can be formulated as:
.. math::
\displaystyle K(r)=\frac{\sum^n_{i=1} \#[\hat{A} \in D(a_i, r)]}{n\lambda},
        where :math:`n` is the set cardinality of :math:`A`, :math:`\hat{A}` is the subset of
observations in :math:`A` that are within :math:`D` units of distance from :math:`a_i`
(each single observation in :math:`A`), and :math:`r` is the range of distance
values over which the :math:`K`-function is calculated. The :math:`\lambda` term
is the intensity of observations along the network, calculated as:
.. math::
\displaystyle \lambda = \frac{n}{\big|N_{arcs}\big|},
where :math:`\big|N_{arcs}\big|` is the summed length of network arcs.
The global auto :math:`K`-function measures overall clustering in one set of
observations by comparing all intra-set distances over a range of
distance buffers :math:`D \in r`. The :math:`K`-function improves upon
nearest-neighbor distance measures through the analysis of all neighbor
distances. For an explanation on how to interpret the results of the
:math:`K`-function see the `Network Spatial Dependence tutorial <https://pysal.org/spaghetti/notebooks/network-spatial-dependence.html>`_.
For original implementation see :cite:`Ripley1976`
and :cite:`Ripley1977`.
For further Network-`K` formulations see
:cite:`doi:10.1111/j.1538-4632.2001.tb00448.x`,
:cite:`doi:10.1002/9781119967101.ch6`, and
:cite:`Baddeley2020`.
See also
--------
pointpats.K
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(in_data=examples.get_path("streets.shp"))
Snap observation points onto the network.
>>> pt_str = "schools"
>>> in_data = examples.get_path(pt_str+".shp")
>>> ntw.snapobservations(in_data, pt_str, attribute=True)
>>> schools = ntw.pointpatterns[pt_str]
Compute a :math:`K`-function from school observations
with ``99`` ``permutations`` at ``10`` intervals.
>>> kres = ntw.GlobalAutoK(schools, permutations=99, nsteps=10)
>>> kres.lowerenvelope.shape[0]
10
"""
# call analysis.GlobalAutoK
return GlobalAutoK(
self,
pointpattern,
nsteps=nsteps,
permutations=permutations,
threshold=threshold,
distribution=distribution,
upperbound=upperbound,
)
def Moran(self, pp_name, permutations=999, graph=False):
"""Calculate a Moran's *I* statistic on a set of observations
based on network arcs. The Moran’s *I* test statistic allows
for the inference of how clustered (or dispersed) a dataset is
while considering both attribute values and spatial relationships.
        A value closer to +1 indicates absolute clustering while a
        value closer to -1 indicates absolute dispersion. Complete
spatial randomness takes the value of 0. See the
`esda documentation <https://pysal.org/esda/generated/esda.Moran.html#esda.Moran>`_
for in-depth descriptions and tutorials.
Parameters
----------
pp_name : str
The name of the point pattern in question.
permutations : int
The number of permutations to perform. Default is ``999``.
graph : bool
Perform the Moran calculation on the graph `W` object
(``True``). Default is ``False``, which performs the
Moran calculation on the network `W` object.
Returns
-------
moran : esda.Moran
            A Moran's *I* statistic results object.
y : list
            The vector of observation counts used in the calculation.
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(in_data=examples.get_path("streets.shp"))
Snap observation points onto the network.
>>> crimes = "crimes"
>>> in_data = examples.get_path(crimes+".shp")
>>> ntw.snapobservations(in_data, crimes, attribute=True)
Compute a Moran's :math:`I` from crime observations.
>>> moran_res, _ = ntw.Moran(crimes)
>>> round(moran_res.I, 6)
0.005193
Notes
-----
See :cite:`moran:_cliff81` and :cite:`esda:_2019` for more details.
"""
# set proper weights attribute
if graph:
w = self.w_graph
else:
w = self.w_network
# Compute the counts
pointpat = self.pointpatterns[pp_name]
counts = self.count_per_link(pointpat.obs_to_arc, graph=graph)
# Build the y vector
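        # (links with no snapped observations contribute a count of zero)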
y = [counts[i] if i in counts else 0.0 for i in w.neighbors]
# Moran's I
moran = esda.moran.Moran(y, w, permutations=permutations)
return moran, y
def savenetwork(self, filename):
"""Save a network to disk as a binary file.
Parameters
----------
filename : str
The filename where the network should be saved. This should
be a full path or it will be saved in the current directory.
Examples
--------
Create a network instance.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Save out the network instance.
>>> ntw.savenetwork("mynetwork.pkl")
"""
with open(filename, "wb") as networkout:
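            # protocol 2 is used, presumably to keep the pickle readable
            # under Python 2 as well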
pickle.dump(self, networkout, protocol=2)
@staticmethod
def loadnetwork(filename):
"""Load a network from a binary file saved on disk.
Parameters
----------
filename : str
The filename where the network is saved.
Returns
-------
self : spaghetti.Network
A pre-computed ``spaghetti`` network object.
"""
with open(filename, "rb") as networkin:
self = pickle.load(networkin)
return self
def extract_component(net, component_id, weightings=None):
"""Extract a single component from a network object.
Parameters
----------
net : spaghetti.Network
Full network object.
component_id : int
The ID of the desired network component.
weightings : {dict, bool}
See the ``weightings`` keyword argument in ``spaghetti.Network``.
Returns
-------
cnet : spaghetti.Network
The pruned network containing the component specified in
``component_id``.
Notes
-----
Point patterns are not reassigned when extracting a component. Therefore,
component extraction should be performed prior to snapping any point
sets onto the network. Also, if the ``spaghetti.Network`` object
has ``distance_matrix`` or ``network_trees`` attributes, they are
deleted and must be computed again on the single component.
Examples
--------
Instantiate a network object.
>>> from libpysal import examples
>>> import spaghetti
>>> snow_net = examples.get_path("Soho_Network.shp")
>>> ntw = spaghetti.Network(in_data=snow_net, extractgraph=False)
The network is not fully connected.
>>> ntw.network_fully_connected
False
Examine the number of network components.
>>> ntw.network_n_components
45
Extract the longest component.
>>> longest = spaghetti.extract_component(ntw, ntw.network_longest_component)
>>> longest.network_n_components
1
>>> longest.network_component_lengths
{0: 13508.169276875526}
"""
def _reassign(attr, cid):
"""Helper for reassigning attributes."""
# set for each attribute(s)
if attr == "_fully_connected":
_val = [True for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "_n_components":
_val = [1 for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr in ["_longest_component", "_largest_component"]:
_val = [cid for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "vertex_list":
# reassigns vertex list + network, graph component vertices
supp = [objt + "_component_vertices" for objt in obj_type]
_val = [getattr(cnet, supp[0])[cid]]
_val += [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = [attr] + supp
elif attr == "vertex_coords":
# reassigns both vertex_coords and vertices
supp = getattr(cnet, "vertex_list")
_val = [{k: v for k, v in getattr(cnet, attr).items() if k in supp}]
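            # invert the filtered ID-->coordinate lookup to rebuild the
            # ``vertices`` (coordinate-->ID) mapping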
_val += [{v: k for k, v in _val[0].items()}]
attr = [attr, "vertices"]
elif attr == "_component_vertex_count":
# reassigns both network and graph _component_vertex_count
supp = len(getattr(cnet, "vertex_list"))
_val = [{cid: supp} for objt in obj_type]
attr = [objt + attr for objt in obj_type]
elif attr == "adjacencylist":
supp_adj = copy.deepcopy(list(getattr(cnet, attr).keys()))
supp_vtx = getattr(cnet, "vertex_list")
supp_rmv = [v for v in supp_adj if v not in supp_vtx]
[getattr(cnet, attr).pop(s) for s in supp_rmv]
return
elif attr == "_component_is_ring":
# reassigns both network and graph _component_is_ring
supp = [getattr(cnet, objt + attr) for objt in obj_type]
_val = [{cid: s[cid]} for s in supp]
attr = [objt + attr for objt in obj_type]
elif attr == "non_articulation_points":
supp_vtx = getattr(cnet, "vertex_list")
_val = [[s for s in getattr(cnet, attr) if s in supp_vtx]]
attr = [attr]
elif attr == "_component2":
# reassigns both network and graph _component2 attributes
supp = [_n + "_component2" + _a]
if hasgraph:
supp += [_g + "_component2" + _e]
_val = [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = supp
elif attr == "arcs":
# reassigns both arcs and edges
c2 = "_component2"
supp = [_n + c2 + _a]
if hasgraph:
supp += [_g + c2 + _e]
_val = [getattr(cnet, s)[cid] for s in supp]
attr = [attr]
if hasgraph:
attr += ["edges"]
elif attr == "_component_labels":
# reassigns both network and graph _component_labels
supp = [len(getattr(cnet, o + "s")) for o in obj]
_val = [numpy.array([cid] * s) for s in supp]
attr = [objt + attr for objt in obj_type]
elif attr == "_component_lengths":
# reassigns both network and graph _component_lengths
supp = [objt + attr for objt in obj_type]
_val = [{cid: getattr(cnet, s)[cid]} for s in supp]
attr = supp
elif attr == "_lengths":
# reassigns both arc and edge _lengths
supp_name = [o + attr for o in obj]
supp_lens = [getattr(cnet, s) for s in supp_name]
supp_link = [getattr(cnet, o + "s") for o in obj]
supp_ll = list(zip(supp_lens, supp_link))
_val = [{k: v for k, v in l1.items() if k in l2} for l1, l2 in supp_ll]
attr = supp_name
# reassign attributes
for a, av in zip(attr, _val):
setattr(cnet, a, av)
# provide warning (for now) if the network contains a point pattern
if getattr(net, "pointpatterns"):
msg = "There is a least one point pattern associated with the network."
msg += " Component extraction should be performed prior to snapping"
msg += " point patterns to the network object; failing to do so may"
msg += " lead to unexpected results."
warnings.warn(msg)
    # provide warning (for now) if the network has distance matrix or tree attributes
dm, nt = "distance_matrix", "network_trees"
if hasattr(net, dm) or hasattr(net, nt):
msg = "Either one or both (%s, %s) attributes" % (dm, nt)
msg += " are present and will be deleted. These must be"
msg += " recalculated following component extraction."
warnings.warn(msg)
for attr in [dm, nt]:
if hasattr(net, attr):
            delattr(net, attr)
# make initial copy of the network
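    # (deep copy so the input network object itself is left unmodified)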
cnet = copy.deepcopy(net)
# set labels
_n, _a, _g, _e = "network", "arc", "graph", "edge"
obj_type = [_n]
obj = [_a]
hasgraph = False
if hasattr(cnet, "w_graph"):
obj_type += [_g]
obj += [_e]
hasgraph = True
# attributes to reassign
update_attributes = [
"_fully_connected",
"_n_components",
"_longest_component",
"_largest_component",
"vertex_list",
"vertex_coords",
"_component_vertex_count",
"adjacencylist",
"_component_is_ring",
"_component2",
"arcs",
"_component_lengths",
"_lengths",
"_component_labels",
]
if hasgraph:
update_attributes.append("non_articulation_points")
# reassign attributes
for attribute in update_attributes:
_reassign(attribute, component_id)
# recreate spatial weights
cnet.w_network = cnet.contiguityweights(graph=False, weightings=weightings)
if hasgraph:
cnet.w_graph = cnet.contiguityweights(graph=True, weightings=weightings)
return cnet
def spanning_tree(net, method="sort", maximum=False, silence_warnings=True):
"""Extract a minimum or maximum spanning tree from a network.
Parameters
----------
net : spaghetti.Network
Instance of a network object.
method : str
Method for determining spanning tree. Currently, the only
supported method is 'sort', which sorts the network arcs
by length prior to building intermediary networks and checking
for cycles within the tree/subtrees. Future methods may
        include linear programming approaches, etc.
maximum : bool
When ``True`` a maximum spanning tree is created. When ``False``
a minimum spanning tree is created. Default is ``False``.
silence_warnings : bool
        Silence the warning raised when there is more than one connected
        component (``True``). Default is ``True`` due to the nature of
        constructing a minimum spanning tree.
Returns
-------
net : spaghetti.Network
Pruned instance of the network object.
Notes
-----
For in-depth background and details see
:cite:`GrahamHell_1985`,
:cite:`AhujaRavindraK`, and
:cite:`Okabe2012`.
See also
--------
networkx.algorithms.tree.mst
scipy.sparse.csgraph.minimum_spanning_tree
Examples
--------
Create a network instance.
>>> from libpysal import cg
>>> import spaghetti
>>> p00 = cg.Point((0,0))
>>> lines = [cg.Chain([p00, cg.Point((0,3)), cg.Point((4,0)), p00])]
>>> ntw = spaghetti.Network(in_data=lines)
Extract the minimum spanning tree.
>>> minst_net = spaghetti.spanning_tree(ntw)
>>> min_len = sum(minst_net.arc_lengths.values())
>>> min_len
7.0
Extract the maximum spanning tree.
>>> maxst_net = spaghetti.spanning_tree(ntw, maximum=True)
>>> max_len = sum(maxst_net.arc_lengths.values())
>>> max_len
9.0
>>> max_len > min_len
True
"""
# (un)silence warning
weights_kws = {"silence_warnings": silence_warnings}
# do not extract graph object while testing for cycles
net_kws = {"extractgraph": False, "weights_kws": weights_kws}
    # if the network contains a cycle it must be reduced; otherwise it is already a spanning tree
if util.network_has_cycle(net.adjacencylist):
if method.lower() == "sort":
spanning_tree = mst_weighted_sort(net, maximum, net_kws)
else:
msg = "'%s' not a valid method for minimum spanning tree creation"
raise ValueError(msg % method)
# instantiate the spanning tree as a network object
net = Network(in_data=spanning_tree, weights_kws=weights_kws)
return net
def mst_weighted_sort(net, maximum, net_kws):
"""Extract a minimum or maximum spanning tree from a network used
the length-weighted sort method.
Parameters
----------
net : spaghetti.Network
See ``spanning_tree()``.
maximum : bool
See ``spanning_tree()``.
net_kws : dict
        Keyword arguments for instantiating a ``spaghetti.Network``.
Returns
-------
spanning_tree : list
All networks arcs that are members of the spanning tree.
Notes
-----
This function is based on the method found in Chapter 3
Section 4.3 of :cite:`Okabe2012`.
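    In short, arcs are considered in length-sorted order and an arc is
    kept only when adding it does not introduce a cycle (a Kruskal-style
    greedy construction).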
"""
# network arcs dictionary sorted by arc length
sort_kws = {"key": net.arc_lengths.get, "reverse": maximum}
sorted_lengths = sorted(net.arc_lengths, **sort_kws)
# the spanning tree is initially empty
spanning_tree = []
    # iterate over the network arcs in length-sorted order
while sorted_lengths:
_arc = sorted_lengths.pop(0)
# make a spatial representation of an arc
chain_rep = util.chain_constr(net.vertex_coords, [_arc])
# current set of network arcs as libpysal.cg.Chain
_chains = spanning_tree + chain_rep
# current network iteration
_ntw = Network(in_data=_chains, **net_kws)
# determine if the network contains a cycle
if not util.network_has_cycle(_ntw.adjacencylist):
# If no cycle is present, add the arc to the spanning tree
spanning_tree.extend(chain_rep)
return spanning_tree
@requires("geopandas", "shapely")
def element_as_gdf(
net,
vertices=False,
arcs=False,
pp_name=None,
snapped=False,
routes=None,
id_col="id",
geom_col="geometry",
):
"""Return a ``geopandas.GeoDataFrame`` of network elements. This can be
(a) the vertices of a network; (b) the arcs of a network; (c) both the
vertices and arcs of the network; (d) the raw point pattern associated
with the network; (e) the snapped point pattern of (d); or (f) the
shortest path routes between point observations.
Parameters
----------
net : spaghetti.Network
A `spaghetti` network object.
vertices : bool
Extract the network vertices (``True``). Default is ``False``.
arcs : bool
Extract the network arcs (``True``). Default is ``False``.
pp_name : str
Name of the ``network.PointPattern`` to extract.
Default is ``None``.
snapped : bool
If extracting a ``network.PointPattern``, set to ``True`` for
snapped point locations along the network. Default is ``False``.
routes : dict
See ``paths`` from ``spaghetti.Network.shortest_paths``.
Default is ``None``.
id_col : str
``geopandas.GeoDataFrame`` column name for IDs. Default is ``"id"``.
When extracting routes this creates an (origin, destination) tuple.
geom_col : str
``geopandas.GeoDataFrame`` column name for geometry. Default is
``"geometry"``.
Raises
------
KeyError
In order to extract a ``network.PointPattern`` it must already
be a part of the network object. This exception is raised
when a ``network.PointPattern`` is being extracted that does
not exist within the network object.
Returns
-------
points : geopandas.GeoDataFrame
Network point elements (either vertices or ``network.PointPattern``
points) as a ``geopandas.GeoDataFrame`` of ``shapely.geometry.Point``
objects with an ``"id"`` column and ``"geometry""`` column.
If the network object has a ``network_component_vertices`` attribute,
then component labels are also added in a column.
lines : geopandas.GeoDataFrame
Network arc elements as a ``geopandas.GeoDataFrame`` of
``shapely.geometry.LineString`` objects with an ``"id"``
column and ``"geometry"`` column. If the network object has
a ``network_component_labels`` attribute, then component labels
are also added in a column.
paths : geopandas.GeoDataFrame
Shortest path routes along network arc elements as a
``geopandas.GeoDataFrame`` of ``shapely.geometry.LineString``
objects with an ``"id"`` (see ``spaghetti.Network.shortest_paths()``)
column and ``"geometry"`` column.
Notes
-----
When both network vertices and arcs are desired, the variable
declaration must be in the order: <vertices>, <arcs>.
This function requires ``geopandas``.
See also
--------
geopandas.GeoDataFrame
Examples
--------
Instantiate a network object.
>>> import spaghetti
>>> from libpysal import examples
>>> ntw = spaghetti.Network(examples.get_path("streets.shp"))
Extract the network elements (vertices and arcs) as
``geopandas.GeoDataFrame`` objects.
>>> vertices_df, arcs_df = spaghetti.element_as_gdf(
... ntw, vertices=True, arcs=True
... )
Examine the first vertex. It is a member of the component labeled ``0``.
>>> vertices_df.loc[0]
id 0
geometry POINT (728368.04762 877125.89535)
comp_label 0
Name: 0, dtype: object
Calculate the total length of the network.
>>> arcs_df.geometry.length.sum()
104414.09200823458
"""
# shortest path routes between observations
if routes:
paths = util._routes_as_gdf(routes, id_col, geom_col)
return paths
    # need a vertices placeholder to create network segment LineStrings
# even if only network edges are desired.
vertices_for_arcs = False
if arcs and not vertices:
vertices_for_arcs = True
# vertices/nodes/points
if vertices or vertices_for_arcs or pp_name:
points = util._points_as_gdf(
net,
vertices,
vertices_for_arcs,
pp_name,
snapped,
id_col=id_col,
geom_col=geom_col,
)
# return points geodataframe if arcs not specified or
# if extracting `PointPattern` points
if not arcs or pp_name:
return points
# arcs
arcs = util._arcs_as_gdf(net, points, id_col=id_col, geom_col=geom_col)
if vertices_for_arcs:
return arcs
else:
return points, arcs
def regular_lattice(bounds, nh, nv=None, exterior=False):
"""Generate a regular lattice of line segments
(`libpysal.cg.Chain objects <https://pysal.org/libpysal/generated/libpysal.cg.Chain.html#libpysal.cg.Chain>`_).
Parameters
----------
bounds : {tuple, list}
Area bounds in the form - <minx,miny,maxx,maxy>.
nh : int
The number of internal horizontal lines of the lattice.
nv : int
The number of internal vertical lines of the lattice. Defaults to
``nh`` if left as None.
exterior : bool
Flag for including the outer bounding box segments. Default is False.
Returns
-------
lattice : list
The ``libpysal.cg.Chain`` objects forming a regular lattice.
Notes
-----
The ``nh`` and ``nv`` parameters do not include the external
line segments. For example, setting ``nh=3, nv=2, exterior=True``
will result in 5 horizontal line sets and 4 vertical line sets.
Examples
--------
Create a 5x5 regular lattice with an exterior
>>> import spaghetti
>>> lattice = spaghetti.regular_lattice((0,0,4,4), 3, exterior=True)
>>> lattice[0].vertices
[(0.0, 0.0), (1.0, 0.0)]
Create a 5x5 regular lattice without an exterior
>>> lattice = spaghetti.regular_lattice((0,0,5,5), 3, exterior=False)
>>> lattice[-1].vertices
[(3.75, 3.75), (3.75, 5.0)]
Create a 7x9 regular lattice with an exterior from the
bounds of ``streets.shp``.
    >>> import libpysal
    >>> path = libpysal.examples.get_path("streets.shp")
>>> shp = libpysal.io.open(path)
>>> lattice = spaghetti.regular_lattice(shp.bbox, 5, nv=7, exterior=True)
>>> lattice[0].vertices
[(723414.3683108028, 875929.0396895551), (724286.1381211297, 875929.0396895551)]
"""
# check for bounds validity
if len(bounds) != 4:
bounds_len = len(bounds)
msg = "The 'bounds' parameter is %s elements " % bounds_len
msg += "but should be exactly 4 - <minx,miny,maxx,maxy>."
raise RuntimeError(msg)
    # default the vertical line count and validate the line counts
if not nv:
nv = nh
try:
nh, nv = int(nh), int(nv)
except TypeError:
nlines_types = type(nh), type(nv)
msg = "The 'nh' and 'nv' parameters (%s, %s) " % nlines_types
msg += "could not be converted to integers."
raise TypeError(msg)
# bounding box line lengths
len_h, len_v = bounds[2] - bounds[0], bounds[3] - bounds[1]
# horizontal and vertical increments
incr_h, incr_v = len_h / float(nh + 1), len_v / float(nv + 1)
# define the horizontal and vertical space
space_h = [incr_h * slot for slot in range(nv + 2)]
space_v = [incr_v * slot for slot in range(nh + 2)]
# create vertical and horizontal lines
lines_h = util.build_chains(space_h, space_v, exterior, bounds)
lines_v = util.build_chains(space_h, space_v, exterior, bounds, h=False)
# combine into one list
lattice = lines_h + lines_v
return lattice
class PointPattern:
"""A stub point pattern class used to store a point pattern.
Note from the original author of ``pysal.network``:
This class is monkey patched with network specific attributes when the
points are snapped to a network. In the future this class may be
replaced with a generic point pattern class.
Parameters
----------
in_data : {str, list, tuple, libpysal.cg.Point, geopandas.GeoDataFrame}
The input geographic data. Either (1) a path to a shapefile
(str); (2) an iterable containing ``libpysal.cg.Point``
objects; (3) a single ``libpysal.cg.Point``; or
(4) a ``geopandas.GeoDataFrame``.
idvariable : str
Field in the shapefile to use as an ID variable.
attribute : bool
A flag to indicate whether all attributes are tagged to this
class (``True``) or excluded (``False``). Default is ``False``.
Attributes
----------
points : dict
Keys are the point IDs (int). Values are the :math:`(x,y)`
coordinates (tuple).
npoints : int
The number of points.
obs_to_arc : dict
Keys are arc IDs (tuple). Values are snapped point information
(``dict``). Within the snapped point information (``dict``)
keys are observation IDs (``int``), and values are snapped
coordinates.
obs_to_vertex : list
List of incident network vertices to snapped observation points
converted from a ``default_dict``. Originally in the form of
paired left/right nearest network vertices {netvtx1: obs_id1,
netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then
simplified to a list in the form
[netvtx1, netvtx2, netvtx1, netvtx2, ...].
dist_to_vertex : dict
        Keys are observation IDs (``int``). Values are distance lookup
        (``dict``). Within distance lookup (``dict``) keys are the two
        incident vertices of the arc and values are the distance to each
        of those vertices.
snapped_coordinates : dict
Keys are the point IDs (int). Values are the snapped :math:`(x,y)`
coordinates (tuple).
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
"""
def __init__(self, in_data=None, idvariable=None, attribute=False):
# initialize points dictionary and counter
self.points = {}
self.npoints = 0
# determine input point data type
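        # (e.g., "<class 'str'>" --> "str")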
in_dtype = str(type(in_data)).split("'")[1]
# flag for points from a shapefile
from_shp = False
# flag for points as libpysal.cg.Point objects
is_libpysal_points = False
supported_iterables = ["list", "tuple"]
# type error message
msg = "'%s' not supported for point pattern instantiation."
# set appropriate geometries
if in_dtype == "str":
from_shp = True
elif in_dtype in supported_iterables:
dtype = str(type(in_data[0])).split("'")[1]
if dtype == "libpysal.cg.shapes.Point":
is_libpysal_points = True
else:
raise TypeError(msg % dtype)
elif in_dtype == "libpysal.cg.shapes.Point":
in_data = [in_data]
is_libpysal_points = True
elif in_dtype == "geopandas.geodataframe.GeoDataFrame":
from_shp = False
else:
raise TypeError(msg % in_dtype)
# either set native point ID from dataset or create new IDs
if idvariable and not is_libpysal_points:
ids = weights.util.get_ids(in_data, idvariable)
else:
ids = None
# extract the point geometries
if not is_libpysal_points:
if from_shp:
pts = open(in_data)
else:
pts_objs = list(in_data.geometry)
pts = [cg.shapes.Point((p.x, p.y)) for p in pts_objs]
else:
pts = in_data
# fetch attributes if requested
if attribute and not is_libpysal_points:
# open the database file if data is from shapefile
if from_shp:
dbname = os.path.splitext(in_data)[0] + ".dbf"
db = open(dbname)
# if data is from a GeoDataFrame, drop the geometry column
# and declare attribute values as a list of lists
else:
db = in_data.drop(in_data.geometry.name, axis=1).values.tolist()
db = [[d] for d in db]
else:
db = None
# iterate over all points
for i, pt in enumerate(pts):
# IDs, attributes
if ids and db is not None:
self.points[ids[i]] = {"coordinates": pt, "properties": db[i]}
# IDs, no attributes
elif ids and db is None:
self.points[ids[i]] = {"coordinates": pt, "properties": None}
# no IDs, attributes
elif not ids and db is not None:
self.points[i] = {"coordinates": pt, "properties": db[i]}
# no IDs, no attributes
else:
self.points[i] = {"coordinates": pt, "properties": None}
# close the shapefile and database file
# if the input data is a .shp
if from_shp:
pts.close()
if db:
db.close()
# record number of points
self.npoints = len(self.points.keys())
class SimulatedPointPattern:
"""Note from the original author of ``pysal.network``:
Struct style class to mirror the ``PointPattern`` class.
If the ``PointPattern`` class has methods, it might make
sense to make this a child of that class. This class is not intended
to be used by the external user.
Attributes
----------
npoints : int
The number of points.
obs_to_arc : dict
Keys are arc IDs (tuple). Values are snapped point information
(dict). Within the snapped point information (dict)
keys are observation IDs (int), and values are snapped
coordinates.
obs_to_vertex : list
List of incident network vertices to snapped observation points
converted from a default_dict. Originally in the form of
paired left/right nearest network vertices {netvtx1: obs_id1,
netvtx2: obs_id1, netvtx1: obs_id2... netvtx1: obs_idn}, then
simplified to a list in the form
[netvtx1, netvtx2, netvtx1, netvtx2, ...].
dist_to_vertex : dict
        Keys are observation IDs (int). Values are distance lookup
        (dict). Within distance lookup (dict) keys are the two
        incident vertices of the arc and values are the distance to each
        of those vertices.
snapped_coordinates : dict
Keys are the point IDs (int). Values are the snapped :math:`(x,y)`
coordinates (tuple).
snap_dist : bool
Flag as ``True`` to include the distance from the original
location to the snapped location along the network. Default
is ``False``.
"""
def __init__(self):
# duplicate post-snapping PointPattern class structure
self.npoints = 0
self.obs_to_arc = {}
self.obs_to_vertex = defaultdict(list)
self.dist_to_vertex = {}
self.snapped_coordinates = {}
| 35.714245
| 146
| 0.588632
| 104,406
| 0.83452
| 0
| 0
| 5,325
| 0.042563
| 0
| 0
| 77,119
| 0.616414
|
4d09a5a4cc57e4e453dca3ac3e67a8ff83298706
| 340
|
py
|
Python
|
tests/resources/mlflow-test-plugin/mlflow_test_plugin/default_experiment_provider.py
|
Sohamkayal4103/mlflow
|
4e444efdf73c710644ee039b44fa36a31d716f69
|
[
"Apache-2.0"
] | 1
|
2022-01-11T02:51:17.000Z
|
2022-01-11T02:51:17.000Z
|
tests/resources/mlflow-test-plugin/mlflow_test_plugin/default_experiment_provider.py
|
Sohamkayal4103/mlflow
|
4e444efdf73c710644ee039b44fa36a31d716f69
|
[
"Apache-2.0"
] | null | null | null |
tests/resources/mlflow-test-plugin/mlflow_test_plugin/default_experiment_provider.py
|
Sohamkayal4103/mlflow
|
4e444efdf73c710644ee039b44fa36a31d716f69
|
[
"Apache-2.0"
] | 2
|
2019-05-11T08:13:38.000Z
|
2019-05-14T13:33:54.000Z
|
from mlflow.tracking.default_experiment.abstract_context import DefaultExperimentProvider
class PluginDefaultExperimentProvider(DefaultExperimentProvider):
"""DefaultExperimentProvider provided through plugin system"""
def in_context(self):
return False
def get_experiment_id(self):
return "experiment_id_1"
| 28.333333
| 89
| 0.791176
| 247
| 0.726471
| 0
| 0
| 0
| 0
| 0
| 0
| 79
| 0.232353
|
4d09ec45c4e1965510df15bcf08b297cda5ab9d9
| 1,097
|
py
|
Python
|
ac_loss_plot.py
|
atul799/CarND-Semantic-Segmentation
|
dbec928d3ba9cc68f3de9bbb7707df85131c1d5c
|
[
"MIT"
] | null | null | null |
ac_loss_plot.py
|
atul799/CarND-Semantic-Segmentation
|
dbec928d3ba9cc68f3de9bbb7707df85131c1d5c
|
[
"MIT"
] | null | null | null |
ac_loss_plot.py
|
atul799/CarND-Semantic-Segmentation
|
dbec928d3ba9cc68f3de9bbb7707df85131c1d5c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
plot acc loss
@author: atpandey
"""
#%%
import matplotlib.pyplot as plt
#%%
ff='./to_laptop/trg_file.txt'
with open(ff,'r') as trgf:
listidx=[]
listloss=[]
listacc=[]
ctr=0
for line in trgf:
if(ctr>0):
            ll = line.split(',')
            listidx.append(ll[0])
            # convert loss/accuracy values to floats so they plot on a numeric axis
            listloss.append(float(ll[1]))
            listacc.append(float(ll[2]))
#listf.append(line)
ctr +=1
#for i in range(len(listidx)):
# print("idx: {}, loss: {}, acc: {}".format(listidx[i],listloss[i],listacc[i]))
# Make a figure
fig = plt.figure()
plt.subplots_adjust(top = 0.99, bottom=0.05, hspace=0.5, wspace=0.4)
# The axes
ax1 = fig.add_subplot(2, 1, 1)
ax2 = fig.add_subplot(2, 1, 2)
#plots
ax1.plot(listloss,'bo-',label='loss')
ax2.plot(listacc,'go-',label='accuracy')
ax1.set_xlabel('training idx')
ax1.set_ylabel('Loss')
ax1.set_title('loss data set')
ax1.legend()
ax2.set_xlabel('training idx')
ax2.set_ylabel('accuracy')
ax2.set_title('accuracydata set')
ax2.legend()
# save before showing so the written image is not blank
plt.savefig('./outputs/loss_accuracy.png')
plt.show()
| 18.913793
| 82
| 0.606199
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 396
| 0.360985
|
4d0a6ad7788dddfb228aeaaea80d6d51b9e09fa7
| 8,611
|
py
|
Python
|
VA_multiples/src/main.py
|
brown9804/Modelos_Probabilisticos-
|
8ddc6afbe4da5975af9eb5dc946ff19daa1171bc
|
[
"Apache-2.0"
] | null | null | null |
VA_multiples/src/main.py
|
brown9804/Modelos_Probabilisticos-
|
8ddc6afbe4da5975af9eb5dc946ff19daa1171bc
|
[
"Apache-2.0"
] | null | null | null |
VA_multiples/src/main.py
|
brown9804/Modelos_Probabilisticos-
|
8ddc6afbe4da5975af9eb5dc946ff19daa1171bc
|
[
"Apache-2.0"
] | null | null | null |
##--------------------------------Main file------------------------------------
##
## Copyright (C) 2020 by Belinda Brown Ramírez (belindabrownr04@gmail.com)
## June, 2020
## timna.brown@ucr.ac.cr
##-----------------------------------------------------------------------------
# Multiple random variables
# Two databases are considered, containing the following:
# 1. ****** A record of the relative frequency of two joint random
# variables in table form: xy.csv
# 2. ****** (x, y) pairs and their associated probability: xyp.csv
# Recall that a random variable is a deterministic function.
#### **************** Algorithm **************** ####
#******************************************************
# IMPORTING PACKAGES
#******************************************************
# Note that not all of these imports are required, but they were used
# at various points while developing this assignment, so they are kept
# in the algorithm as comments.
# from __future__ import division
# from pylab import *
# from sklearn import *
# from sklearn.preprocessing import PolynomialFeatures
# import math
# import decimal
# import pandas as pd
# from scipy.stats import norm
# from scipy.stats import rayleigh
# import csv
import pandas as pd
from collections import OrderedDict
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from mpl_toolkits.mplot3d import axes3d
from numpy import *
import numpy as np
from matplotlib import cm
import scipy.stats as stats
from scipy.optimize import curve_fit
#******************************************************
# DEFINITIONS
#******************************************************
def distribucion_normal(va, mu, sigma):
dist_normal = 1/(np.sqrt(2*np.pi*sigma**2)) * np.exp(-(va-mu)**2/(2*sigma**2))
return dist_normal
def densidad_conjunta(va0,va1,mu0,sigma0,mu1,sigma1):
val_conjunto = 1/((np.sqrt(2*np.pi*sigma0**2)) * np.exp(-(va0-mu0)**2/(2*sigma0**2)) * (1/(np.sqrt(2*np.pi*sigma1**2)) * np.exp(-(va1-mu1)**2/(2*sigma1**2))))
return val_conjunto
def ajuste_curva(marginal, par1, par2, distri_norm, graph_label_dis, distri_x_name_img, func_graph_label, function_va_img):
va = np.linspace(par1,par2,len(marginal))
plt.bar(va, marginal, label= graph_label_dis)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + distri_x_name_img + ".png")
parametros_va, _ = curve_fit(distri_norm, va, marginal)
mu, sigma = parametros_va[0], parametros_va[1]
print("\n\nMu " + distri_x_name_img + " = ", mu)
print("Sigma " + distri_x_name_img + " = ", sigma)
va_function = stats.norm(mu,sigma)
curva_ajustada = np.linspace(va_function.ppf(0.01), va_function.ppf(0.99), 100)
plt.plot(curva_ajustada,va_function.pdf(curva_ajustada),label=func_graph_label)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + function_va_img+".png")
    # # Clear the plotting area
plt.cla()
return curva_ajustada, mu, sigma
def valor_esperado(marginal,lim_inferior,lim_superior, de_quien_v_valor_esperado):
dominio = []
valor_esperado_marginal = 0
for k in range (5, lim_superior +1):
dominio.append(k)
dominio = list(OrderedDict.fromkeys(dominio))
print("\n\nEl dominio es de: ", dominio)
for i in range (0,len(marginal)):
valor_esperado_marginal = valor_esperado_marginal + dominio[i]*marginal[i]
print("\n" +de_quien_v_valor_esperado +" tiene un valor de: ", valor_esperado_marginal)
return valor_esperado_marginal
def grafica_en2d(mu_va, sigma_va, par1_modelo, nombre2d):
va_funcion_distri = stats.norm(mu_va,sigma_va)
curve = np.linspace(va_funcion_distri.ppf(0.01), va_funcion_distri.ppf(0.99), par1_modelo)
plt.plot(curve,va_funcion_distri.pdf(curve),label=nombre2d)
plt.legend()
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + nombre2d+".png")
    # # Clear the plotting area
plt.cla()
return
def grafica_en3d(VA0_modelo, VA1_modelo, VA0, VA1, nombre):
Z = []
for i in VA0:
XY = []
for j in VA1:
XY.append(i*j)
Z.append(XY)
fig = plt.figure()
eje_x= plt.axes(projection='3d')
VA0,VA1 = np.meshgrid(VA0_modelo,VA1_modelo)
eje_x.plot_surface(VA0,VA1,np.array(Z),cmap=cm.coolwarm)
plt.savefig("/Users/belindabrown/Desktop/VA_multiples/results/" + nombre+".png")
return
#******************************************************
# READING VALUES
# FROM THE CSV FILES
#******************************************************
data = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xy.csv", index_col=0)
data_xyp = pd.read_csv("/Users/belindabrown/Desktop/VA_multiples/data_base/xyp.csv")
#******************************************************
# BEST-FIT CURVE
# FOR THE MARGINAL
# DENSITY FUNCTIONS OF X & Y
#******************************************************
# The marginal values of both x and y are required
# The column with the sum over all columns is the marginal probability of X
marg_value_x = [n for n in data.sum(axis=1, numeric_only=True)]
# The row with the sum over all rows is the marginal probability of Y
marg_value_y = [n for n in data.sum(axis=0, numeric_only=True)]
print("\nValor marginal de X: ", marg_value_x)
print("\nValor marginal de Y: ", marg_value_y)
x_curva_modelo, x_mu, x_sigma = ajuste_curva(marg_value_x, 5, 15, distribucion_normal, "Datos que pertenencen a X","Datos_de_X", "Modelos de X(x)", "Modelado_X(x)")
y_curva_modelo, y_mu, y_sigma = ajuste_curva(marg_value_y, 5, 25, distribucion_normal, "Datos que pertenencen a Y","Datos_de_Y", "Modelos de Y(y)", "Modelado_Y(y)")
#******************************************************
# JOINT DENSITY
# FUNCTION OF
# X & Y
#******************************************************
probabi_conjuntaX = distribucion_normal(x_curva_modelo,x_mu,x_sigma)
probabi_conjuntaY = distribucion_normal(y_curva_modelo,y_mu,y_sigma)
#******************************************************
# CORRELATION AND COVARIANCE VALUES,
# CORRELATION COEFFICIENT (PEARSON)
# AND THEIR MEANING
#******************************************************
###### OBTAINED FROM XY.CSV
# The previously computed values are required in order to calculate
# E[X] & E[Y], known as the expected values.
# Initialized expected values of X and Y (E[X] and E[Y])
# This range is [x0, x1], i.e., it includes the limits
e_x = valor_esperado(marg_value_x,5,15, "X")
e_y = valor_esperado(marg_value_y,5,25, "Y")
multi_valor_esperados = e_x*e_y
# E[X]*E[Y] is computed
print("\n\nEl valor de E[X]E[Y] es de: ", multi_valor_esperados)
###### OBTAINED FROM XYP.CSV
# Since the first row contains the labels x, y, p
todos_mu_sum = data_xyp.x * data_xyp.y * data_xyp.p
# The sum E[XY] gives the correlation
correlacion = todos_mu_sum.sum()
# Now, for the covariance: as seen in class, the covariance is the
# correlation minus the product of the expected values.
covarianza = correlacion - multi_valor_esperados
# The Pearson correlation coefficient must be computed using the values
# obtained from the provided data, so ...
# According to the results obtained when running the program:
# SigmaDatos_de_X = 3.2994428707078436
# SigmaDatos_de_Y = 6.0269377486808775
# The Pearson coefficient is calculated as the covariance divided by the
# product of the sigmas
coef_pearson = covarianza/(3.2994428707078436*6.0269377486808775)
print("\nEl resultado de la correlación es de: ", correlacion)
print("\nEl resultado de la covarianza es de: ",covarianza)
print("\nDe acuerdo a los datos obtenidos y considerando todo sus decimales se tiene que el coeficiente de Pearson es de: ", coef_pearson)
#******************************************************
# 2D PLOT OF THE MARGINAL
# DENSITY FUNCTIONS
# &
# 3D PLOT OF THE JOINT
# DENSITY FUNCTION
#******************************************************
# Since the values need to be rounded for the plot, note that the full
# parameters for the model are the ones already computed
distribucion_de_x = grafica_en2d(x_mu, x_sigma, 100,"Distribucion_de_X")
distribucion_de_y = grafica_en2d(y_mu, y_sigma, 100,"Distribucion_de_Y")
dis_cojun3d = grafica_en3d(x_curva_modelo, y_curva_modelo, probabi_conjuntaX, probabi_conjuntaY, "Distribucion_en_3D")
| 46.048128
| 164
| 0.652537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,828
| 0.560093
|
4d0a9eaef2e9a5554500cb97127b08aa78c0807c
| 7,527
|
py
|
Python
|
official/mnist/mnist.py
|
TuKJet/models
|
984fbc754943c849c55a57923f4223099a1ff88c
|
[
"Apache-2.0"
] | 3,326
|
2018-01-26T22:42:25.000Z
|
2022-02-16T13:16:39.000Z
|
official/mnist/mnist.py
|
lianlengyunyu/models
|
984fbc754943c849c55a57923f4223099a1ff88c
|
[
"Apache-2.0"
] | 150
|
2017-08-28T14:59:36.000Z
|
2022-03-11T23:21:35.000Z
|
official/mnist/mnist.py
|
lianlengyunyu/models
|
984fbc754943c849c55a57923f4223099a1ff88c
|
[
"Apache-2.0"
] | 1,474
|
2018-02-01T04:33:18.000Z
|
2022-03-08T07:02:20.000Z
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Convolutional Neural Network Estimator for MNIST, built with tf.layers."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import os
import sys
import tensorflow as tf
import dataset
class Model(object):
"""Class that defines a graph to recognize digits in the MNIST dataset."""
def __init__(self, data_format):
"""Creates a model for classifying a hand-written digit.
Args:
data_format: Either 'channels_first' or 'channels_last'.
'channels_first' is typically faster on GPUs while 'channels_last' is
typically faster on CPUs. See
https://www.tensorflow.org/performance/performance_guide#data_formats
"""
if data_format == 'channels_first':
self._input_shape = [-1, 1, 28, 28]
else:
assert data_format == 'channels_last'
self._input_shape = [-1, 28, 28, 1]
self.conv1 = tf.layers.Conv2D(
32, 5, padding='same', data_format=data_format, activation=tf.nn.relu)
self.conv2 = tf.layers.Conv2D(
64, 5, padding='same', data_format=data_format, activation=tf.nn.relu)
self.fc1 = tf.layers.Dense(1024, activation=tf.nn.relu)
self.fc2 = tf.layers.Dense(10)
self.dropout = tf.layers.Dropout(0.4)
self.max_pool2d = tf.layers.MaxPooling2D(
(2, 2), (2, 2), padding='same', data_format=data_format)
def __call__(self, inputs, training):
"""Add operations to classify a batch of input images.
Args:
inputs: A Tensor representing a batch of input images.
training: A boolean. Set to True to add operations required only when
training the classifier.
Returns:
A logits Tensor with shape [<batch_size>, 10].
"""
y = tf.reshape(inputs, self._input_shape)
y = self.conv1(y)
y = self.max_pool2d(y)
y = self.conv2(y)
y = self.max_pool2d(y)
y = tf.layers.flatten(y)
y = self.fc1(y)
y = self.dropout(y, training=training)
return self.fc2(y)
def model_fn(features, labels, mode, params):
"""The model_fn argument for creating an Estimator."""
model = Model(params['data_format'])
image = features
if isinstance(image, dict):
image = features['image']
if mode == tf.estimator.ModeKeys.PREDICT:
logits = model(image, training=False)
predictions = {
'classes': tf.argmax(logits, axis=1),
'probabilities': tf.nn.softmax(logits),
}
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.PREDICT,
predictions=predictions,
export_outputs={
'classify': tf.estimator.export.PredictOutput(predictions)
})
if mode == tf.estimator.ModeKeys.TRAIN:
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4)
logits = model(image, training=True)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
accuracy = tf.metrics.accuracy(
labels=tf.argmax(labels, axis=1), predictions=tf.argmax(logits, axis=1))
# Name the accuracy tensor 'train_accuracy' to demonstrate the
# LoggingTensorHook.
tf.identity(accuracy[1], name='train_accuracy')
tf.summary.scalar('train_accuracy', accuracy[1])
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.TRAIN,
loss=loss,
train_op=optimizer.minimize(loss, tf.train.get_or_create_global_step()))
if mode == tf.estimator.ModeKeys.EVAL:
logits = model(image, training=False)
loss = tf.losses.softmax_cross_entropy(onehot_labels=labels, logits=logits)
return tf.estimator.EstimatorSpec(
mode=tf.estimator.ModeKeys.EVAL,
loss=loss,
eval_metric_ops={
'accuracy':
tf.metrics.accuracy(
labels=tf.argmax(labels, axis=1),
predictions=tf.argmax(logits, axis=1)),
})
def main(unused_argv):
data_format = FLAGS.data_format
if data_format is None:
data_format = ('channels_first'
if tf.test.is_built_with_cuda() else 'channels_last')
mnist_classifier = tf.estimator.Estimator(
model_fn=model_fn,
model_dir=FLAGS.model_dir,
params={
'data_format': data_format
})
# Train the model
def train_input_fn():
# When choosing shuffle buffer sizes, larger sizes result in better
# randomness, while smaller sizes use less memory. MNIST is a small
# enough dataset that we can easily shuffle the full epoch.
ds = dataset.train(FLAGS.data_dir)
ds = ds.cache().shuffle(buffer_size=50000).batch(FLAGS.batch_size).repeat(
FLAGS.train_epochs)
(images, labels) = ds.make_one_shot_iterator().get_next()
return (images, labels)
# Set up training hook that logs the training accuracy every 100 steps.
tensors_to_log = {'train_accuracy': 'train_accuracy'}
logging_hook = tf.train.LoggingTensorHook(
tensors=tensors_to_log, every_n_iter=100)
mnist_classifier.train(input_fn=train_input_fn, hooks=[logging_hook])
# Evaluate the model and print results
def eval_input_fn():
return dataset.test(FLAGS.data_dir).batch(
FLAGS.batch_size).make_one_shot_iterator().get_next()
eval_results = mnist_classifier.evaluate(input_fn=eval_input_fn)
print()
print('Evaluation results:\n\t%s' % eval_results)
# Export the model
if FLAGS.export_dir is not None:
image = tf.placeholder(tf.float32, [None, 28, 28])
input_fn = tf.estimator.export.build_raw_serving_input_receiver_fn({
'image': image,
})
mnist_classifier.export_savedmodel(FLAGS.export_dir, input_fn)
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch_size',
type=int,
default=100,
help='Number of images to process in a batch')
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/mnist_data',
help='Path to directory containing the MNIST dataset')
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/mnist_model',
help='The directory where the model will be stored.')
parser.add_argument(
'--train_epochs', type=int, default=40, help='Number of epochs to train.')
parser.add_argument(
'--data_format',
type=str,
default=None,
choices=['channels_first', 'channels_last'],
help='A flag to override the data format used in the model. channels_first '
'provides a performance boost on GPU but is not always compatible '
'with CPU. If left unspecified, the data format will be chosen '
'automatically based on whether TensorFlow was built for CPU or GPU.')
parser.add_argument(
'--export_dir',
type=str,
help='The directory where the exported SavedModel will be stored.')
tf.logging.set_verbosity(tf.logging.INFO)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| 36.1875
| 82
| 0.689385
| 1,740
| 0.231168
| 0
| 0
| 0
| 0
| 0
| 0
| 2,778
| 0.369071
|
4d0b5e5a16eda393441922d1c3ec56983303e265
| 523
|
py
|
Python
|
pep_92.py
|
sayantan3/project-euler
|
9b856c84a0b174754819ed15f86eb0f30181e94e
|
[
"MIT"
] | null | null | null |
pep_92.py
|
sayantan3/project-euler
|
9b856c84a0b174754819ed15f86eb0f30181e94e
|
[
"MIT"
] | null | null | null |
pep_92.py
|
sayantan3/project-euler
|
9b856c84a0b174754819ed15f86eb0f30181e94e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
def calculate():
ans = sum(1
for i in range(1, 10000000)
if get_terminal(i) == 89)
return str(ans)
TERMINALS = (1, 89)
def get_terminal(n):
while n not in TERMINALS:
n = square_digit_sum(n)
return n
def square_digit_sum(n):
result = 0
while n > 0:
result += sq_sum[n % 1000]
n //= 1000
return result
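# Worked example (added for clarity): square_digit_sum(123456) is computed three
# decimal digits at a time, sq_sum[456] + sq_sum[123] = 77 + 14 = 91, which equals
# 1^2 + 2^2 + 3^2 + 4^2 + 5^2 + 6^2.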
sq_sum = [sum(int(c)**2 for c in str(i)) for i in range(1000)]
if __name__ == "__main__":
print(calculate())
| 18.678571
| 62
| 0.565966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 0.061185
|
4d0e5f2a06efaa32ab6853b48bd163c479f22bbd
| 467
|
py
|
Python
|
Visualization/ConstrainedOpt.py
|
zhijieW94/SAGNet
|
017b58853cb51d50851a5a3728b3205d235ff889
|
[
"MIT"
] | 25
|
2019-09-15T09:10:17.000Z
|
2021-04-08T07:44:16.000Z
|
Visualization/ConstrainedOpt.py
|
zhijieW-94/SAGNet
|
017b58853cb51d50851a5a3728b3205d235ff889
|
[
"MIT"
] | 9
|
2019-11-16T07:06:08.000Z
|
2021-03-07T09:14:32.000Z
|
Visualization/ConstrainedOpt.py
|
zhijieW94/SAGNet
|
017b58853cb51d50851a5a3728b3205d235ff889
|
[
"MIT"
] | 7
|
2019-09-25T18:07:54.000Z
|
2021-12-21T08:41:47.000Z
|
from PyQt5.QtCore import *
class ConstrainedOpt(QThread):
signal_update_voxels = pyqtSignal(str)
def __init__(self, model,index):
QThread.__init__(self)
self.model = model['model']
# self.model = model
self.name = model['name']
self.index = index
def run(self):
# while True:
self.update_voxel_model()
def update_voxel_model(self):
self.signal_update_voxels.emit('update_voxels')
| 24.578947
| 55
| 0.631692
| 439
| 0.940043
| 0
| 0
| 0
| 0
| 0
| 0
| 68
| 0.14561
|
4d0e95505acc0778edf0c0cfa6593332a693d19f
| 7,813
|
py
|
Python
|
Common/EDACommon.py
|
MlGroupsWJ/Customer-Satisfication
|
bae0d3691613f078c88f926fee8d1d0684cb6f88
|
[
"Apache-2.0"
] | null | null | null |
Common/EDACommon.py
|
MlGroupsWJ/Customer-Satisfication
|
bae0d3691613f078c88f926fee8d1d0684cb6f88
|
[
"Apache-2.0"
] | null | null | null |
Common/EDACommon.py
|
MlGroupsWJ/Customer-Satisfication
|
bae0d3691613f078c88f926fee8d1d0684cb6f88
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding:UTF-8 -*-
import pandas as pd
from minepy import MINE
import seaborn as sns
import matplotlib.pyplot as plt
from sklearn.ensemble import ExtraTreesClassifier
import xgboost as xgb
import operator
from sklearn.utils import shuffle
from Common.ModelCommon import ModelCV
from sklearn import svm
import numpy as np
class NAClass(object):
def __init__(self):
pass
    # Get the list of features that contain NA values
def GetNAFeatures(self, df):
return df.columns[df.isnull().sum() != 0].tolist()
    # Show features with missing values, sorted from most to fewest
def ShowNAInfo(self, df, NAlist):
NA_count = df[NAlist].isnull().sum().sort_values(ascending=False)
NAInfo = pd.DataFrame({'NA_count': NA_count, 'NA_percent': NA_count/df.shape[0]})
print(NAInfo)
    # Generic interface for handling features with missing values; strategy selects the fill policy
def HandleNA(self, df, NAfeaturesList, strategy='mean'):
if strategy == 'mean':
for feature in NAfeaturesList:
if df[feature].dtypes == 'object':
raise ValueError('Nonnumeric feature!')
df[feature].fillna(df[feature].mean(), inplace=True)
elif strategy == 'mode':
for feature in NAfeaturesList:
df[feature].fillna(df[feature].mode()[0], inplace=True)
elif strategy == 'drop':
df.drop(NAfeaturesList, axis=1, inplace=True)
else:
for feature in NAfeaturesList:
if (df[feature].dtypes == 'object' and type(strategy) != str) or (
df[feature].dtypes != 'object' and type(strategy) == str):
raise ValueError('Mismatched type!')
df[feature].fillna(strategy, inplace=True)
def checkNA(self, df):
return df.isnull().sum().max()
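# Minimal usage sketch (added; `df` is an assumed pandas DataFrame containing NaNs):
#   na = NAClass()
#   na_cols = na.GetNAFeatures(df)              # columns that contain NaN
#   na.ShowNAInfo(df, na_cols)                  # counts and percentages per column
#   na.HandleNA(df, na_cols, strategy='mean')   # fill numeric NaNs in place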
def CategoricalList(df):
return [attr for attr in df.columns if df.dtypes[attr] == 'object']
def NumericalList(df):
return [attr for attr in df.columns if df.dtypes[attr] != 'object']
def GetTargetDf(df, target):
targetdf = pd.DataFrame(df[target].value_counts())
targetdf['Percent'] = targetdf[target]/df.shape[0]
return targetdf
def GetZeroDf(df):
zerodf = pd.DataFrame(df[df == 0].count())
zerodf['Percent'] = zerodf[0]/df.shape[0]
zerodf.rename(columns={0: 'Count'}, inplace=True)
return zerodf
def GetValueCountDf(df):
valueCountList = []
for feat in df.columns:
valueCountList.append(df[feat].value_counts().shape[0])
valueCountDf = pd.DataFrame({'feat': df.columns, 'valueCount': valueCountList})
return valueCountDf
def GetZeroColumns(df):
zeros = df[df != 0].count()
return zeros[zeros == 0].index
def mic(x, y):
m = MINE()
m.compute_score(x, y)
return m.mic()
def featShow(train_data, feat):
plt.scatter(range(train_data.shape[0]), train_data[feat].values, s=20)
plt.xlabel('index')
plt.ylabel(feat)
plt.show()
def TypeShow(train_data):
dtype_df = train_data.dtypes.reset_index()
dtype_df.columns = ["Count", "Column Type"]
print(dtype_df.groupby("Column Type").aggregate('count').reset_index())
# Get feature importances via decision trees (ExtraTreesClassifier)
def TreeImportanceShow(train_data):
x = train_data[train_data.columns[:-1]]
y = train_data['TARGET']
clf = ExtraTreesClassifier()
clf.fit(x, y.astype('int'))
imptdf = pd.DataFrame({'feat': x.columns, 'importance': clf.feature_importances_})
imptdf_sort = imptdf.sort_values(by='importance', ascending=False)
# print("decision tree importance:\n", imptdf_sort)
sns.barplot(data=imptdf_sort, x='feat', y='importance')
plt.xticks(rotation='vertical')
# plt.show()
return imptdf_sort
def xgbImportanceShow(train_data):
x = train_data[train_data.columns[:-1]]
y = train_data['TARGET']
dtrain = xgb.DMatrix(x, y)
xgb_params = {"objective": "binary:logistic", "eta": 0.01, "max_depth": 8, "seed": 42, "silent": 1}
model = xgb.train(xgb_params, dtrain, num_boost_round=100)
impt = model.get_fscore()
impt = sorted(impt.items(), key=operator.itemgetter(1))
imptdf = pd.DataFrame(impt, columns=['feature', 'fscore'])
imptdf_sort = imptdf.sort_values(by='fscore', ascending=False)
# print("xgb importance:\n", imptdf_sort)
imptdf_sort.to_csv('../tmp/xgb_importance.csv', index=False)
xgb.plot_importance(model, max_num_features=400, height=0.8)
# plt.show()
return imptdf_sort
def valueCountsShow(train_data, featlist):
for feat in featlist:
print(train_data[feat].value_counts())
# rate: after undersampling, the number of TARGET == 0 samples should be rate times the number of TARGET == 1 samples
def underSampling(train, rate):
idx_0 = train[train['TARGET'] == 0].index
idx_1 = train[train['TARGET'] == 1].index
len_1 = len(train.loc[idx_1])
undersample_idx_0 = shuffle(idx_0, random_state=37, n_samples=int(len_1*rate))
idx_list = list(undersample_idx_0) + list(idx_1)
train = train.loc[idx_list].reset_index(drop=True)
return train
# repeat: how many extra times the TARGET == 1 samples are appended
def overSampling(train, repeat):
idx_1 = train[train['TARGET'] == 1].index
i = 0
while i < repeat:
train = pd.concat([train, train.iloc[idx_1, :]], axis=0).reset_index(drop=True)
i += 1
return train
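# Illustrative note (added): underSampling(train, 2) keeps every TARGET == 1 row plus a
# shuffled subset of roughly twice as many TARGET == 0 rows, while overSampling(train, 2)
# appends the TARGET == 1 rows to the frame two more times.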
# Use the CV score on train_data as the criterion; note that different sampling rates yield somewhat different final sample counts, which may affect how objective the metric is
def getBestUnSamplingRate(train, ratelist):
bestscore = 0
bestrate = 0
for rate in ratelist:
svc = svm.LinearSVC()
train_data = underSampling(train, rate)
score = ModelCV(svc, 'svm', train_data, 5)
print("rate :%f, score:%f" % (rate, score))
if score > bestscore:
bestscore = score
bestrate = rate
print("best rate :%f, best score:%f" % (bestrate, bestscore))
return bestrate
def corr_heatmap(train, v):
correlations = train[v].corr()
# Create color map ranging between two colors
cmap = sns.diverging_palette(220, 10, as_cmap=True)
sns.heatmap(correlations, cmap=cmap, vmax=1.0, center=0, fmt='.2f',
square=True, linewidths=.5, annot=True, cbar_kws={"shrink": .75})
plt.show()
def typeShow(train_data):
print(train_data.dtypes.value_counts())
def getTypeMap(train_data):
typeMap = {}
typeMap['int64'] = train_data.dtypes[train_data.dtypes == 'int64'].index
typeMap['float64'] = train_data.dtypes[train_data.dtypes == 'float64'].index
return typeMap
# iswhole=True means df is the full dataset and TARGET must be dropped before computing correlations; False means df already holds filtered columns without TARGET
def getHighCorrList(df, thres, iswhole):
if iswhole:
x = df.iloc[:, :-1]
else:
x = df
corr = x.corr()
index = corr.index[np.where(corr > thres)[0]]
columns = corr.columns[np.where(corr > thres)[1]]
highCorrList = [[index[i], columns[i]] for i in range(len(index)) if index[i] != columns[i]]
uniqList = [[0, 0]]
for i in range(len(highCorrList)):
uniqCount = 0
for j in range(len(uniqList)):
if highCorrList[i][0] == uniqList[j][1] and highCorrList[i][1] == uniqList[j][0]:
uniqCount += 1
if uniqCount == 0:
uniqList.append(highCorrList[i])
del uniqList[0]
return uniqList
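# Worked example (added; toy frame, iswhole=False so no TARGET column is assumed):
#   df = pd.DataFrame({'a': [1, 2, 3], 'b': [2, 4, 6], 'c': [3, 1, 2]})
#   getHighCorrList(df, 0.95, False)   # -> [['a', 'b']]; the reversed ['b', 'a'] pair is dropped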
def getDropHighCorrList(highList):
dropList = []
for item in highList:
if item[0] in dropList:
break
if item[1] in dropList:
break
else:
dropList.append(item[1])
return dropList
def getUinqueCorrDf(train, threshold):
cor_mat = train.corr()
important_corrs = (cor_mat[abs(cor_mat) > threshold][cor_mat != 1.0]).unstack().dropna().to_dict()
unique_important_corrs = pd.DataFrame(
list(set([(tuple(sorted(key)), important_corrs[key]) for key in important_corrs])),
columns=['attribute pair', 'correlation'])
    unique_important_corrs = unique_important_corrs.iloc[abs(unique_important_corrs['correlation']).argsort()[::-1]]
return unique_important_corrs
| 32.419087
| 114
| 0.64879
| 1,501
| 0.184285
| 0
| 0
| 0
| 0
| 0
| 0
| 1,341
| 0.164641
|
4d149a21b75c6283b2a1da8727432ed396c798a7
| 1,537
|
py
|
Python
|
redbull/doc_html.py
|
theSage21/redbull
|
2f3cf0c8a8a910c18151ca493984d0a364581710
|
[
"MIT"
] | 10
|
2018-09-11T09:11:13.000Z
|
2019-09-10T09:47:35.000Z
|
redbull/doc_html.py
|
theSage21/redbull
|
2f3cf0c8a8a910c18151ca493984d0a364581710
|
[
"MIT"
] | null | null | null |
redbull/doc_html.py
|
theSage21/redbull
|
2f3cf0c8a8a910c18151ca493984d0a364581710
|
[
"MIT"
] | null | null | null |
def gen_doc_html(version, api_list):
doc_html = f'''
<!DOCTYPE html>
<html>
<head><meta charset="utf-8">
<meta name="viewport" content="width=device-width, initial-scale=1">
<style type="text/css">
body{{margin:40px auto;
max-width:650px; line-height:1.6;
font-size:18px; color:#444;
padding:0 10px}}
h1,h2,h3{{line-height:1.2}}
pre{{background-color: beige;
border-radius: 5px;
padding: 15px;
border: 1px solid black;
}}
</style></head>
<body>
<h1>API Docs V{version}</h1>
<p>The documentation is live and autogenerated.</p>
<hr>
<div id='docs'>
</div>
<script>
var api_list = {str(api_list)};'''
doc_html += '''
for(var i=0; i < api_list.length; ++i){
var url = api_list[i];
var xmlhttp = new XMLHttpRequest();
xmlhttp.open("OPTIONS", url, false);
xmlhttp.onreadystatechange = function(){
if (xmlhttp.readyState == 4 && xmlhttp.status == 200){
var doc = document.createElement('pre');
doc.innerHTML = xmlhttp.responseText;
document.getElementById('docs').appendChild(doc);
}
}
xmlhttp.send();
}
</script>
</body>
</html>
'''
return doc_html
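if __name__ == '__main__':
    # Hedged usage sketch (added): the version string and endpoint list below are
    # purely illustrative; gen_doc_html only interpolates them into the page.
    print(gen_doc_html('1.0', ['/api/users', '/api/items']))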
| 33.413043
| 78
| 0.471698
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,447
| 0.941444
|
4d1789b7a180d686bba726991622611824a7655a
| 11,166
|
py
|
Python
|
spekev2_verification_testsuite/test_drm_system_specific_system_id_elements.py
|
amphied/speke-reference-server
|
7b589a333fb3c619c6f7e53483d43de9a588f7b7
|
[
"Apache-2.0"
] | null | null | null |
spekev2_verification_testsuite/test_drm_system_specific_system_id_elements.py
|
amphied/speke-reference-server
|
7b589a333fb3c619c6f7e53483d43de9a588f7b7
|
[
"Apache-2.0"
] | null | null | null |
spekev2_verification_testsuite/test_drm_system_specific_system_id_elements.py
|
amphied/speke-reference-server
|
7b589a333fb3c619c6f7e53483d43de9a588f7b7
|
[
"Apache-2.0"
] | null | null | null |
import xml.etree.ElementTree as ET
import pytest
from .helpers import utils
@pytest.fixture(scope="session")
def playready_pssh_cpd_response(spekev2_url):
test_request_data = utils.read_xml_file_contents("test_case_1_p_v_1_a_1", utils.PRESETS_PLAYREADY)
response = utils.speke_v2_request(spekev2_url, test_request_data)
return response.text
@pytest.fixture(scope="session")
def widevine_pssh_cpd_response(spekev2_url):
test_request_data = utils.read_xml_file_contents("test_case_1_p_v_1_a_1", utils.PRESETS_WIDEVINE)
response = utils.speke_v2_request(spekev2_url, test_request_data)
return response.text
@pytest.fixture(scope="session")
def fairplay_hls_signalingdata_response(spekev2_url):
test_request_data = utils.read_xml_file_contents("test_case_1_p_v_1_a_1", utils.PRESETS_FAIRPLAY)
response = utils.speke_v2_request(spekev2_url, test_request_data)
return response.text
def test_widevine_pssh_cpd_no_rotation(widevine_pssh_cpd_response):
root_cpix = ET.fromstring(widevine_pssh_cpd_response)
drm_system_list_element = root_cpix.find('./{urn:dashif:org:cpix}DRMSystemList')
drm_system_elements = drm_system_list_element.findall('./{urn:dashif:org:cpix}DRMSystem')
for drm_system_element in drm_system_elements:
pssh_data_bytes = drm_system_element.find('./{urn:dashif:org:cpix}PSSH')
content_protection_data_bytes = drm_system_element.find('./{urn:dashif:org:cpix}ContentProtectionData')
content_protection_data_string = utils.decode_b64_bytes(content_protection_data_bytes.text)
pssh_in_cpd = ET.fromstring(content_protection_data_string)
# Assert pssh in cpd is same as pssh box
assert pssh_data_bytes.text == pssh_in_cpd.text, \
"Content in PSSH box and the requested content in ContentProtectionData are expected to be the same"
def test_dash_playready_pssh_cpd_no_rotation(playready_pssh_cpd_response):
root_cpix = ET.fromstring(playready_pssh_cpd_response)
drm_system_list_element = root_cpix.find('./{urn:dashif:org:cpix}DRMSystemList')
drm_system_elements = drm_system_list_element.findall('./{urn:dashif:org:cpix}DRMSystem')
for drm_system_element in drm_system_elements:
pssh_data_bytes = drm_system_element.find('./{urn:dashif:org:cpix}PSSH')
content_protection_data_bytes = drm_system_element.find('./{urn:dashif:org:cpix}ContentProtectionData')
content_protection_data_string = utils.decode_b64_bytes(content_protection_data_bytes.text)
cpd_xml = '<cpd>' + content_protection_data_string + '</cpd>'
cpd_root = ET.fromstring(cpd_xml)
pssh_in_cpd = cpd_root.find("./{urn:mpeg:cenc:2013}pssh")
# Assert pssh in cpd is same as pssh box
assert pssh_data_bytes.text == pssh_in_cpd.text, \
"Content in PSSH box and the requested content in ContentProtectionData are expected to be the same"
# Validate presence of HLSSignalingData and PSSH when those elements are present in the request
def test_playready_pssh_hlssignalingdata_no_rotation(playready_pssh_cpd_response):
root_cpix = ET.fromstring(playready_pssh_cpd_response)
drm_system_list_element = root_cpix.find('./{urn:dashif:org:cpix}DRMSystemList')
drm_system_elements = drm_system_list_element.findall('./{urn:dashif:org:cpix}DRMSystem')
for drm_system_element in drm_system_elements:
pssh_data_bytes = drm_system_element.find('./{urn:dashif:org:cpix}PSSH')
assert pssh_data_bytes.text, \
"PSSH must not be empty"
hls_signalling_data_elems = drm_system_element.findall('./{urn:dashif:org:cpix}HLSSignalingData')
# Two elements are expected, one for media and other for master
assert len(hls_signalling_data_elems) == 2, \
"Two HLSSignalingData elements are expected for this request: media and master, received {}".format(
hls_signalling_data_elems)
# Check if HLSSignalingData text is present in the response
hls_signalling_data_media = "{urn:dashif:org:cpix}HLSSignalingData[@playlist='media']"
assert drm_system_element.find(hls_signalling_data_media).text, \
"One HLSSignalingData element is expected to have a playlist value of media"
hls_signalling_data_master = "{urn:dashif:org:cpix}HLSSignalingData[@playlist='master']"
assert drm_system_element.find(hls_signalling_data_master).text, \
"One HLSSignalingData element is expected to have a playlist value of master"
received_playlist_atrrib_values = [hls_signalling_data.get('playlist') for hls_signalling_data in
hls_signalling_data_elems]
# Check both media and master attributes are present in the response
assert all(attribute in received_playlist_atrrib_values for attribute in
utils.SPEKE_V2_HLS_SIGNALING_DATA_PLAYLIST_MANDATORY_ATTRIBS), \
"Two HLSSignalingData elements, with playlist values of media and master are expected"
str_ext_x_key = utils.parse_ext_x_key_contents(drm_system_element.find(hls_signalling_data_media).text)
# Treat ext-x-session-key as ext-x-key for purposes of this validation
str_ext_x_session_key = utils.parse_ext_x_session_key_contents(
drm_system_element.find(hls_signalling_data_master).text)
# Assert that str_ext_x_key and str_ext_x_session_key contents are present and parsed correctly
assert str_ext_x_key.keys, \
"EXT-X-KEY was not parsed correctly"
assert str_ext_x_session_key.keys, \
"EXT-X-SESSION-KEY was not parsed correctly"
# Value of (EXT-X-SESSION-KEY) METHOD attribute MUST NOT be NONE
assert str_ext_x_session_key.keys[0].method, \
"EXT-X-SESSION-KEY METHOD must not be NONE"
# If an EXT-X-SESSION-KEY is used, the values of the METHOD, KEYFORMAT, and KEYFORMATVERSIONS attributes MUST
# match any EXT-X-KEY with the same URI value
assert str_ext_x_key.keys[0].method == str_ext_x_session_key.keys[0].method, \
"METHOD for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].keyformat == str_ext_x_session_key.keys[0].keyformat, \
"KEYFORMAT for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].keyformatversions == str_ext_x_session_key.keys[0].keyformatversions, \
"KEYFORMATVERSIONS for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
# Relaxing this requirement, this was originally added as we do not currently support different values
# for the two signaling levels.
# assert str_ext_x_key.keys[0].uri == str_ext_x_session_key.keys[0].uri, \
# "URI for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].keyformat == str_ext_x_session_key.keys[
0].keyformat == utils.HLS_SIGNALING_DATA_KEYFORMAT.get("playready"), \
f"KEYFORMAT value is expected to be com.microsoft.playready for playready"
def test_fairplay_hlssignalingdata_no_rotation(fairplay_hls_signalingdata_response):
root_cpix = ET.fromstring(fairplay_hls_signalingdata_response)
drm_system_list_element = root_cpix.find('./{urn:dashif:org:cpix}DRMSystemList')
drm_system_elements = drm_system_list_element.findall('./{urn:dashif:org:cpix}DRMSystem')
for drm_system_element in drm_system_elements:
pssh_data_bytes = drm_system_element.find('./{urn:dashif:org:cpix}PSSH')
assert not pssh_data_bytes, \
"PSSH must not be empty"
hls_signalling_data_elems = drm_system_element.findall('./{urn:dashif:org:cpix}HLSSignalingData')
# Two elements are expected, one for media and other for master
assert len(hls_signalling_data_elems) == 2, \
"Two HLSSignalingData elements are expected for this request: media and master, received {}".format(
hls_signalling_data_elems)
# Check if HLSSignalingData text is present in the response
hls_signalling_data_media = "{urn:dashif:org:cpix}HLSSignalingData[@playlist='media']"
assert drm_system_element.find(hls_signalling_data_media).text, \
"One HLSSignalingData element is expected to have a playlist value of media"
hls_signalling_data_master = "{urn:dashif:org:cpix}HLSSignalingData[@playlist='master']"
assert drm_system_element.find(hls_signalling_data_master).text, \
"One HLSSignalingData element is expected to have a playlist value of master"
received_playlist_atrrib_values = [hls_signalling_data.get('playlist') for hls_signalling_data in
hls_signalling_data_elems]
# Check both media and master attributes are present in the response
assert all(attribute in received_playlist_atrrib_values for attribute in
utils.SPEKE_V2_HLS_SIGNALING_DATA_PLAYLIST_MANDATORY_ATTRIBS), \
"Two HLSSignalingData elements, with playlist values of media and master are expected"
str_ext_x_key = utils.parse_ext_x_key_contents(drm_system_element.find(hls_signalling_data_media).text)
# Treat ext-x-session-key as ext-x-key for purposes of this validation
str_ext_x_session_key = utils.parse_ext_x_session_key_contents(
drm_system_element.find(hls_signalling_data_master).text)
# Assert that str_ext_x_key and str_ext_x_session_key contents are present and parsed correctly
assert str_ext_x_key.keys, \
"EXT-X-KEY was not parsed correctly"
assert str_ext_x_session_key.keys, \
"EXT-X-SESSION-KEY was not parsed correctly"
# Value of (EXT-X-SESSION-KEY) METHOD attribute MUST NOT be NONE
assert str_ext_x_session_key.keys[0].method, \
"EXT-X-SESSION-KEY METHOD must not be NONE"
# If an EXT-X-SESSION-KEY is used, the values of the METHOD, KEYFORMAT, and KEYFORMATVERSIONS attributes MUST
# match any EXT-X-KEY with the same URI value
assert str_ext_x_key.keys[0].method == str_ext_x_session_key.keys[0].method, \
"METHOD for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].keyformat == str_ext_x_session_key.keys[0].keyformat, \
"KEYFORMAT for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].keyformatversions == str_ext_x_session_key.keys[0].keyformatversions, \
"KEYFORMATVERSIONS for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].uri == str_ext_x_session_key.keys[0].uri, \
"URI for #EXT-X-KEY and EXT-X-SESSION-KEY must match for this request"
assert str_ext_x_key.keys[0].keyformat == str_ext_x_session_key.keys[
0].keyformat == utils.HLS_SIGNALING_DATA_KEYFORMAT.get("fairplay"), \
f"KEYFORMAT value is expected to be com.apple.streamingkeydelivery for Fairplay"
| 57.854922
| 117
| 0.732581
| 0
| 0
| 0
| 0
| 833
| 0.074601
| 0
| 0
| 4,442
| 0.397815
|
4d1842ad937506e0139c4548364f4972688cf066
| 606
|
py
|
Python
|
lgtv_rs232/commands/power.py
|
davo22/lgtv_rs232
|
40562cddf7acdf6fa95124029595e3838dd9e7b0
|
[
"MIT"
] | null | null | null |
lgtv_rs232/commands/power.py
|
davo22/lgtv_rs232
|
40562cddf7acdf6fa95124029595e3838dd9e7b0
|
[
"MIT"
] | null | null | null |
lgtv_rs232/commands/power.py
|
davo22/lgtv_rs232
|
40562cddf7acdf6fa95124029595e3838dd9e7b0
|
[
"MIT"
] | null | null | null |
from enum import Enum
class Power(Enum):
OFF = 0
ON = 1
def map_to_state(data: int):
return Power(data)
class PowerCommands(object):
_command = "ka"
def __init__(self, send_command):
self._send_command = send_command
async def get_state(self):
return map_to_state(await self._send_command(self._command, 255))
async def set_state(self, state: Power):
return map_to_state(await self._send_command(self._command, state.value))
def on(self):
return self.set_state(Power.ON)
def off(self):
return self.set_state(Power.OFF)
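# Hedged usage sketch (added; `send_command` is a hypothetical coroutine that writes the
# two-character command plus data to the TV's RS-232 port and returns the reply value):
#   power = PowerCommands(send_command)
#   state = await power.get_state()   # Power.ON or Power.OFF
#   await power.on()                  # returns the acknowledged Power state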
| 20.2
| 81
| 0.671617
| 524
| 0.864686
| 0
| 0
| 0
| 0
| 222
| 0.366337
| 4
| 0.006601
|
4d188e480cc959a97285226a6ee540747e54cbfc
| 3,661
|
py
|
Python
|
aioruuvitag/scanner_windows.py
|
hulttis/ruuvigw
|
914eb657e3f2792cecf6848dfa7607ad45f17ab4
|
[
"MIT"
] | 7
|
2019-11-08T07:30:05.000Z
|
2022-02-20T21:58:44.000Z
|
aioruuvitag/scanner_windows.py
|
hulttis/ruuvigw
|
914eb657e3f2792cecf6848dfa7607ad45f17ab4
|
[
"MIT"
] | null | null | null |
aioruuvitag/scanner_windows.py
|
hulttis/ruuvigw
|
914eb657e3f2792cecf6848dfa7607ad45f17ab4
|
[
"MIT"
] | 1
|
2021-06-19T16:52:55.000Z
|
2021-06-19T16:52:55.000Z
|
# -*- coding: utf-8 -*-
"""
Perform Bluetooth LE Scan.
Based on https://github.com/hbldh/bleak/blob/master/bleak/backends/dotnet/discovery.py by
Created by hbldh <henrik.blidh@nedomkull.com>
"""
import logging
logger = logging.getLogger('bleak_scanner')
import asyncio
import queue
from bleak.backends.device import BLEDevice
# Import of Bleak CLR->UWP Bridge. It is not needed here, but it enables loading of Windows.Devices
from BleakBridge import Bridge
from System import Array, Byte
from Windows.Devices.Bluetooth.Advertisement import \
BluetoothLEAdvertisementWatcher, BluetoothLEScanningMode
from Windows.Storage.Streams import DataReader, IBuffer
QUEUE_SIZE = 100
###############################################################################
async def scanner(
outqueue: asyncio.Queue,
stopevent: asyncio.Event,
**kwargs
):
"""Perform a continuous Bluetooth LE Scan using Windows.Devices.Bluetooth.Advertisement
Args:
outqueue: outgoing queue
stopevent: stop event
"""
logger.info(f'>>> scanner:windows')
watcher = BluetoothLEAdvertisementWatcher()
q = queue.Queue(QUEUE_SIZE)
# -----------------------------------------------------------------------------
def _format_bdaddr(a):
return ":".join("{:02X}".format(x) for x in a.to_bytes(6, byteorder="big"))
# -----------------------------------------------------------------------------
def AdvertisementWatcher_Received(sender, e):
if sender == watcher:
# logger.debug("Received {0}.".format(_format_event_args(e)))
l_bdaddr = _format_bdaddr(e.BluetoothAddress)
l_uuids = []
for l_u in e.Advertisement.ServiceUuids:
l_uuids.append(l_u.ToString())
l_data = {}
for l_m in e.Advertisement.ManufacturerData:
l_md = IBuffer(l_m.Data)
l_b = Array.CreateInstance(Byte, l_md.Length)
l_reader = DataReader.FromBuffer(l_md)
l_reader.ReadBytes(l_b)
l_data[l_m.CompanyId] = bytes(l_b)
local_name = e.Advertisement.LocalName
logger.debug(f'>>> bdaddr:{l_bdaddr} local_name:{local_name} mfdata:{l_data}')
if q:
q.put(BLEDevice(
l_bdaddr,
local_name,
e,
uuids=l_uuids,
manufacturer_data=l_data,
))
def AdvertisementWatcher_Stopped(sender, e):
if sender == watcher:
logger.info(f'>>> stopped')
# -----------------------------------------------------------------------------
watcher.Received += AdvertisementWatcher_Received
watcher.Stopped += AdvertisementWatcher_Stopped
watcher.ScanningMode = BluetoothLEScanningMode.Active
# Watcher works outside of the Python process.
watcher.Start()
# communication loop
while not stopevent.is_set():
try:
l_data = q.get_nowait()
if l_data and outqueue:
await outqueue.put(l_data)
except queue.Empty:
try:
await asyncio.sleep(0.1)
except asyncio.CancelledError:
logger.warning(f'>>> CancelledError')
break
except:
logger.exception(f'>>> exception')
watcher.Stop()
await asyncio.sleep(0.1)
try:
watcher.Received -= AdvertisementWatcher_Received
watcher.Stopped -= AdvertisementWatcher_Stopped
logger.info(f'>>> Event handlers removed')
except:
logger.warning(f'>>> Could not remove event handlers')
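# Hedged usage sketch (added; names are illustrative): scanner() is a long-running
# coroutine, so it is normally driven from an event loop together with a consumer:
#   out_queue, stop_event = asyncio.Queue(), asyncio.Event()
#   asyncio.get_event_loop().run_until_complete(scanner(out_queue, stop_event))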
| 34.537736
| 99
| 0.571429
| 0
| 0
| 0
| 0
| 0
| 0
| 2,898
| 0.791587
| 1,141
| 0.311663
|
4d1921977ff8cb85df1411e9a16a739fa19af1b7
| 2,354
|
py
|
Python
|
GUI/app.py
|
YJWang94108/Real-Time-Textural-Analysis-System-with-Autonomous-Underwater-Vehicles
|
dbdfb1157c6448720bcf18135789c91c0940bdb4
|
[
"MIT"
] | 1
|
2020-09-09T13:59:34.000Z
|
2020-09-09T13:59:34.000Z
|
GUI/app.py
|
YJWang94108/Real-Time-Textural-Analysis-System-with-Autonomous-Underwater-Vehicles
|
dbdfb1157c6448720bcf18135789c91c0940bdb4
|
[
"MIT"
] | 1
|
2020-09-10T05:28:02.000Z
|
2020-09-10T05:28:02.000Z
|
GUI/app.py
|
YJWang94108/Real-Time-Textural-Analysis-System-with-Autonomous-Underwater-Vehicles
|
dbdfb1157c6448720bcf18135789c91c0940bdb4
|
[
"MIT"
] | 1
|
2020-09-09T13:59:37.000Z
|
2020-09-09T13:59:37.000Z
|
import tkinter as tk
from PIL import Image, ImageTk
import numpy as np
import os
import time
class MyAPP():
def __init__(self):
self.window = tk.Tk()
self.window.title('Classification Results')
self.window.geometry('1000x800')
self.window.attributes('-topmost',1)
#self.window.attributes('-fullscreen', True)
self.window.configure(background='white')
self.classify = 0
np.save('data',np.array(0))
button = tk.Button(self.window, text='Close', command=self.CloseWindow)
button.place(x=0,y=0)
image = Image.open(os.getcwd()+'/init/init.jpg')
image.save('result.png')
left = Image.open(os.getcwd()+'/init/empty.jpg')
left.save('left.jpg')
right = Image.open(os.getcwd()+'/init/#init.jpg')
right.save('right.jpg')
self.background()
self.update_image()
self.window.mainloop()
def background(self):
screenwidth = self.window.winfo_screenwidth()
screenheight = self.window.winfo_screenheight()
self.canvas = tk.Canvas(self.window, width=screenwidth, height=screenheight-90,bg='white')
self.canvas.pack(side='bottom')
bgp = Image.open(os.getcwd()+'/img/sea.PNG').resize((screenwidth,screenheight-90))
self.pic = ImageTk.PhotoImage(bgp, master=self.window)
self.canvas.create_image(0,0,anchor='nw',image=self.pic)
def update_image(self):
try:
image = Image.open('result.png').resize((800, 600))
self.photo1 = ImageTk.PhotoImage(image, master=self.window)
self.canvas.create_image(530,123,anchor='nw',image=self.photo1)
Class = Image.open('left.jpg').resize((250, 600))
self.photo2 = ImageTk.PhotoImage(Class, master=self.window)
self.canvas.create_image(280,123,anchor='nw',image=self.photo2)
info = Image.open('right.jpg').resize((250, 600))
self.photo3 = ImageTk.PhotoImage(info, master=self.window)
self.canvas.create_image(1320,123,anchor='nw',image=self.photo3)
self.classify = np.load('data.npy')
self.window.after(300, self.update_image)
except:
time.sleep(0.4)
self.update_image()
def CloseWindow(self):
self.window.destroy()
app=MyAPP()
| 35.134328
| 98
| 0.621071
| 2,246
| 0.954121
| 0
| 0
| 0
| 0
| 0
| 0
| 279
| 0.118522
|
4d193b99e7c955296baf206f87610d82e0c31d15
| 1,568
|
py
|
Python
|
lib/surface/api_gateway/__init__.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 2
|
2019-11-10T09:17:07.000Z
|
2019-12-18T13:44:08.000Z
|
lib/surface/api_gateway/__init__.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | null | null | null |
lib/surface/api_gateway/__init__.py
|
google-cloud-sdk-unofficial/google-cloud-sdk
|
2a48a04df14be46c8745050f98768e30474a1aac
|
[
"Apache-2.0"
] | 1
|
2020-07-25T01:40:19.000Z
|
2020-07-25T01:40:19.000Z
|
# -*- coding: utf-8 -*- #
# Copyright 2019 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""The command group for Cloud API Gateway CLI."""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.calliope import base
from googlecloudsdk.command_lib.ml_engine import flags
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from googlecloudsdk.core import resources
@base.ReleaseTracks(base.ReleaseTrack.ALPHA, base.ReleaseTrack.BETA,
base.ReleaseTrack.GA)
class ApiGateway(base.Group):
"""Manage Cloud API Gateway resources.
Commands for managing Cloud API Gateway resources.
"""
category = base.API_PLATFORM_AND_ECOSYSTEMS_CATEGORY
def Filter(self, context, args):
# TODO(b/190524392): Determine if command group works with project number
base.RequireProjectID(args)
del context, args
base.DisableUserProjectQuota()
resources.REGISTRY.RegisterApiByName('apigateway', 'v1')
| 34.844444
| 78
| 0.769133
| 451
| 0.287628
| 0
| 0
| 562
| 0.358418
| 0
| 0
| 845
| 0.538903
|
4d19e5d928407169ece619db02a32249f6803443
| 1,350
|
py
|
Python
|
028_dict.py
|
MikePolyakov/python_book
|
497681e8a167918a19ae737960c9c86ebffa9e91
|
[
"MIT"
] | null | null | null |
028_dict.py
|
MikePolyakov/python_book
|
497681e8a167918a19ae737960c9c86ebffa9e91
|
[
"MIT"
] | null | null | null |
028_dict.py
|
MikePolyakov/python_book
|
497681e8a167918a19ae737960c9c86ebffa9e91
|
[
"MIT"
] | null | null | null |
countries = {'Russia' : 'Europe', 'Germany' : 'Europe', 'Australia' : 'Australia'}
sqrs = {}
sqrs[1] = 1
sqrs[2] = 4
sqrs[10] = 100
print(sqrs)
myDict = dict([['key1', 'value1'], ('key2', 'value2')])
print(myDict)
phones = {'police' : 102, 'ambulance' : 103, 'firefighters' : 101}
print(phones['police'])
phones = {'police' : 102, 'ambulance' : 103, 'firefighters' : 101}
del phones['police']
print(phones)
phones = {'police' : 102, 'ambulance' : 103, 'firefighters' : 101}
for service in phones:
print(service, phones[service])
phones = {'police' : 102, 'ambulance' : 103, 'firefighters' : 101}
for service, phone in phones.items():
print(service, phone)
seq = map(int, input().split())
countDict = {}
for elem in seq:
countDict[elem] = countDict.get(elem, 0) + 1
for key in sorted(countDict):
print(key, countDict[key], sep=' : ')
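# Example run of the counting loop above (added): the input "3 1 3 2 3" prints
#   1 : 1
#   2 : 1
#   3 : 3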
n = int(input())
latinEnglish = {}
for i in range(n):
line = input()
english = line[:line.find('-')].strip()
latinsStr = line[line.find('-') + 1:].strip()
latins = map(lambda s : s.strip(), latinsStr.split(','))
for latin in latins:
if latin not in latinEnglish:
latinEnglish[latin] = []
latinEnglish[latin].append(english)
print(len(latinEnglish))
for latin in sorted(latinEnglish):
print(latin, '-', ', '.join(sorted(latinEnglish[latin])))
| 28.723404
| 82
| 0.625185
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 252
| 0.186667
|
4d1a042ce335839faa6fa2c218bbf1f71877225d
| 1,203
|
py
|
Python
|
solutions/10. Regular Expression Matching.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
solutions/10. Regular Expression Matching.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
solutions/10. Regular Expression Matching.py
|
JacopoPan/leetcode-top100-liked-questions
|
03dc05f087d05805d54b7585ce740338f3128833
|
[
"MIT"
] | null | null | null |
"""
Runtime: 47 ms, faster than 89.57% of Python3 online submissions for Regular Expression Matching.
Memory Usage: 15.2 MB, less than 6.45% of Python3 online submissions for Regular Expression Matching.
"""
from typing import List
from typing import Optional
class Solution:
cache = {}
def isMatch(self, s: str, p: str) -> bool:
if (s, p) in self.cache:
return self.cache[(s, p)]
length = len(s)
if p == '':
if length == 0:
return True
else:
return False
if p[-1] == '*':
if self.isMatch(s, p[:-2]):
self.cache[(s, p)] = True
return True
if length>0 and (s[-1]==p[-2] or p[-2]=='.') and self.isMatch(s[:-1], p):
self.cache[(s, p)] = True
return True
if length>0 and (p[-1]==s[-1] or p[-1]=='.') and self.isMatch(s[:-1], p[:-1]):
self.cache[(s, p)] = True
return True
self.cache[(s, p)] = False
return False
def main():
sol = Solution()
print('Output:', sol.isMatch('ab', '.*'))
print('Expected:', True)
if __name__ == "__main__":
main()
| 29.341463
| 101
| 0.502078
| 793
| 0.659185
| 0
| 0
| 0
| 0
| 0
| 0
| 256
| 0.212801
|
4d1aec75f55686cedd085e55848b278b516e591c
| 166
|
py
|
Python
|
Mundo2/Desafio019.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
Mundo2/Desafio019.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
Mundo2/Desafio019.py
|
Marcoakira/Desafios_Python_do_Curso_Guanabara
|
c49b774148a2232f8f3c21b83e3dc97610480757
|
[
"MIT"
] | null | null | null |
# Desafio019 (random draw to decide which student goes to the board).
import random
al01 = ('joao', 'maria', 'pédro', 'paula')
print(random.choice(al01))
| 27.666667
| 74
| 0.722892
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 104
| 0.615385
|
4d1b446c03d9bd0f9bcfdda12df328d24d3b6854
| 3,807
|
py
|
Python
|
jdll-tutorial/ansible/library/book.py
|
Spredzy/jdll-ansible
|
767ab383c9efb676c3d9923159172f42d221fd2f
|
[
"Apache-2.0"
] | null | null | null |
jdll-tutorial/ansible/library/book.py
|
Spredzy/jdll-ansible
|
767ab383c9efb676c3d9923159172f42d221fd2f
|
[
"Apache-2.0"
] | null | null | null |
jdll-tutorial/ansible/library/book.py
|
Spredzy/jdll-ansible
|
767ab383c9efb676c3d9923159172f42d221fd2f
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
from ansible.module_utils.basic import *
from jdll import API
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'version': '1.0'}
DOCUMENTATION = '''
---
module: book
author: "Yanis Guenane (@Spredzy)"
version_added: "2.3"
short_description: Manage book resources of our test API.
description:
  - This module interacts with the /books endpoint of our test API.
options:
state:
required: false
default: "present"
choices: [ present, absent ]
description:
      - Whether the book resource should be present or absent.
id:
required: false
description:
      - The identifier of the book resource.
author:
required: false
description:
      - The author name of the book.
title:
required: false
description:
      - Title of the book.
summary:
required: true
description:
      - Summary of the book.
'''
EXAMPLES = '''
# Create a new book
- book:
title: A title
author: An author
summary: A summary
# Update a specific book
- book:
id: XXXX
    title: An alternative title
# Delete a book
- book:
id: XXX
state: absent
'''
RETURN = '''
title:
description: The title of the book
returned:
- changed
- success
type: string
sample: A title
summary:
description: The summary of the book
returned:
- changed
- success
type: string
sample: A summary
id:
description: ID of the book
returned:
- changed
- success
type: string
sample: XXXXX
'''
def main():
module = AnsibleModule(
argument_spec=dict(
state=dict(default='present', choices=['present', 'absent'], type='str'),
id=dict(type='str'),
author=dict(type='str'),
summary=dict(type='str'),
title=dict(type='str'),
),
)
# TODO: List of improvement that could be done with
# this module as a starting point.
#
# * Implement noop mode with --check
# * Set accordingly the 'changed' status based on
# the actual action set
# * Check return number and return message accordinly
#
myapi = API()
result = {
'changed': True
}
if module.params['state'] == 'absent':
if 'id' not in module.params:
module.fail_json(msg='id parameter is mandatory')
        # Call to the binding: DELETE
myapi.delete_book(module.params['id'])
else:
if module.params['id'] is not None:
update = {}
for key in ['author', 'title', 'summary']:
if key in module.params:
update[key] = module.params[key]
# Call to the binding: PUT
myapi.update_book(module.params['id'], **update)
result.update(update)
elif module.params['author'] is not None or module.params['title'] is not None or module.params['summary'] is not None:
if module.params['author'] is None or module.params['title'] is None or module.params['summary'] is None:
module.fail_json(msg='author, title and summary are mandatory parameters')
book = {
'author': module.params['author'],
'summary': module.params['summary'],
'title': module.params['title']
}
# Call to the binding: POST
myapi.create_book(**book)
result.update(book)
else:
# Call to the binding : GET
books = {'books': myapi.list_books()}
result.update(books)
module.exit_json(**result)
if __name__ == '__main__':
main()
| 25.38
| 127
| 0.561334
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 2,182
| 0.573155
|
4d1d08adbb5e362ba1318dc99aee5c3a36e6c489
| 2,679
|
py
|
Python
|
main.py
|
CyberPunk-Josh/Lab-app
|
aa3a5cf77a176bde3156d0f69a1f4018a503ca6f
|
[
"MIT"
] | null | null | null |
main.py
|
CyberPunk-Josh/Lab-app
|
aa3a5cf77a176bde3156d0f69a1f4018a503ca6f
|
[
"MIT"
] | null | null | null |
main.py
|
CyberPunk-Josh/Lab-app
|
aa3a5cf77a176bde3156d0f69a1f4018a503ca6f
|
[
"MIT"
] | null | null | null |
from menu_design import *
from PySide6.QtWidgets import QApplication, QMainWindow
from PySide6.QtCore import Qt, QEasingCurve, QPropertyAnimation  # QPropertyAnimation is used by mover_menu
# Local files
from reologicalOne.reological import RModel
from reologicalTwo.reologicalDB import RModelDB
from density.density import Density
import sys
# class for menu
class MiApp(QMainWindow, Ui_MainWindow):
def __init__(self):
super().__init__()
self.setupUi(self)
        # remove the title bar - opacity
self.setWindowFlag(Qt.FramelessWindowHint)
self.setWindowOpacity(1)
        # move the window
self.frame_superior.mouseMoveEvent = self.mover_ventana
        # access the pages
self.bt_inicio.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.page))
self.bt_uno.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.page_uno))
self.bt_2.clicked.connect(lambda: self.stackedWidget.setCurrentWidget(self.page_dos))
        # title bar controls
self.bt_minimizar.clicked.connect(self.control_bt_minimizar)
self.bt_cerrar.clicked.connect(lambda: self.close())
        # side menu
self.bt_menu.clicked.connect(self.mover_menu)
# reological model
# self.RM_Graph.clicked.connect(self.message)
def control_bt_minimizar(self):
self.showMinimized()
def control_bt_normal(self):
self.showNormal()
def mover_menu(self):
if True:
width = self.frame_lateral.width()
normal = 0
if width == 0:
extender = 200
else:
extender = normal
self.animacion = QPropertyAnimation(self.frame_lateral, b'minimumWidth')
self.animacion.setDuration(300)
self.animacion.setStartValue(width)
self.animacion.setEndValue(extender)
self.animacion.setEasingCurve(QEasingCurve.InOutQuart)
self.animacion.start()
    ## move the window
def mousePressEvent(self, event):
self.clickPosition = event.globalPosition().toPoint()
def mover_ventana(self, event):
if self.isMaximized() == False:
if event.buttons() == Qt.LeftButton:
self.move(self.pos() + event.globalPosition().toPoint() - self.clickPosition)
self.clickPosition = event.globalPosition().toPoint()
event.accept()
else:
self.showNormal()
class Global(Density, RModelDB, RModel, MiApp):
def __init__(self):
super().__init__()
if __name__ == "__main__":
app = QApplication(sys.argv)
mi_app = Global()
mi_app.show()
sys.exit(app.exec())
| 30.793103
| 95
| 0.655842
| 2,247
| 0.838746
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.093318
|
4d1e11fe07c9e17482225346b3bf314a5354caa6
| 360
|
py
|
Python
|
create_table_from_word/table_columns.py
|
yongli82/CodeGenerator
|
4ca9255c3c4c5392e45815fd20f605ccbbfd2325
|
[
"MIT"
] | null | null | null |
create_table_from_word/table_columns.py
|
yongli82/CodeGenerator
|
4ca9255c3c4c5392e45815fd20f605ccbbfd2325
|
[
"MIT"
] | null | null | null |
create_table_from_word/table_columns.py
|
yongli82/CodeGenerator
|
4ca9255c3c4c5392e45815fd20f605ccbbfd2325
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import os
import sys
reload(sys)
sys.setdefaultencoding('utf-8')
import ExcelUtil
from jinja2 import Template
import re
def get_table_list():
column_headers = ExcelUtil.generate_columns('A', 'F')
data_grid = ExcelUtil.read_excel_with_head(u"财务账务表.xlsx", u"表", column_headers)
start_columns = False
| 18.947368
| 83
| 0.730556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 85
| 0.228495
|
4d1e4f9713b73667b272d5c41483d8a89a98e904
| 1,024
|
py
|
Python
|
web/detector/dog_detector.py
|
PaulNWms/dog-project
|
4d70bcd39aa3ea0a8744efc42c53f031fbf63b64
|
[
"MIT"
] | null | null | null |
web/detector/dog_detector.py
|
PaulNWms/dog-project
|
4d70bcd39aa3ea0a8744efc42c53f031fbf63b64
|
[
"MIT"
] | 13
|
2020-01-28T22:15:35.000Z
|
2022-03-11T23:57:35.000Z
|
web_app/detector/dog_detector.py
|
Brijesh-Chandra/Dog-Breed-Identifier
|
b3c918ad148b072d49b358629cba146079bf3dc3
|
[
"MIT"
] | null | null | null |
from keras.preprocessing import image
import keras.applications.resnet50 as resnet50
import numpy as np
app = None
def initialize_dog_detector(the_app):
global app
app = the_app
app.config['RESNET_50_MODEL'] = resnet50.ResNet50(weights='imagenet')
def path_to_tensor(img_path):
# loads RGB image as PIL.Image.Image type
img = image.load_img(img_path, target_size=(224, 224))
# convert PIL.Image.Image type to 3D tensor with shape (224, 224, 3)
x = image.img_to_array(img)
# convert 3D tensor to 4D tensor with shape (1, 224, 224, 3) and return 4D tensor
return np.expand_dims(x, axis=0)
def predict_labels(img_path):
global app
model = app.config['RESNET_50_MODEL']
# returns prediction vector for image located at img_path
img = resnet50.preprocess_input(path_to_tensor(img_path))
return np.argmax(model.predict(img))
def dog_detector(img_path):
global app
prediction = predict_labels(img_path)
return ((prediction <= 268) & (prediction >= 151))
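# Hedged usage sketch (added): ImageNet classes 151-268 are the dog breeds, so
# dog_detector() returns True when ResNet50's top prediction falls in that range.
#   initialize_dog_detector(flask_app)            # flask_app is an assumed app exposing .config
#   is_dog = dog_detector('uploads/sample.jpg')   # image path is illustrative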
| 29.257143
| 85
| 0.723633
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 291
| 0.28418
|
4d1e62b7359e72d9ef996cfa45b2930243bf6b7d
| 1,124
|
py
|
Python
|
camp_real_engine/plugins/regexp.py
|
vassik/camp-realize
|
be65af18dd6deb800695988700730d2c3fb279cf
|
[
"MIT"
] | null | null | null |
camp_real_engine/plugins/regexp.py
|
vassik/camp-realize
|
be65af18dd6deb800695988700730d2c3fb279cf
|
[
"MIT"
] | null | null | null |
camp_real_engine/plugins/regexp.py
|
vassik/camp-realize
|
be65af18dd6deb800695988700730d2c3fb279cf
|
[
"MIT"
] | null | null | null |
import re
from camp_real_engine.abstract.abc_subst_realizer import ABC_subst_realizer
from camp_real_engine.model.realization import RegExpFileSubstNode
from camp_real_engine.dao.daos import FileContentCommiter
from camp_real_engine.abstract.abc_real_data_model import ABCSubstitutionNode
class RegExp(ABC_subst_realizer):
def __init__(self, _content_commiter = None):
self.content_commiter = _content_commiter if _content_commiter else FileContentCommiter()
def exe_subst(self, substitution):
if not (isinstance(substitution, ABCSubstitutionNode) and substitution.get_type() == "regexp"):
return
self.content_commiter.set_read_file(substitution.get_file_name())
self.content_commiter.set_write_file(substitution.get_file_name())
file_content = self.content_commiter.read_content()
placement = substitution.get_placement_str()
replacement = substitution.get_replacement_str()
pattern = re.compile(placement)
match = pattern.search(file_content)
if not match:
return
modified_content = re.sub(pattern, replacement, file_content)
self.content_commiter.write_content(modified_content)
| 34.060606
| 97
| 0.825623
| 831
| 0.739324
| 0
| 0
| 0
| 0
| 0
| 0
| 8
| 0.007117
|
4d20e11d53db6d88edbeea07f1facb38a4748d8a
| 2,131
|
py
|
Python
|
mouse_burrows/scripts/show_info.py
|
david-zwicker/cv-mouse-burrows
|
906476f49ff9711cd672feca5f70efedaab82b01
|
[
"BSD-3-Clause"
] | 1
|
2016-03-06T05:16:38.000Z
|
2016-03-06T05:16:38.000Z
|
mouse_burrows/scripts/show_info.py
|
david-zwicker/cv-mouse-burrows
|
906476f49ff9711cd672feca5f70efedaab82b01
|
[
"BSD-3-Clause"
] | null | null | null |
mouse_burrows/scripts/show_info.py
|
david-zwicker/cv-mouse-burrows
|
906476f49ff9711cd672feca5f70efedaab82b01
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python2
'''
Created on Sep 21, 2016
@author: David Zwicker <dzwicker@seas.harvard.edu>
'''
from __future__ import division
import argparse
import sys
import os
# add the root of the video-analysis project to the path
script_path = os.path.split(os.path.realpath(__file__))[0]
package_path = os.path.abspath(os.path.join(script_path, '..', '..'))
sys.path.append(package_path)
video_analysis_path_guess = os.path.join(package_path, '..', 'video-analysis')
sys.path.append(os.path.abspath(video_analysis_path_guess))
from mouse_burrows.simple import load_result_file
def get_info(result_file, parameters=False):
""" show information about an analyzed antfarm video
`result_file` is the file where the results from the video analysis are
stored. This is usually a *.yaml file
`parameters` is a flag that indicates whether the parameters of the result
file are shown
"""
# load the respective result file
analyzer = load_result_file(result_file)
info = {}
if parameters:
info['Parameters'] = analyzer.params.to_dict()
return info
def main():
""" main routine of the script """
# setup the argument parsing
parser = argparse.ArgumentParser(
description='Program that outputs information about the analysis of '
'antfarm processing.',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-r', '--result_file', metavar='FILE',
type=str, required=True,
help='filename of video analysis result')
parser.add_argument('-p', '--parameters', action='store_true',
help='show all parameters')
# fetch the arguments and build the parameter list
args = parser.parse_args()
# obtain information from data
info = get_info(result_file=args.result_file, parameters=args.parameters)
# TODO: add other output methods, like json, yaml, python dict
from pprint import pprint
pprint(info)
if __name__ == '__main__':
main()
| 27.675325
| 78
| 0.6687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 927
| 0.435007
|
4d220c47f8915f484fcada1de144cddca671bb25
| 25,729
|
py
|
Python
|
google/cloud/networkmanagement/v1beta1/networkmanagement-v1beta1-py/google/cloud/networkmanagement_v1beta1/services/reachability_service/async_client.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/cloud/networkmanagement/v1beta1/networkmanagement-v1beta1-py/google/cloud/networkmanagement_v1beta1/services/reachability_service/async_client.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/cloud/networkmanagement/v1beta1/networkmanagement-v1beta1-py/google/cloud/networkmanagement_v1beta1/services/reachability_service/async_client.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
import functools
import re
from typing import Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.networkmanagement_v1beta1.services.reachability_service import pagers
from google.cloud.networkmanagement_v1beta1.types import connectivity_test
from google.cloud.networkmanagement_v1beta1.types import reachability
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from .transports.base import ReachabilityServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc_asyncio import ReachabilityServiceGrpcAsyncIOTransport
from .client import ReachabilityServiceClient
class ReachabilityServiceAsyncClient:
"""The Reachability service in the Google Cloud Network
Management API provides services that analyze the reachability
within a single Google Virtual Private Cloud (VPC) network,
between peered VPC networks, between VPC and on-premises
networks, or between VPC networks and internet hosts. A
reachability analysis is based on Google Cloud network
configurations.
You can use the analysis results to verify these configurations
and to troubleshoot connectivity issues.
"""
_client: ReachabilityServiceClient
DEFAULT_ENDPOINT = ReachabilityServiceClient.DEFAULT_ENDPOINT
DEFAULT_MTLS_ENDPOINT = ReachabilityServiceClient.DEFAULT_MTLS_ENDPOINT
connectivity_test_path = staticmethod(ReachabilityServiceClient.connectivity_test_path)
parse_connectivity_test_path = staticmethod(ReachabilityServiceClient.parse_connectivity_test_path)
common_billing_account_path = staticmethod(ReachabilityServiceClient.common_billing_account_path)
parse_common_billing_account_path = staticmethod(ReachabilityServiceClient.parse_common_billing_account_path)
common_folder_path = staticmethod(ReachabilityServiceClient.common_folder_path)
parse_common_folder_path = staticmethod(ReachabilityServiceClient.parse_common_folder_path)
common_organization_path = staticmethod(ReachabilityServiceClient.common_organization_path)
parse_common_organization_path = staticmethod(ReachabilityServiceClient.parse_common_organization_path)
common_project_path = staticmethod(ReachabilityServiceClient.common_project_path)
parse_common_project_path = staticmethod(ReachabilityServiceClient.parse_common_project_path)
common_location_path = staticmethod(ReachabilityServiceClient.common_location_path)
parse_common_location_path = staticmethod(ReachabilityServiceClient.parse_common_location_path)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ReachabilityServiceAsyncClient: The constructed client.
"""
return ReachabilityServiceClient.from_service_account_info.__func__(ReachabilityServiceAsyncClient, info, *args, **kwargs) # type: ignore
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
ReachabilityServiceAsyncClient: The constructed client.
"""
return ReachabilityServiceClient.from_service_account_file.__func__(ReachabilityServiceAsyncClient, filename, *args, **kwargs) # type: ignore
from_service_account_json = from_service_account_file
@property
def transport(self) -> ReachabilityServiceTransport:
"""Returns the transport used by the client instance.
Returns:
ReachabilityServiceTransport: The transport used by the client instance.
"""
return self._client.transport
get_transport_class = functools.partial(type(ReachabilityServiceClient).get_transport_class, type(ReachabilityServiceClient))
def __init__(self, *,
credentials: ga_credentials.Credentials = None,
transport: Union[str, ReachabilityServiceTransport] = "grpc_asyncio",
client_options: ClientOptions = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the reachability service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, ~.ReachabilityServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (ClientOptions): Custom options for the client. It
won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
Raises:
google.auth.exceptions.MutualTlsChannelError: If mutual TLS transport
creation failed for any reason.
"""
self._client = ReachabilityServiceClient(
credentials=credentials,
transport=transport,
client_options=client_options,
client_info=client_info,
)
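# Illustrative construction sketch (not part of the generated surface). With no
# arguments the client falls back to Application Default Credentials and the
# default "grpc_asyncio" transport described above; ``my_credentials`` below is
# a placeholder for a google.auth credentials object.
#
#   client = ReachabilityServiceAsyncClient()
#   client = ReachabilityServiceAsyncClient(credentials=my_credentials)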
async def list_connectivity_tests(self,
request: reachability.ListConnectivityTestsRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListConnectivityTestsAsyncPager:
r"""Lists all Connectivity Tests owned by a project.
Args:
request (:class:`google.cloud.networkmanagement_v1beta1.types.ListConnectivityTestsRequest`):
The request object. Request for the
`ListConnectivityTests` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkmanagement_v1beta1.services.reachability_service.pagers.ListConnectivityTestsAsyncPager:
Response for the ListConnectivityTests method.
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
request = reachability.ListConnectivityTestsRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.list_connectivity_tests,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# This method is paged; wrap the response in a pager, which provides
# an `__aiter__` convenience method.
response = pagers.ListConnectivityTestsAsyncPager(
method=rpc,
request=request,
response=response,
metadata=metadata,
)
# Done; return the response.
return response
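# Illustrative sketch of consuming the pager returned above (the parent value
# is a placeholder project/location):
#
#   request = reachability.ListConnectivityTestsRequest(
#       parent="projects/my-project/locations/global")
#   pager = await client.list_connectivity_tests(request=request)
#   async for test in pager:   # further pages are fetched transparently
#       print(test.name)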
async def get_connectivity_test(self,
request: reachability.GetConnectivityTestRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> connectivity_test.ConnectivityTest:
r"""Gets the details of a specific Connectivity Test.
Args:
request (:class:`google.cloud.networkmanagement_v1beta1.types.GetConnectivityTestRequest`):
The request object. Request for the
`GetConnectivityTest` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.networkmanagement_v1beta1.types.ConnectivityTest:
A Connectivity Test for a network
reachability analysis.
"""
# Create or coerce a protobuf request object.
request = reachability.GetConnectivityTestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.get_connectivity_test,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Done; return the response.
return response
async def create_connectivity_test(self,
request: reachability.CreateConnectivityTestRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Creates a new Connectivity Test. After you create a test, the
reachability analysis is performed as part of the long running
operation, which completes when the analysis completes.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, containing non-existent resources in the
network, or you don't have read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
``AMBIGUOUS``. For more information, see the Connectivity Test
documentation.
Args:
request (:class:`google.cloud.networkmanagement_v1beta1.types.CreateConnectivityTestRequest`):
The request object. Request for the
`CreateConnectivityTest` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.networkmanagement_v1beta1.types.ConnectivityTest`
A Connectivity Test for a network reachability analysis.
"""
# Create or coerce a protobuf request object.
request = reachability.CreateConnectivityTestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.create_connectivity_test,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("parent", request.parent),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
connectivity_test.ConnectivityTest,
metadata_type=reachability.OperationMetadata,
)
# Done; return the response.
return response
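# Illustrative sketch of waiting for the long-running operation returned above;
# ``create_request`` is a placeholder CreateConnectivityTestRequest, and the
# ``result()`` call follows the usual google.api_core async-operation pattern:
#
#   operation = await client.create_connectivity_test(request=create_request)
#   test = await operation.result()   # resolves once the reachability analysis completes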
async def update_connectivity_test(self,
request: reachability.UpdateConnectivityTestRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Updates the configuration of an existing ``ConnectivityTest``.
After you update a test, the reachability analysis is performed
as part of the long running operation, which completes when the
analysis completes. The Reachability state in the test resource
is updated with the new result.
If the endpoint specifications in ``ConnectivityTest`` are
invalid (for example, they contain non-existent resources in the
network, or the user does not have read permissions to the
network configurations of listed projects), then the
reachability result returns a value of UNKNOWN.
If the endpoint specifications in ``ConnectivityTest`` are
incomplete, the reachability result returns a value of
``AMBIGUOUS``. See the documentation in ``ConnectivityTest``
for more details.
Args:
request (:class:`google.cloud.networkmanagement_v1beta1.types.UpdateConnectivityTestRequest`):
The request object. Request for the
`UpdateConnectivityTest` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.networkmanagement_v1beta1.types.ConnectivityTest`
A Connectivity Test for a network reachability analysis.
"""
# Create or coerce a protobuf request object.
request = reachability.UpdateConnectivityTestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.update_connectivity_test,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("resource.name", request.resource.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
connectivity_test.ConnectivityTest,
metadata_type=reachability.OperationMetadata,
)
# Done; return the response.
return response
async def rerun_connectivity_test(self,
request: reachability.RerunConnectivityTestRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Rerun an existing ``ConnectivityTest``. After the user triggers
the rerun, the reachability analysis is performed as part of the
long running operation, which completes when the analysis
completes.
Even though the test configuration remains the same, the
reachability result may change due to underlying network
configuration changes.
If the endpoint specifications in ``ConnectivityTest`` become
invalid (for example, specified resources are deleted in the
network, or you lost read permissions to the network
configurations of listed projects), then the reachability result
returns a value of ``UNKNOWN``.
Args:
request (:class:`google.cloud.networkmanagement_v1beta1.types.RerunConnectivityTestRequest`):
The request object. Request for the
`RerunConnectivityTest` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be
:class:`google.cloud.networkmanagement_v1beta1.types.ConnectivityTest`
A Connectivity Test for a network reachability analysis.
"""
# Create or coerce a protobuf request object.
request = reachability.RerunConnectivityTestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.rerun_connectivity_test,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
connectivity_test.ConnectivityTest,
metadata_type=reachability.OperationMetadata,
)
# Done; return the response.
return response
async def delete_connectivity_test(self,
request: reachability.DeleteConnectivityTestRequest = None,
*,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> operation_async.AsyncOperation:
r"""Deletes a specific ``ConnectivityTest``.
Args:
request (:class:`google.cloud.networkmanagement_v1beta1.types.DeleteConnectivityTestRequest`):
The request object. Request for the
`DeleteConnectivityTest` method.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation_async.AsyncOperation:
An object representing a long-running operation.
The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty` A generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
The JSON representation for Empty is empty JSON
object {}.
"""
# Create or coerce a protobuf request object.
request = reachability.DeleteConnectivityTestRequest(request)
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = gapic_v1.method_async.wrap_method(
self._client._transport.delete_connectivity_test,
default_timeout=None,
client_info=DEFAULT_CLIENT_INFO,
)
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((
("name", request.name),
)),
)
# Send the request.
response = await rpc(
request,
retry=retry,
timeout=timeout,
metadata=metadata,
)
# Wrap the response in an operation future.
response = operation_async.from_gapic(
response,
self._client._transport.operations_client,
empty_pb2.Empty,
metadata_type=reachability.OperationMetadata,
)
# Done; return the response.
return response
async def __aenter__(self):
return self
async def __aexit__(self, exc_type, exc, tb):
await self.transport.close()
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-networkmanagement",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = (
"ReachabilityServiceAsyncClient",
)
| 41.700162 | 171 | 0.653309 | 23,553 | 0.915426 | 0 | 0 | 1,594 | 0.061953 | 17,304 | 0.672548 | 14,310 | 0.556182 |
4d237d356a17f205a24800037a5d0a053ed6c426 | 563 | py | Python | todo/urls.py | fidele000/Ftodo-RestAPI-Django | 8c695503e04a3957920910acb9f1bb823ece4287 | ["MIT"] | null | null | null | todo/urls.py | fidele000/Ftodo-RestAPI-Django | 8c695503e04a3957920910acb9f1bb823ece4287 | ["MIT"] | null | null | null | todo/urls.py | fidele000/Ftodo-RestAPI-Django | 8c695503e04a3957920910acb9f1bb823ece4287 | ["MIT"] | null | null | null |
from django.urls import path
from rest_framework import viewsets
from rest_framework import routers
from . import views
from django.urls import include
from rest_framework.routers import DefaultRouter
router=DefaultRouter()
router.register('hello-viewset',views.HelloViewSet,basename='hello-viewset')
router.register('profile',views.UserProfileViewSet)
router.register('login',views.LoginViewSet,basename='login')
router.register('task',views.TaskViewset)
urlpatterns = [
path('helloview/',views.HelloAPIView.as_view()),
path('',include(router.urls)),
]
| 33.117647 | 76 | 0.801066 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 73 | 0.129663 |
4d23e4d034125a3f8c2a16ba07229fdc1c90a016 | 257 | py | Python | toontown/estate/DistributedPlantBaseAI.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | ["BSD-3-Clause"] | 99 | 2019-11-02T22:25:00.000Z | 2022-02-03T03:48:00.000Z | toontown/estate/DistributedPlantBaseAI.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | ["BSD-3-Clause"] | 42 | 2019-11-03T05:31:08.000Z | 2022-03-16T22:50:32.000Z | toontown/estate/DistributedPlantBaseAI.py | TheFamiliarScoot/open-toontown | 678313033174ea7d08e5c2823bd7b473701ff547 | ["BSD-3-Clause"] | 57 | 2019-11-03T07:47:37.000Z | 2022-03-22T00:41:49.000Z |
from direct.directnotify import DirectNotifyGlobal
from direct.distributed.DistributedObjectAI import DistributedObjectAI
class DistributedPlantBaseAI(DistributedObjectAI):
notify = DirectNotifyGlobal.directNotify.newCategory('DistributedPlantBaseAI')
| 42.833333 | 82 | 0.879377 | 133 | 0.51751 | 0 | 0 | 0 | 0 | 0 | 0 | 24 | 0.093385 |
4d240f3eb85f0adcecd00489cbe4d3ad31ec57c5 | 27 | py | Python | test.py | justin-th/linux-pasword-protect | feba8712d5bc25c417cb7297aac9c0d23566378e | ["MIT"] | null | null | null | test.py | justin-th/linux-pasword-protect | feba8712d5bc25c417cb7297aac9c0d23566378e | ["MIT"] | null | null | null | test.py | justin-th/linux-pasword-protect | feba8712d5bc25c417cb7297aac9c0d23566378e | ["MIT"] | null | null | null |
import os
print(os.curdir)
| 9 | 16 | 0.777778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
4d242ba823cf6de6e20e2768b1f065a06d916125 | 302 | py | Python | setup.py | samuel-spak/thermostate | 906d1e0b79289cd51cde510c797f007674b8bdcd | ["BSD-3-Clause"] | 6 | 2020-03-31T14:25:23.000Z | 2022-03-10T14:56:29.000Z | setup.py | samuel-spak/thermostate | 906d1e0b79289cd51cde510c797f007674b8bdcd | ["BSD-3-Clause"] | 35 | 2017-01-26T15:31:19.000Z | 2022-03-14T16:32:00.000Z | setup.py | samuel-spak/thermostate | 906d1e0b79289cd51cde510c797f007674b8bdcd | ["BSD-3-Clause"] | 15 | 2017-02-08T20:07:38.000Z | 2022-03-14T09:15:35.000Z |
from setuptools import setup
from pathlib import Path
from typing import Dict
HERE = Path(__file__).parent
version: Dict[str, str] = {}
version_file = HERE / "src" / "thermostate" / "_version.py"
exec(version_file.read_text(), version)
setup(version=version["__version__"], package_dir={"": "src"})
| 25.166667 | 62 | 0.731788 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 51 | 0.168874 |
4d242e01b427dcb6b1bf2d2cc3562c29ca378947 | 78,627 | py | Python | fps2c__ - Copy.py | GeorgeLoo/FPS | 775bf173437d2feb09bc91b7f842226a8c752980 | ["MIT"] | 1 | 2022-02-21T12:07:42.000Z | 2022-02-21T12:07:42.000Z | fps2c__ - Copy.py | GeorgeLoo/FPS | 775bf173437d2feb09bc91b7f842226a8c752980 | ["MIT"] | null | null | null | fps2c__ - Copy.py | GeorgeLoo/FPS | 775bf173437d2feb09bc91b7f842226a8c752980 | ["MIT"] | null | null | null |
'''
_
_._ _..._ .-', _.._(`))
'-. ` ' /-._.-' ',/
) \ '.
/ _ _ | \
| a a / |
\ .-. ;
'-('' ).-' ,' ;
'-; | .'
\ \ /
| 7 .__ _.-\ \
| | | ``/ /` /
/,_| | /,_/ /
/,_/ '`-'
-----------------------------------------
injured face like Duke Nukem
/moving hostages panic
children as terrorists! with RPGs
/taking cover
/Claymore 700 ball bearings
night shootouts
mp7 rifle with silencer
/2 images for hostages
/Barrett Browning M82 CQ
/go through walls, kill tango commandos with one shot
/see through walls with scope
?tablet version
Fellow shooters with Ai
/medical kits
shoot and shatter glass
guards and
/trigger waypoints
long distance shooting! sniper rifle!
show dead bodies in 3 positions: left, right, upside down!
assassinations
deeper missions
scenario announcement
scenario chooser
improve tango generation
background music
show next mission
give a summary of the performance
/fight against special forces who take 5 shots before dying
/weapons restriction!
/pause that pauses the bullets - important!
/campaign mode
/reset stats F1 key 13.3.2015
/prevent hero from going off screen 14.3.2015 pi day
tango random shooters
tango intelligent fire
tango intelligent movement, flanking!
game suicide bombers
/game hostages
hostage with timer shootings
/different rates of auto fire
small message window
bullets that can shoot through the walls!
blow cover away with bombs
/Hero cursor wasd movement
/Tangos will target Hero
RPG countdown to explosion
tangos
hostages
cover
sniper rifle and distances
blood?
/headshots
/leg shots
shield for storming rooms
weapon accuracy
range
rate of auto fire
Pistol
Name
Graphic
Aiming cursor
Sound firing
Sound hit
Safe, Semi
Number of rounds
Reload()
Choose()
Draw()
DrawRounds()
Fire()
Selector()
Knife - sheathed, stab/slash/throw
Silenced Pistol
Glock automatic pistol
Samurai sword
Parachute avoid 'obstacles'
Maze grid when covert missions
Rifle
Safe Semi Auto
Sniper Rifle
Safe Semi
Mini-gun
50 round a second!
4400 round box
SAW
safe Auto
Shotgun
spread shot
Stun granade
safe armed
Grenade
Safe Armed
Rocket
Safe Semi
Artillery
Coords
Confirm
auto fire
---------------
explosion objects
Grenades as well.
Grenade multiple launcher
use proper consts
better structure for
changing weapons
use hash to get to weapons instead of if-then-else
'''
import pyglet
from pyglet.window import key
from pyglet import clock
import random
#import Tkinter, tkMessageBox
class Const():
foo = 15589888
folder = '.\\draw\\'
backgroundFolder = '.\\background\\'
knifeKbar = 'KBar knife'
pistol1911 = 'M1911 pistol'
knifeSafe = 'safe'
knifeArmed = 'armed'
knifeThrow = 'throw'
knifeModes = (knifeSafe,knifeArmed,knifeThrow)
pistolSafe = 'safe'
pistolSemi = 'semi'
pistolModes = (pistolSafe,pistolSemi)
M4assaultRifle = 'M4 Carbine'
assaultRifleSafe = 'safe'
assaultRifleSemi = 'semi'
assaultRifleAuto = 'auto'
assaultRifleModes = (assaultRifleSafe,assaultRifleSemi,assaultRifleAuto)
machineGunModes = (assaultRifleSafe,assaultRifleAuto)
SAR21Rifle = 'SAR21'
Ultimax100SAW = 'Ultimax 100 SAW'
M134MiniGun = 'M134 Mini-Gun'
M32GRENADElauncher = 'M32 Multi-Grenade Launcher'
STCPW = 'ST KINETICS CPW'
GLOCK = 'GLOCK 18'
M249SAW = 'M249 SAW'
M107sniper = 'M107 Sniper Rifle'
ClaymoreMine = 'M18 Claymore'
MP5 = 'MP5 PDW'
suicideBomber = True # whether tangos act as suicide bombers when the hero gets too close
HandToolConst = 'Hand Tool'
class Sounds:
soundpath='.\\sound\\'
MuteSound = 0
def __init__(self):
try:
self.minigunSound = self.Load('minigunsound.wav')
self.sar21 = self.Load('sar21.wav')
self.reLoad = self.Load('reload.wav')
self.m1911 = self.Load('M1911pistolsound.wav')
self.m4carbine = self.Load('M4carbineSound.wav')
self.pain = self.Load('Pain-SoundBible.com-1883168362.wav')
self.heropain = self.Load('Pain-Hero.wav')
self.m32MGL = self.Load('grenadeLauncher.wav') #grenadeLauncher.wav
self.hostageHitsound = self.Load('HostageFemaleScream1.wav')
self.hostageRescuedSound = self.Load('ThankYouBritishWoman.wav')
self.stcpwsnd = self.Load('CPWsound.wav')
self.glocksnd = self.Load('Glocksound.wav')
self.m249sawSound = self.Load('M249_SAW.wav')
self.m107Sound = self.Load('M107.wav')
self.m18Sound = self.Load('Claymore.wav')
self.reliefSound = self.Load('Ahhh.wav')
self.curedSound = self.Load('Ahhhh.wav')
self.mp5sound = self.Load('mp5sound.wav')
#self.stcpwsnd = self.Load('')
#self.wallhit = self.Load('wallhit.mp3')
self.player = pyglet.media.Player()
except:
print 'sound file failed to load'
# self. = self.Load('')
#self.player.queue(self.gunsound)
def Load(self,f):
#print f
s = pyglet.media.StaticSource(pyglet.media.load(self.soundpath+f, streaming=False))
#print s.duration
#s.play()
return s
def Player(self,s):
self.player.queue(s)
self.player.play()
def Play(self,s):
if self.MuteSound != 1:
#print 'sound play'
s.play()
def Stop(self):
self.player.pause()
pass
def On(self):
#print 'sound on'
self.MuteSound = 0
def Off(self):
self.MuteSound = 1
gSound = Sounds()
def SpriteLoad(name, centre=False):
image = pyglet.image.load(name)
if centre:
image.anchor_x = image.width / 2
image.anchor_y = image.height / 2
return pyglet.sprite.Sprite(image)
class MessageWin(pyglet.window.Window):
def __init__(self,title):
super(MessageWin, self).__init__(resizable = False)
self.set_size(800, 600)
self.set_location(300,50)
self.maximize()
self.set_caption(title)
self.lines = []
i = 0
g = self.height - 50
while i < 4:
self.label = pyglet.text.Label('line '+str(i),
font_name='Times New Roman',
font_size=16,
x=50, y=g)
#anchor_x='center', anchor_y='center')
self.lines.append(self.label)
g -= 25
i+=1
def on_mouse_release(self,x, y, button, modifiers):
#self.close()
#self.set_visible(visible=False)
#self.minimize()
#self.set_visible(visible=False)
pass
def on_draw(self):
self.clear()
#self.switch_to()
for i in self.lines:
i.draw()
def hide(self):
self.set_fullscreen(False, screen=None)
self.set_visible(visible=False)
def show(self):
self.set_visible(visible=True)
self.set_fullscreen(True, screen=None)
def on_close(self):
#self.set_visible(visible=False)
pass
def setText(self,text,line):
self.lines[line].text = text
def on_key_press(self, symbol, modifiers):
if symbol == key.I:
self.hide()
gmw = MessageWin('Battle Report')
class BattleReport():
def __init__(self):
self.init()
def init(self):
print 'Battle report init'
self.numberheadshot = 0
self.numberbodyhit = 0
self.numberblasted = 0
self.numLegHits = 0
self.herohit = 0
self.heroblasted = 0
self.hostagesHit = 0
self.hostagesRescued = 0
self.herokilled = 0
#alternative way to gather data
#but delay in reporting data
def add(self,v):
self.report()
return v + 1
def report(self):
if self.herohit > 4:
self.herokilled += self.herohit / 5
self.herohit = 0
s = 'hero hit '+str(self.herohit)+ \
' blasted '+ str(self.heroblasted) +\
' killed ' + str(self.herokilled)
gmw.setText(s,0)
s = 'tango headhit ' + str(self.numberheadshot) + \
' tango bodyhits ' + str(self.numberbodyhit) + \
' tango leg hits ' + str(self.numLegHits)
gmw.setText(s,1)
s = 'tango blown up ' + str(self.numberblasted)
gmw.setText(s,2)
totalTangoHits = self.numberheadshot + \
self.numberbodyhit + self.numLegHits + \
self.numberblasted + self.heroblasted
s = 'Total Tango casualties ' + str(totalTangoHits) +\
' Hostages hit ' + str(self.hostagesHit) + \
' Rescued ' + str(self.hostagesRescued)
gmw.setText(s,3)
pass
gBattleRep = BattleReport()
#gBattleRep.numberbodyhit += 1
#BattleReport.bullshit = 37
#print gBattleRep.numberbodyhit, BattleReport.bullshit
#gBattleRep.report()
#tkMessageBox.showinfo('Battle Report','Stuff')
#gmw.switch_to()
#gmw.show()
#gmw.setText('fuck'+' pap',0)
'''
Bystanders
Movement all over the place
stats
explosions
bullets
'''
'''
Hostages
Moving / stationary
crying sounds
statistics tracked
different looks
statistics
drawing
'''
class Hostages():
def __init__(self):
self.hostageList = []
self.deadhostagelist = []
self.hostageSprite = SpriteLoad(Const.folder+'hostage1.jpg',centre=False)
self.hostageSprite.scale = 0.25
self.deadhostageSprite = SpriteLoad(Const.folder+'hostage1.jpg',centre=False)
self.deadhostageSprite.scale = 0.25
self.deadhostageSprite.rotation = -90.0
h2s = 0.1
self.hostage2Sprite = SpriteLoad(Const.folder+'hostage2.jpg',centre=False)
self.hostage2Sprite.scale = h2s
self.deadhostage2Sprite = SpriteLoad(Const.folder+'hostage2.jpg',centre=False)
self.deadhostage2Sprite.scale = h2s
self.deadhostage2Sprite.rotation = 90.0
self.panic = False
self.sound = gSound
self.moveDir = 'up'
self.winw = 0
self.winh = 0
clock.schedule_interval(self.autocall, 1.0)
def setPanic(self,v):
self.panic = v
def autocall(self,dt):
if self.panic:
self.panicMove()
else:
self.move()
pass
def panicMove(self):
gw = self.hostageSprite.width / 1
gh = self.hostageSprite.height / 1
for i in self.hostageList:
tx = i[0]
ty = i[1]
d = random.randrange(1,9)
if d == 1:
self.moveDir = 'up'
elif d == 2:
self.moveDir = 'down'
elif d == 3:
self.moveDir = 'left'
elif d == 4:
self.moveDir = 'right'
if self.moveDir == 'up':
if ty - gh > 0:
ty -= gh
elif self.moveDir == 'down':
if ty + gh < self.winh:
ty += gh
elif self.moveDir == 'left':
if tx - gw > 0:
tx -= gw
elif self.moveDir == 'right':
if tx + gw < self.winw:
tx += gw
i[0] = tx
i[1] = ty
pass
def move(self):
gw = self.hostageSprite.width / 8
gh = self.hostageSprite.height / 8
for i in self.hostageList:
tx = i[0]
ty = i[1]
if self.moveDir == 'up':
ty -= gh
elif self.moveDir == 'down':
ty += gh
elif self.moveDir == 'left':
tx -= gw
elif self.moveDir == 'right':
tx += gw
i[0] = tx
i[1] = ty
if self.moveDir == 'up':
self.moveDir = 'down'
elif self.moveDir == 'down':
self.moveDir = 'left'
elif self.moveDir == 'right':
self.moveDir = 'up'
elif self.moveDir == 'left':
self.moveDir = 'right'
pass
def create(self,num,winWidth,winHeight):
self.winw = winWidth
self.winh = winHeight
i = 0
self.deadhostagelist = []
while i < num:
x = random.randrange(1,winWidth-100)
y = random.randrange(1,winHeight-100)
t = random.randrange(1,3)
self.hostageList.append([x,y,t])
i += 1
def Rescue(self,bx,by):
for i in self.hostageList:
tx = i[0]
ty = i[1]
l = tx
t = ty
r = tx + self.hostageSprite.width
b = ty + self.hostageSprite.height
rect = [l, t, r, b]
if withinrect( bx, by, rect):
#print 'hostage saved'
self.sound.Play(self.sound.hostageRescuedSound)
gBattleRep.hostagesRescued += 1
gBattleRep.report()
self.hostageList.remove(i)
def hit(self,bx,by):
for i in self.hostageList:
tx = i[0]
ty = i[1]
ht = i[2]
l = tx
t = ty
if ht == 1: # use the hostage type (not the rect top) to pick which sprite's size applies
r = tx + self.hostageSprite.width
b = ty + self.hostageSprite.height
else:
r = tx + self.hostage2Sprite.width
b = ty + self.hostage2Sprite.height
rect = [l, t, r, b]
if withinrect( bx, by, rect):
#print 'hostage hit'
self.sound.Play(self.sound.hostageHitsound)
gBattleRep.hostagesHit += 1
gBattleRep.report()
a = [tx,ty,ht]
self.deadhostagelist.append(a)
self.hostageList.remove(i)
pass
def HitGrenade(self,grenaderect):
for i in self.hostageList:
tx = i[0]
ty = i[1]
ht = i[2]
if withinrect( tx, ty, grenaderect):
self.sound.Play(self.sound.hostageHitsound)
gBattleRep.hostagesHit += 1
gBattleRep.report()
self.hostageList.remove(i)
a = [tx,ty,ht]
self.deadhostagelist.append(a)
pass
def draw(self):
for i in self.hostageList:
x = i[0]
y = i[1]
t = i[2]
if t == 1:
self.hostageSprite.set_position(x,y)
self.hostageSprite.draw()
else:
self.hostage2Sprite.set_position(x,y)
self.hostage2Sprite.draw()
for i in self.deadhostagelist:
x = i[0]
y = i[1]
t = i[2]
if t == 1:
self.deadhostageSprite.set_position(x,y)
self.deadhostageSprite.draw()
else:
self.deadhostage2Sprite.set_position(x,y)
self.deadhostage2Sprite.draw()
class BulletHoles():
def __init__(self):
self.maxholes = 40
self.bulletHoles = []
self.holeSprite = SpriteLoad(Const.folder+'bulleth.png',centre=True)
self.holeSprite.scale = 0.5
def record(self,x,y):
self.bulletHoles.append((x,y))
def draw(self):
for i in self.bulletHoles:
self.holeSprite.x = i[0]
self.holeSprite.y = i[1]
self.holeSprite.draw()
if len(self.bulletHoles) > self.maxholes:
self.bulletHoles = []
'''
animate explosions
sound handled elsewhere
'''
class Explosion():
def __init__(self):
self.exploList = []
self.imageFrames = []
self.maxframe = 4
self.ex0 = SpriteLoad(Const.folder+'explo0.png',centre=True)
self.ex1 = SpriteLoad(Const.folder+'explo1.png',centre=True)
self.ex2 = SpriteLoad(Const.folder+'explo2.png',centre=True)
self.ex3 = SpriteLoad(Const.folder+'explo3.png',centre=True)
self.imageFrames.append(self.ex0)
self.imageFrames.append(self.ex1)
self.imageFrames.append(self.ex2)
self.imageFrames.append(self.ex3)
clock.schedule_interval(self.autocall, 0.05)
def autocall(self,dt):
for i in self.exploList:
f = i[2] #which frame
f += 1
if f < self.maxframe:
i[2] = f
else:
self.exploList.remove(i)
pass
def add(self,x,y):
a = [x,y,0]
self.exploList.append(a)
pass
def draw(self):
for i in self.exploList:
f = i[2]
self.imageFrames[f].set_position(i[0],i[1])
self.imageFrames[f].draw()
pass
class Hero():
def __init__(self):
self.heroSprite = SpriteLoad(Const.folder+'hero1.jpg',centre=True)
self.heroSprite.scale = 0.25
self.heroSprite.set_position(100,100)
self.factor = 2
self.heroHITSprite = SpriteLoad(Const.folder+'hero1hit.jpg',centre=True)
self.heroHITSprite.scale = 0.25
self.heroHITSprite.set_position(100,100)
self.move = 'stop'
clock.schedule_interval(self.autocall, 0.125)
self.heroHittimer = 0
self.status = pyglet.text.Label('Hero Health',
font_name='Times New Roman',
font_size=24,
x=1000, y = 20)
def autocall(self,dt):
self.doMovement()
if self.heroHittimer > 0:
self.heroHittimer -= 1
pass
def setscreen(self,w,h):
self.winWidth = w
self.winHeight = h
print 'setscreen'
def doMovement(self):
self.saveherox = self.heroSprite.x
self.saveheroy = self.heroSprite.y
if self.move == 'up':
self.heroSprite.y += self.heroSprite.height / self.factor
elif self.move == 'left':
self.heroSprite.x -= self.heroSprite.width / self.factor
elif self.move == 'back':
self.heroSprite.y -= self.heroSprite.height / self.factor
elif self.move == 'right':
self.heroSprite.x += self.heroSprite.width / self.factor
if self.heroSprite.x > self.winWidth or \
self.heroSprite.x < 0:
self.heroSprite.x = self.saveherox
if self.heroSprite.y > self.winHeight or \
self.heroSprite.y < 0:
self.heroSprite.y = self.saveheroy
pass
def resetPos(self):
self.heroSprite.set_position(100,100)
def draw(self):
s = 'Hits '+ str(gBattleRep.herohit) \
+ ' Killed ' + str(gBattleRep.herokilled)
self.status.text = s
self.status.draw()
if self.heroHittimer == 0:
self.heroSprite.draw()
else:
x = self.heroSprite.x
y = self.heroSprite.y
self.heroHITSprite.set_position(x,y)
self.heroHITSprite.draw()
def goUp(self):
self.move = 'up'
#print 'up'
#self.heroSprite.y += self.heroSprite.height / self.factor
pass
def goLeft(self):
self.move = 'left'
#self.heroSprite.x -= self.heroSprite.width / self.factor
pass
def goBack(self):
self.move = 'back'#self.heroSprite.y -= self.heroSprite.height / self.factor
pass
def goRight(self):
self.move = 'right'#self.heroSprite.x += self.heroSprite.width / self.factor
def stopMoving(self):
self.move = 'stop'
pass
def hit(self):
#print 'hero hit check'
#print 'xxx hero hit'
self.heroHittimer = 20 #how long to show the 'hit' drawing
def HandleModeSelect(modes,currMode):
#print Const.foo
i = 0
while i < len(modes):
if currMode == modes[i] and i < len(modes)-1:
return modes[i+1]
i += 1
return modes[0]
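# Worked example (illustrative): the selector steps through the mode tuple and
# wraps around, e.g. with Const.assaultRifleModes = ('safe', 'semi', 'auto'):
#   HandleModeSelect(Const.assaultRifleModes, 'safe') -> 'semi'
#   HandleModeSelect(Const.assaultRifleModes, 'semi') -> 'auto'
#   HandleModeSelect(Const.assaultRifleModes, 'auto') -> 'safe'   (back to the first mode)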
def withinrect( x,y,r):
x1,y1=r[0],r[1]
x2,y2=r[2],r[3]
if x>x1 and x<x2 and y>y1 and y<y2:
return True
return False
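# Worked example (illustrative): rectangles are [x1, y1, x2, y2] and the test is
# strict, so points on an edge count as outside:
#   withinrect(5, 5, [0, 0, 10, 10]) -> True
#   withinrect(0, 5, [0, 0, 10, 10]) -> False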
'''
man figure must go past to trigger
red to green
1 2 3
'''
class Waypoints():
def __init__(self):
self.redSpr = SpriteLoad(Const.folder+'wayRed.png',centre=False)
self.greenSpr = SpriteLoad(Const.folder+'wayGreen.png',centre=False)
self.orangeSpr = SpriteLoad(Const.folder+'wayOrange.png',centre=False)
#self.coverSpr.scale = 0.2
self.stateOff = 'Orange'
self.stateOn = 'Green'
self.stateWrong = 'Red'
self.reset()
clock.schedule_interval(self.autocall, 2.0)
pass
def autocall(self,dt):
for i in self.waylist:
if i[2] == self.stateWrong:
i[2] = self.stateOff
def draw(self):
for i in self.waylist:
x = i[0]
y = i[1]
state = i[2]
if state == self.stateOff:
self.orangeSpr.set_position(x,y)
self.orangeSpr.draw()
elif state == self.stateOn:
self.greenSpr.set_position(x,y)
self.greenSpr.draw()
elif state == self.stateWrong:
self.redSpr.set_position(x,y)
self.redSpr.draw()
pass
def checkhit(self,x,y):
for i in self.waylist:
wx = i[0]
wy = i[1]
wx1 = wx + self.orangeSpr.width
wy1 = wy + self.orangeSpr.height
r = [wx,wy,wx1,wy1]
if withinrect(x, y, r):
if i[2] != self.stateOn \
and self.checkNum(i[3]):
i[2] = self.stateOn
return True
elif i[2] == self.stateOff:
i[2] = self.stateWrong
return False
def checkNum(self,n):
if n == self.expected:
self.expected += 1
return True
return False
def complete(self):
if self.waylist == []:
return False
ret = 0
for i in self.waylist:
if i[2] == self.stateOn:
ret += 1
return (ret == len(self.waylist))
def add(self,x,y):
state = self.stateOff
a = [x,y,state,self.number]
self.waylist.append(a)
self.number += 1
pass
def reset(self):
self.waylist = []
self.number = 1
self.expected = 1
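# Minimal usage sketch (inferred from how ShootingGallery drives waypoints;
# hero_x/hero_y are placeholders for the hero sprite position):
#   wayp = Waypoints()
#   wayp.add(400, 100); wayp.add(500, 100); wayp.add(600, 100)  # must be hit in this order
#   wayp.checkhit(hero_x, hero_y)  # the next marker turns green, out-of-order ones flash red
#   wayp.complete()                # True once every marker has been triggered in sequence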
'''
goes in front of the tangos to provide cover for bullets
typeofcover
'''
class CoverLayer():
def __init__(self):
self.coverSpr = SpriteLoad(Const.folder+'brick_texture___9_by_agf81-d3a20h2.jpg')
self.coverSpr.scale = 0.2
#self.coverSpr.set_position(x,y)
def Hit(self,where,x,y):
#tx = self.coverSpr.x
#ty = self.coverSpr.y
tx = where[0]
ty = where[1]
l = tx
t = ty
r = tx + self.coverSpr.width
b = ty + self.coverSpr.height
rect = [l, t, r, b]
if withinrect( x, y, rect):
#print 'cover hit'
return True
pass
def Draw(self,where):
x = where[0]
y = where[1]
self.coverSpr.set_position(x,y)
self.coverSpr.draw()
def GetDirection():
d = random.randrange(1,9)
return d
def MoveXY(d,x,y,tw,th,ww,wh):
base = 50
mx = x
my = y
if d == 1:
if my + th < wh:
my += th
elif d == 5:
if my - th > base:
my -= th
elif d == 3 or d == 2:
if mx + tw < ww:
mx += tw
elif d == 7 or d == 4:
if mx - tw > 0:
mx -= tw
return mx,my
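# Direction codes produced by GetDirection() (1-8) and how MoveXY interprets them:
#   1      -> step up by th, capped at the window height
#   5      -> step down by th, never below the 50-pixel status bar
#   2 or 3 -> step right by tw, capped at the window width
#   4 or 7 -> step left by tw, never past x = 0
#   6, 8   -> stand still for this tick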
'''
ninja stealth
five bullets to kill one
cannot be killed by grenade
dead bodies list
'''
class TangoCommando():
def __init__(self):
self.target = SpriteLoad(Const.folder+'tango1image.png')
self.target.scale = 0.2
self.deadtarget = SpriteLoad(Const.folder+'tango1image.png')
self.deadtarget.scale = 0.2
self.deadtarget.rotation = 90 #degrees
self.deadlist = []
self.sound = gSound
self.boardlist = []
print 'init Target'
print self.target.width, self.target.height
def create(self,number,winW,winH):
i = 0
self.boardlist = []
self.deadlist = []
while i < number:
x = random.randrange(1,winW)
y = random.randrange(500,winH)
a = [x,y,0]
self.boardlist.append(a)
i+=1
pass
def getList(self):
return self.boardlist
def tangoDead(self):
if not self.boardlist == []:
return False
return True
def move(self,w,h):
i = 0
while i < len(self.boardlist):
d = GetDirection()
tw = self.target.width / 2
th = self.target.height / 2
x = self.boardlist[i][0]
y = self.boardlist[i][1]
numhit = self.boardlist[i][2]
#x = self.target.x
#y = self.target.y
#if not numhit > 4:
rx,ry = MoveXY(d, x,y,tw, th, w, h)
self.boardlist[i][0] = rx
self.boardlist[i][1] = ry
i+=1
#return rx,ry
def TangoShotcheck(self,x,y):
for where in self.boardlist:
if self.Hit(x, y, where):
self.sound.Play(self.sound.pain)
self.boardlist.remove(where)
pass
def Hit(self,x,y,where): #commando
tx = where[0]
ty = where[1]
numhit = where[2]
l = tx
t = ty + self.target.height/4*3
r = tx + self.target.width
b = ty + self.target.height
recthead = [l, t, r, b]
l = tx
t = ty + self.target.height/4
r = tx + self.target.width
b = ty + self.target.height/4*3
rectbody = [l, t, r, b]
l = tx
t = ty
r = tx + self.target.width
b = ty + self.target.height/4
rectlegs = [l, t, r, b]
if withinrect( x, y, recthead):
#print 'head hit'
gBattleRep.numberheadshot += 1
gBattleRep.report()
numhit += 1
#return True
elif withinrect( x, y, rectbody):
#print 'body hit'
gBattleRep.numberbodyhit += 1
gBattleRep.report()
#self.sound.Play(self.sound.pain)
numhit += 1
#return True
elif withinrect( x, y, rectlegs):
#print 'leg hit'
#gBattleRep.numLegHits = gBattleRep.add( gBattleRep.numLegHits)
gBattleRep.numLegHits += 1
gBattleRep.report()
#self.sound.Play(self.sound.pain)
numhit += 1
#return True
else:
#print 'miss'
return False
where[2] = numhit
if numhit > 4:
a = [where[0],where[1]]
self.deadlist.append(a)
return True #to have sound and register as dead
else:
return False
def Draw(self):
for i in self.boardlist:
self.target.set_position(i[0],i[1])
self.target.draw()
for i in self.deadlist:
self.deadtarget.set_position(i[0],i[1])
self.deadtarget.draw()
class TargetBoard():
def __init__(self):
#self.target = SpriteLoad(Const.folder+'terrorist.png')
#self.target.scale = 0.3
#self.sound = gSound
#self.deadtarget = SpriteLoad(Const.folder+'terrorist.png')
#self.deadtarget.scale = 0.3
#self.deadtarget.rotation = 90
#self.target.set_position(x,y)
#self.target.rotation = -90.0
#self.hitlist = []
self.boardlist = []
self.deadlist = []
def create(self,number,winW,winH,Tangotype):
print 'init Target'
if Tangotype == 'real':
name = 'terrorist.png'
sz = 0.3
self.target = SpriteLoad(Const.folder+name)
self.target.scale = sz
self.sound = gSound
self.deadtarget = SpriteLoad(Const.folder+name)
self.deadtarget.scale = sz
self.deadtarget.rotation = 90
print self.target.width, self.target.height
else:
name = 'target0.jpg'
sz = 0.2
self.target = SpriteLoad(Const.folder + name)
self.target.scale = sz
self.sound = gSound
self.deadtarget = SpriteLoad(Const.folder + name)
self.deadtarget.scale = sz
self.deadtarget.rotation = 90
print self.target.width, self.target.height
i = 0
self.boardlist = []
self.deadlist = []
while i < number:
x = random.randrange(1,winW)
y = random.randrange(500,winH)
a = [x,y]
self.boardlist.append(a)
i+=1
pass
def getList(self):
return self.boardlist
def getDeadlist(self):
return self.deadlist
def tangoDead(self):
if self.boardlist != []:
return False
return True
def move(self,w,h):
#print 'move',self.target.x,w,h
i = 0
while i < len(self.boardlist):
d = GetDirection()
tw = self.target.width / 2
th = self.target.height / 2
x = self.boardlist[i][0]
y = self.boardlist[i][1]
#x = self.target.x
#y = self.target.y
rx,ry = MoveXY(d, x,y,tw, th, w, h)
self.boardlist[i][0] = rx
self.boardlist[i][1] = ry
i+=1
#return rx,ry
def TangoShotcheck(self,x,y):
for where in self.boardlist:
if self.Hit(x, y, where):
self.sound.Play(self.sound.pain)
a = [x,y]
self.deadlist.append(a)
self.boardlist.remove(where)
pass
def Hit(self,x,y,where):
tx = where[0]
ty = where[1]
l = tx
t = ty + self.target.height/4*3
r = tx + self.target.width
b = ty + self.target.height
recthead = [l, t, r, b]
l = tx
t = ty + self.target.height/4
r = tx + self.target.width
b = ty + self.target.height/4*3
rectbody = [l, t, r, b]
l = tx
t = ty
r = tx + self.target.width
b = ty + self.target.height/4
rectlegs = [l, t, r, b]
if withinrect( x, y, recthead):
#print 'head hit'
gBattleRep.numberheadshot += 1
gBattleRep.report()
return True
elif withinrect( x, y, rectbody):
#print 'body hit'
gBattleRep.numberbodyhit += 1
gBattleRep.report()
#self.sound.Play(self.sound.pain)
return True
elif withinrect( x, y, rectlegs):
#print 'leg hit'
#gBattleRep.numLegHits = gBattleRep.add( gBattleRep.numLegHits)
gBattleRep.numLegHits += 1
gBattleRep.report()
#self.sound.Play(self.sound.pain)
return True
else:
#print 'miss'
return False
def Draw(self):
for i in self.boardlist:
self.target.set_position(i[0],i[1])
self.target.draw()
for d in self.deadlist:
self.deadtarget.set_position(d[0],d[1])
self.deadtarget.draw()
#class TargetBoard0():
#def __init__(self,x,y):
#self.target = SpriteLoad(Const.folder+'target.jpg')
#self.target.scale = 0.25
#self.target.set_position(x,y)
##self.target.rotation = -90.0
##self.hitlist = []
#print 'init Target'
#print self.target.width, self.target.height
#def move(self,w,h):
##print 'move',self.target.x,w,h
#d = GetDirection()
#tw = self.target.width
#th = self.target.height
#x = self.target.x
#y = self.target.y
#self.target.x,self.target.y = MoveXY(d, x,y,tw, th, w, h)
#pass
#def Hit(self,x,y):
#tx = self.target.x
#ty = self.target.y
#l = tx
#t = ty + self.target.height/4*3
#r = tx + self.target.width
#b = ty + self.target.height
#recthead = [l, t, r, b]
#l = tx
#t = ty + self.target.height/4
#r = tx + self.target.width
#b = ty + self.target.height/4*3
#rectbody = [l, t, r, b]
#l = tx
#t = ty
#r = tx + self.target.width
#b = ty + self.target.height/4
#rectlegs = [l, t, r, b]
#if withinrect( x, y, recthead):
#print 'head hit'
#return True
#elif withinrect( x, y, rectbody):
#print 'body hit'
##self.sound.Play(self.sound.pain)
#return True
#elif withinrect( x, y, rectlegs):
#print 'leg hit'
##self.sound.Play(self.sound.pain)
#return True
#else:
##print 'miss'
#return False
#def Draw(self):
#self.target.draw()
'''
appear near hero
dot moves randomly
dot moves toward hero
tries to hit hero
number
skill
speed
location of hero
add attacks
timed attacks, then end each
check hit
RPG
sound of hero hit
/graphic
'''
class AttackHero():
def __init__(self):
self.attackL = []
clock.schedule_interval(self.autocall, 0.05)
self.attackSpr = SpriteLoad(Const.folder+'attackDot.png',centre=True)
self.attackSpr.scale = 0.5
self.hero = None
self.sound = gSound
self.badguys = []
self.pauseBool = False
pass
def autocall(self,dt):
#after some time, remove the bullet
#for i in self.attackL:
#t = i[2]
#t -= 1
#if t < 1:
#self.attackL.remove(i)
#else:
#i[2] = t
if self.pauseBool: return
self.move()
pass
def addHero(self, hero):
self.hero = hero
def addBadGuys(self, badguys):
self.badguys = badguys
pass
def draw(self):
for i in self.attackL:
x = i[0]
y = i[1]
self.attackSpr.set_position(x, y)
self.attackSpr.draw()
def add(self,hero):
'''position,time'''
random.shuffle(self.badguys)
maxb = 2 # means 3 bullets
if len(self.attackL) > maxb: return #not too many bullets!
i = 0
while i < len(self.badguys):
#h = random.randrange(100,200)
#w = random.randrange(-500,500)
#for i in self.badguys:
bp = self.badguys[i]
hx = hero.heroSprite.x
hy = hero.heroSprite.y
x = bp[0]
y = bp[1]
a = [x,y,hx,hy]
self.attackL.append(a)
i += 1
if i > maxb: break
pass
def move(self):
s = self.attackSpr.height * 2
for i in self.attackL:
x = i[0]
y = i[1]
hx = i[2]
hy = i[3]
if hx < x:
x -= s
elif hx > x:
x += s
if hy < y:
y -= s
elif hy > y:
y += s
if abs(x-hx)<s and abs(y-hy)<s:
self.attackL.remove(i)
#y -= self.attackSpr.height
i[0] = x
i[1] = y
if self.Hit(x, y): break
pass
def Hit(self,x,y):
#tx = self.coverSpr.x
#ty = self.coverSpr.y
tx = self.hero.heroSprite.x
ty = self.hero.heroSprite.y
tx -= self.hero.heroSprite.width / 2 # back from the centre
ty -= self.hero.heroSprite.height / 2
l = tx
t = ty
r = tx + self.hero.heroSprite.width
b = ty + self.hero.heroSprite.height
rect = [l, t, r, b]
if withinrect( x, y, rect):
self.sound.Play(self.sound.heropain)
self.hero.hit()
gBattleRep.herohit += 1
gBattleRep.report()
return True
return False
def pause(self):
self.pauseBool = not self.pauseBool
class ScreenTime():
def __init__(self,countup=True,inittime=0):
self.seconds = inittime
self.countup = countup
self.status = pyglet.text.Label('Time Reading',
font_name='Times New Roman',
font_size=24,
x=800, y = 20)
clock.schedule_interval(self.autocall, 1.0)
self.mode = 'stop'
def autocall(self,dt):
if self.mode == 'start':
if self.countup:
self.seconds += 1
elif not self.countup:
self.seconds -= 1
pass
def start(self):
self.mode = 'start'
pass
def stop(self):
self.mode = 'stop'
pass
def reset(self):
self.seconds = 0
def draw(self):
self.status.text = str(self.seconds)
self.status.draw()
pass
'''
to allow the number keys to be programmed with different weapons
as to the mission at hand.
'''
class EquipmentKeys():
def __init__(self):
self.keydata = []
i = 0
for i in range(10):
self.keydata.append(Const.HandToolConst)
pass
def changekey(self,key,equipment):
assert(key > -1 and key < 10)
self.keydata[key] = equipment
def get(self,key):
assert(key > -1 and key < 10)
return self.keydata[key]
def reset(self):
self.__init__()
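# Minimal usage sketch (mirrors how ShootingGallery.init programs the number keys):
#   keys = EquipmentKeys()
#   keys.changekey(1, Const.knifeKbar)
#   keys.changekey(2, Const.pistol1911)
#   keys.get(1)   # -> 'KBar knife'; keys never assigned still return Const.HandToolConst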
'''
Place where stuff that can be shot at are placed.
Tango
Hostages
Cover
can be distant and nearby
distant for sniper
How to account for the shot?
Scoring?
'''
class ShootingGallery():
def __init__(self):
# prevent the tangos get faster and faster
self.runauto = 0 # ensure autocall run once only?
self.gamestage = 0 # to allow staging of different scenarios
self.stageDepth = 0 # deeper missions
self.attHero = AttackHero()
self.CBattHero = AttackHero()
self.CBattHero2 = AttackHero()
self.pauseBool = False
self.timers = ScreenTime(countup=True,inittime=0)
self.wayp = Waypoints()
def initAttack(self,hero):
self.herotarget = hero
self.herotarget.setscreen(self.winWidth,self.winHeight)
self.attHero.addHero(hero)
self.CBattHero.addHero(hero)
self.CBattHero2.addHero(hero)
self.explodeObj = Explosion()
pass
def setWinSize(self,w,h):
self.winHeight = h
self.winWidth = w
def key9(self,which):
return self.equipment.get(which)
#pass
def depthChange(self):
if self.gamestage == 1 and self.stageDepth == 1:
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'aircraftcabin.jpg')
self.sound.Play(self.sound.m32MGL) #breach sound
self.equipment.reset()
self.equipment.changekey(1, Const.knifeKbar)
self.equipment.changekey(2, Const.pistol1911)
self.equipment.changekey(3, Const.STCPW)
self.equipment.changekey(4, Const.GLOCK)
self.equipment.changekey(5, Const.M107sniper)
self.equipment.changekey(6, Const.MP5)
self.timers.reset()
self.timers.start()
i = 9
self.TargetObj.create(i, self.winWidth, self.winHeight,'real')
self.CommandoBaddies.create(1, self.winWidth, self.winHeight)
self.attHero.addBadGuys(self.TargetObj.getList())
self.CBattHero.addBadGuys(self.CommandoBaddies.getList())
self.hostages = Hostages()
self.hostages.create(30,self.winWidth,self.winHeight)
self.hostages.setPanic(False)
self.stageDepth = 0
pass
def init(self):
self.sound = gSound
#self.boardlist = []
self.TargetObj = TargetBoard()
self.CommandoBaddies = TangoCommando()
self.coverlist = []
self.equipment = EquipmentKeys()
if self.gamestage == 0:
i = 0
self.equipment.reset()
self.equipment.changekey(1, Const.M32GRENADElauncher)
self.equipment.changekey(2, Const.pistol1911)
self.equipment.changekey(3, Const.STCPW)
self.equipment.changekey(4, Const.GLOCK)
self.equipment.changekey(5, Const.MP5)
self.equipment.changekey(6, Const.M107sniper)
self.equipment.changekey(7, Const.M134MiniGun)
self.equipment.changekey(8, Const.ClaymoreMine)
self.hostages = Hostages()
self.hostages.create(0,self.winWidth,self.winHeight)
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'rifle_range.jpg')
self.TargetObj.create(10, self.winWidth, self.winHeight,'dummy')
elif self.gamestage == 1:
self.equipment.reset()
self.equipment.changekey(1, Const.knifeKbar)
self.equipment.changekey(2, Const.pistol1911)
self.equipment.changekey(3, Const.STCPW)
self.equipment.changekey(4, Const.GLOCK)
self.equipment.changekey(5, Const.MP5)
#self.equipment.changekey(5, Const.M249SAW)
self.wayp.add(400, 100)
self.wayp.add(500, 100)
self.wayp.add(600, 100)
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'tarmac.jpg')
#'aircraftcabin.jpg')
#while i > 0: #tangos
#x = random.randrange(1,self.winWidth)
#y = random.randrange(500,self.winHeight)
#a = [x,y]
#self.boardlist.append(a)
#i-=1
elif self.gamestage == 2:
self.equipment.reset()
self.equipment.changekey(1, Const.knifeKbar)
self.equipment.changekey(2, Const.STCPW)
self.equipment.changekey(3, Const.M4assaultRifle)
self.equipment.changekey(4, Const.SAR21Rifle)
self.equipment.changekey(5, Const.Ultimax100SAW)
self.equipment.changekey(6, Const.M32GRENADElauncher)
self.equipment.changekey(7, Const.M249SAW)
self.equipment.changekey(8, Const.ClaymoreMine)
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'Afghan_village_destroyed_by_the_Soviets.jpg')
self.hostages = Hostages()
self.hostages.create(0,self.winWidth,self.winHeight)
self.hostages.setPanic(False)
i = 100
self.TargetObj.create(i, self.winWidth, self.winHeight,'real')
#while i > 0: #tangos
#x = random.randrange(1,self.winWidth)
#y = random.randrange(500,self.winHeight)
#a = [x,y]
#self.boardlist.append(a)
#i-=1
self.attHero.addBadGuys(self.TargetObj.getList())
elif self.gamestage == 3:
self.timers.reset()
self.timers.start()
self.equipment.reset()
self.equipment.changekey(1, Const.knifeKbar)
self.equipment.changekey(2, Const.STCPW)
self.equipment.changekey(3, Const.M4assaultRifle)
self.equipment.changekey(4, Const.SAR21Rifle)
self.equipment.changekey(5, Const.Ultimax100SAW)
self.equipment.changekey(6, Const.M32GRENADElauncher)
self.equipment.changekey(7, Const.M134MiniGun)
self.equipment.changekey(8, Const.M107sniper)
self.hostages = Hostages()
self.hostages.create(30,self.winWidth,self.winHeight)
self.hostages.setPanic(True)
i = 10
self.TargetObj.create(i, self.winWidth, self.winHeight,'real')
#while i > 0: #tangos
#x = random.randrange(1,self.winWidth)
#y = random.randrange(500,self.winHeight)
#a = [x,y]
#self.boardlist.append(a)
#i-=1
self.attHero.addBadGuys(self.TargetObj.getList())
i = 20
self.coverlist = []
self.coverObj = CoverLayer()
while i > 0: #cover
x = random.randrange(1,self.winWidth)
y = random.randrange(1,self.winHeight+300)
#y = 200
cov = (x,y)
self.coverlist.append(cov)
i-=1
elif self.gamestage == 4:
self.sound.Play(self.sound.hostageHitsound)
self.equipment.reset()
self.equipment.changekey(1, Const.knifeKbar)
self.equipment.changekey(2, Const.GLOCK)
self.equipment.changekey(3, Const.STCPW)
self.equipment.changekey(4, Const.MP5)
self.equipment.changekey(5, Const.M107sniper)
self.timers.reset()
self.timers.start()
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'Kuala-Lumpur-Federal-Hotel-Street-Front.jpg')
i = 5
self.CommandoBaddies.create(i, self.winWidth, self.winHeight)
#while i > 0: #tangos
#x = random.randrange(1,self.winWidth)
#y = random.randrange(500,self.winHeight)
#a = [x,y]
#self.boardlist.append(a)
#i-=1
self.CBattHero.addBadGuys(self.CommandoBaddies.getList())
self.hostages = Hostages()
self.hostages.create(20,self.winWidth,self.winHeight)
self.hostages.setPanic(True)
elif self.gamestage == 5:
self.equipment.reset()
self.equipment.changekey(1, Const.knifeKbar)
self.equipment.changekey(2, Const.GLOCK)
self.equipment.changekey(3, Const.STCPW)
self.equipment.changekey(4, Const.M4assaultRifle)
self.equipment.changekey(5, Const.M249SAW)
self.equipment.changekey(6, Const.M134MiniGun)
self.equipment.changekey(7, Const.M107sniper)
self.equipment.changekey(8, Const.ClaymoreMine)
self.timers.reset()
self.timers.start()
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'Nuclear.power.plant.Dukovany.jpg')
i = 50
self.CommandoBaddies.create(i, self.winWidth, self.winHeight)
self.CBattHero.addBadGuys(self.CommandoBaddies.getList())
self.CBattHero2.addBadGuys(self.CommandoBaddies.getList())
self.hostages = Hostages()
self.hostages.create(0,self.winWidth,self.winHeight)
elif self.gamestage == 6:
self.equipment.reset()
self.equipment.changekey(1, Const.M4assaultRifle)
self.hostages = Hostages()
self.hostages.create(0,self.winWidth,self.winHeight)
self.background = \
pyglet.image.load(Const.backgroundFolder+\
'Arlington-National-Cemetery-during-Spring.jpg')
pass
elif self.gamestage == 7:
self.hostages = Hostages()
self.hostages.create(0,self.winWidth,self.winHeight)
self.gamestage = -1
self.timers.stop()
self.timers.reset()
gBattleRep.init() #reset stats
gBattleRep.report()
self.herotarget.resetPos()
#gfwindow = pyglet.window.Window(style=pyglet.window.Window.WINDOW_STYLE_DIALOG)
#i = 0
#self.coverlist = []
#self.coverObj = CoverLayer()
#while i > 0:
#x = random.randrange(1,self.winWidth)
#y = random.randrange(1,self.winHeight+300)
##y = 200
#cov = (x,y)
#self.coverlist.append(cov)
#i-=1
if self.runauto == 0: #run once
clock.schedule_interval(self.autocall, 0.25)
#self.background = pyglet.image.load(Const.backgroundFolder+'Afghan_village_destroyed_by_the_Soviets.jpg')
self.runauto = 1
def autocall(self,dt):
if self.pauseBool: return
i = 0
#m = len(self.boardlist)-1
if not self.TargetObj.tangoDead():
self.attHero.add(self.herotarget) #keep attacking hero
if not self.CommandoBaddies.tangoDead():
self.CBattHero.add(self.herotarget)
self.CBattHero2.add(self.herotarget)
self.TargetObj.move(self.winWidth,self.winHeight)
self.CommandoBaddies.move(self.winWidth,self.winHeight)
self.wayp.checkhit(self.herotarget.heroSprite.x,self.herotarget.heroSprite.y)
if self.gamestage == 1 and self.wayp.complete():
print 'complete'
self.wayp.reset()
self.stageDepth += 1
self.depthChange()
#if m >= 0:
#self.attHero.add(self.herotarget)
#pass
#while m > -1:
#a = self.boardlist[m][0]
#b = self.boardlist[m][1]
#x,y = self.TargetObj.move(self.winWidth,self.winHeight,a,b)
#if Const.suicideBomber and self.SuicideBomberHit(x,y):
#print 'xxx bomber'
#gBattleRep.heroblasted += 1
#gBattleRep.report()
#self.boardlist.remove(self.boardlist[m])
#else:
#self.boardlist[m][0] = x
#self.boardlist[m][1] = y
#m-=1
#pass
def Rescue(self,x,y):
self.hostages.Rescue(x, y)
def Hit(self,x,y,name):
retz = False
self.hostages.hit(x,y)
if name != Const.M107sniper and \
name != Const.M134MiniGun: #can go through walls if powerful weapon
for c in self.coverlist:
if self.coverObj.Hit(c,x,y):
retz= True
break
if retz:
return
self.TargetObj.TangoShotcheck(x,y)
self.CommandoBaddies.TangoShotcheck(x,y)
if name == Const.M107sniper or \
name == Const.M134MiniGun: #one shot equals 4 shots
for i in xrange(0,4):
self.CommandoBaddies.TangoShotcheck(x,y)
if self.TargetObj.tangoDead() and \
self.CommandoBaddies.tangoDead():
self.timers.stop()
#for i in self.boardlist:
#if self.TargetObj.Hit(x,y,i):
##print 'hit hit'
#self.sound.Play(self.sound.pain)
#self.boardlist.remove(i)
#break
#if self.boardlist == []: #tangos dead
#self.timers.stop()
def SuicideBomberHit(self,x,y):
br = 100
gl = x - br
gt = y - br
gr = x + br
gb = y + br
grect = [gl,gt,gr,gb]
hx = self.herotarget.heroSprite.x
hy = self.herotarget.heroSprite.y
if withinrect(hx,hy,grect):
self.explodeObj.add(x, y)
self.herotarget.hit()
self.sound.Play(self.sound.m32MGL)
return True
else:
return False
def HitGrenade(self,x,y):
br = 200 #blast radius
self.explodeObj.add(x, y)
gl = x - br
gt = y - br
gr = x + br
gb = y + br
grect = [gl,gt,gr,gb]
tangos = self.TargetObj.getList()
dead = self.TargetObj.getDeadlist()
for i in tangos:
if withinrect(i[0],i[1],grect):
self.sound.Play(self.sound.pain)
tangos.remove(i)
dead.append([i[0],i[1]])
gBattleRep.numberblasted += 1
gBattleRep.report()
self.hostages.HitGrenade(grect)
if self.TargetObj.tangoDead(): #tangos dead
self.timers.stop()
def Claymore(self,x,y):
#print 'claymore'
self.explodeObj.add(x, y)
w = 250
h = 300
g = 20
sx = x - w
sy = y - h
ex = x + w
ey = y + h
bx = sx
by = sy
p = 0
while True:
p += 1
#bx = random.randrange(1,self.winWidth)
#by = random.randrange(1,self.winHeight)
self.Hit(bx, by, Const.ClaymoreMine)
bx += g
if bx > ex:
bx = sx
by += g
if by > ey:
break
print 'pellets', p
def Draw(self):
# the 60 gives a status bar for free
self.background.blit(0,60,width=self.winWidth,height=self.winHeight)
#self.background.draw() #cannot do this way - cannot set width/height
self.explodeObj.draw()
#for i in self.boardlist:
#self.TargetObj.Draw(i)
self.TargetObj.Draw()
self.CommandoBaddies.Draw()
for i in self.coverlist:
self.coverObj.Draw(i)
self.hostages.draw()
self.attHero.draw()
self.CBattHero.draw()
self.timers.draw()
self.wayp.draw()
#self.hero.draw()
def pause(self):
self.pauseBool = not self.pauseBool
self.attHero.pause()
self.CBattHero.pause()
#print 'pause',self.pauseBool
#class ShootingGallery():
#gTargetBoard = TargetBoard()
gShootGallery = ShootingGallery()
gBulletHoles = BulletHoles()
class Knife():
def __init__(self,name):
self.name = name
print 'knife init'
self.mode = Const.knifeSafe
self.data = Const.folder
weapondata = self.Database(name)
self.drawing = SpriteLoad(self.data+weapondata[0])
self.drawing.scale = weapondata[1]
self.sound = gSound
#self.bulleth = bulletholes
self.mousex = 0
self.mousey = 0
self.magazine = weapondata[2]
self.ammo = self.magazine
self.reloadweapon = False
self.status = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=24,
x=220, y = 20)
self.SetText()
self.reticle = weapondata[3]
def SetText(self):
self.report = self.name + ' ' + self.mode + ' ' + str(self.ammo)
self.status.text = self.report
def Database(self,name):
#filename,scale,magazine capacity,
if name == Const.knifeKbar:
return 'kbar knife side 1217_h_lg.png', \
0.25,1,'kbar knife side 1217_h_lgup.png'
else:
            raise Exception("Knife weapon does not exist!")
def mouse(self,x,y):
print self.name,x,y
pass
def mouseup(self,x,y):
pass
def mousedrag(self,x,y):
#knife has drag
pass
def mousepos(self,x,y): #knife
#print 'mouse',x,y
if self.mode == Const.knifeArmed:
gShootGallery.Hit(x, y,Const.knifeKbar)
def select(self):
self.mode = HandleModeSelect(Const.knifeModes, self.mode)
self.SetText()
def draw(self):
self.drawing.draw()
self.status.draw()
pass
def Reload(self):
pass
def SetSights(self,win): #knife
image = pyglet.image.load(Const.folder+self.reticle)
x = image.height / 2
y = image.width / 2
cursor = pyglet.window.ImageMouseCursor(image, x, y)
win.set_mouse_cursor( cursor)
pass
class HandTool():
def __init__(self,name):
self.name = name
self.data = Const.folder
print 'hand tool init'
self.reticle = 'Cursor hand white.png'
self.handName = 'Cursor hand whiteB.png'
self.drawing = SpriteLoad(self.data+self.handName)
self.drawing.set_position(20, 0)
self.status = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=24,
x=220, y = 20)
self.SetText()
def SetText(self):
self.report = 'This hand tool can rescue hostages'
self.status.text = self.report
def Database(self,name):
#filename,scale,magazine capacity,
if name == Const.knifeKbar:
return 'kbar knife side 1217_h_lg.png', \
0.25,1,'kbar knife side 1217_h_lgup.png'
else:
            raise Exception("Hand tool does not exist!")
def mouse(self,x,y):
print self.name,x,y
pass
def mouseup(self,x,y):
#gShootGallery.Rescue(x, y)
pass
def mousedrag(self,x,y):
#hand has drag
pass
def mousepos(self,x,y): #knife
#print 'mouse',x,y
gShootGallery.Rescue(x, y)
pass
def select(self):
pass
def draw(self):
self.drawing.draw()
self.status.draw()
pass
def Reload(self):
pass
def SetSights(self,win):
image = pyglet.image.load(Const.folder+self.reticle)
cursor = pyglet.window.ImageMouseCursor(image, 25, 25)
win.set_mouse_cursor( cursor)
pass
#class Pistol():
#def __init__(self,
#name,
##sound,
##bulletholes,
#):
#self.name = name
#print 'pistol init'
#self.mode = Const.pistolSafe
#self.data = Const.folder
#weapondata = self.Database(name)
#self.drawing = SpriteLoad(self.data+weapondata[0])
#self.drawing.scale = weapondata[1]
#self.sound = gSound
#self.bulleth = gBulletHoles
#self.mousex = 0
#self.mousey = 0
#self.magazine = weapondata[2]
#self.ammo = self.magazine
#self.reloadweapon = False
#self.status = pyglet.text.Label('Hello, world',
#font_name='Times New Roman',
#font_size=24,
#x=220, y = 20)
#self.SetText()
#self.reticle = weapondata[3]
#pass
#def Database(self,name):
##filename,scale,magazine capacity,
#if name == Const.pistol1911:
#return 'm1911pistol.jpg',0.25,15,'reticlePistol1911.png'
#else:
#raise Exception("pistol Weapon not exist!")
#def reloadCall(self,dt):
#if self.reloadweapon:
#self.reloadtime -= 1
#if self.reloadtime < 1:
#self.ammo = self.magazine
#self.SetText()
#clock.unschedule(self.reloadCall)
#self.reloadweapon = False
#def mouse(self,x,y):
#if self.mode != Const.pistolSafe:
#self.trigger = True
#self.mousex = x
#self.mousey = y
#self.Fire()
#def mouseup(self,x,y):
#self.trigger = False
#def mousedrag(self,x,y):
##pistol got no drag
#pass
#def Fire(self):
#if self.ammo > 0:
##self.sound.Play(self.sound.sar21)
#self.sound.Play(self.sound.m1911)
#x = self.mousex
#y = self.mousey
#self.bulleth.record(x,y)
#self.ammo -= 1
#self.SetText()
##gTargetBoard.Hit(x, y)
#gShootGallery.Hit(x, y)
#def SetText(self):
#self.report = self.name + ' ' + self.mode + ' ' + str(self.ammo)
#self.status.text = self.report
#def select(self):
##print 'pistol mode'
#self.mode = HandleModeSelect(Const.pistolModes, self.mode)
##print self.mode
#self.SetText()
##print self.mode
#def draw(self):
#self.drawing.draw()
#self.bulleth.draw()
#self.status.draw()
#pass
#def Reload(self):
#self.sound.Player(self.sound.reLoad)
#self.reloadweapon = True
#self.reloadtime = 3
#clock.schedule_interval(self.reloadCall, 1.0)
#def SetSights(self,win):
#image = pyglet.image.load(Const.folder+self.reticle)
#cursor = pyglet.window.ImageMouseCursor(image, 25, 25)
#win.set_mouse_cursor( cursor)
#pass
class AssaultRifle():
def __init__(self,
name,
numberMagazines,
#bulletholes,
):
self.name = name
print 'AssaultRifle init'
self.mode = Const.assaultRifleSafe
self.rateFire = 1.0
self.trigger = False
self.auto = False
self.data = Const.folder
self.sound = gSound
self.weaponsound = None
self.availableModes = None
weapondata = self.Database(name)
self.drawing = SpriteLoad(self.data+weapondata[0])
self.drawing.scale = weapondata[1]
self.bulleth = gBulletHoles
self.mousex = 0
self.mousey = 0
self.magazine = weapondata[2]
self.ammo = self.magazine
self.reloadweapon = False
self.magazines = numberMagazines
self.status = pyglet.text.Label('Hello, world',
font_name='Times New Roman',
font_size=24,
x=220, y = 20)
self.SetText()
self.reticle = weapondata[3]
self.availableModes = weapondata[4]
self.rateFire = weapondata[5]
pass
def Database(self,name):
#filename,scale,magazine capacity,reticle,modes,rateOfFire
if name == Const.M4assaultRifle:
self.weaponsound = self.sound.m4carbine
return 'm4_1.jpg',0.25,30,'reticleM4.png',\
Const.assaultRifleModes,0.1
elif name == Const.SAR21Rifle:
self.weaponsound = self.sound.sar21
return 'sar21_1.jpg',0.3,30,'reticle.png',\
Const.assaultRifleModes,0.1
elif name == Const.Ultimax100SAW:
self.weaponsound = self.sound.sar21
return 'ultimax_mk3_3.jpg',0.2,100,'reticle.png',\
Const.machineGunModes,0.1
elif name == Const.M134MiniGun:
self.weaponsound = self.sound.minigunSound #4400
return 'minigun800px-DAM134DT.png',0.2,500,\
'reticleM4.png',Const.machineGunModes,0.01
elif name == Const.pistol1911:
self.weaponsound = self.sound.m1911
return 'm1911pistol.jpg',0.25,7,\
'reticlePistol1911.png',Const.pistolModes,1.0
elif name == Const.M32GRENADElauncher:
self.weaponsound = self.sound.m32MGL
return 'M32MGL.png',0.3,12,\
'M32_Iron_Sights_MW3DS.png',Const.pistolModes,1.0
elif name == Const.STCPW:
self.weaponsound = self.sound.stcpwsnd
return 'ST_Kinetics_CPW_Submachine_Gun_(SMG)_1.jpg',0.15,30,\
'reticle.png',Const.assaultRifleModes,0.1
elif name == Const.GLOCK:
self.weaponsound = self.sound.glocksnd
return 'glockhqdefault.jpg',0.25,18,\
'reticlePistol1911.png',Const.assaultRifleModes,0.1
elif name == Const.M249SAW:
self.weaponsound = self.sound.m249sawSound
return '800px-PEO_M249_Para_ACOG.jpg',0.25,200,\
'reticleM4.png',Const.machineGunModes,0.1
elif name == Const.M107sniper:
self.weaponsound = self.sound.m107Sound
return 'M107Cq.jpg',0.5,10,\
'M107largeSights.png',Const.pistolModes,1.0
elif name == Const.ClaymoreMine:
self.weaponsound = self.sound.m18Sound
return 'Claymore2.jpg',0.3,5,\
'Claymore2aimer.jpg',Const.pistolModes,1.0
elif name == Const.MP5:
self.weaponsound = self.sound.mp5sound
return 'mp5a3.jpg',0.3,30,\
'mp5sights.png',Const.assaultRifleModes,0.15
else:
            raise Exception("Weapon does not exist!")
def SetText(self):
self.report = self.name + ' ' + self.mode + ' ' + str(self.ammo) + ' ' + str(self.magazines)
self.status.text = self.report
def Fire(self):
if self.ammo > 0:
self.sound.Play(self.weaponsound)
x = self.mousex
y = self.mousey
self.bulleth.record(x,y)
self.ammo -= 1
self.SetText()
if self.name == Const.M32GRENADElauncher:
gShootGallery.HitGrenade(x, y)
elif self.name == Const.ClaymoreMine:
gShootGallery.Claymore(x, y)
elif self.name != Const.M32GRENADElauncher:
gShootGallery.Hit(x, y,self.name)
def draw(self):
self.drawing.draw()
self.bulleth.draw()
self.status.draw()
def autocall(self,dt):
if self.trigger:
#print 'autofire'
self.Fire()
def reloadCall(self,dt):
if self.reloadweapon:
self.reloadtime -= 1
if self.reloadtime < 1 and self.magazines > 0:
self.magazines -= 1
self.ammo = self.magazine
self.SetText()
clock.unschedule(self.reloadCall)
self.reloadweapon = False
def mouse(self,x,y):
#print self.name,x,y
if self.mode != Const.assaultRifleSafe:
self.trigger = True
self.mousex = x
self.mousey = y
self.Fire()
pass
def mousepos(self,x,y):
pass
def mouseup(self,x,y):
self.trigger = False
def mousedrag(self,x,y):
if self.mode == Const.assaultRifleAuto:
#self.mouse(x, y)
self.mousex = x
self.mousey = y
pass
def Reload(self):
self.sound.Player(self.sound.reLoad)
self.reloadweapon = True
self.reloadtime = 2
clock.schedule_interval(self.reloadCall, 1.0)
def select(self):
#print 'pistol mode'
#self.mode = HandleModeSelect(Const.assaultRifleModes, self.mode)
self.mode = HandleModeSelect(self.availableModes, self.mode)
self.SetText()
print self.mode
if self.mode == Const.assaultRifleAuto:
clock.schedule_interval(self.autocall, self.rateFire)
self.auto = True
elif self.auto:
clock.unschedule(self.autocall)
self.auto = False
def SetSights(self,win):
image = pyglet.image.load(Const.folder+self.reticle)
x = image.height / 2
y = image.width / 2
cursor = pyglet.window.ImageMouseCursor(image, x, y)
win.set_mouse_cursor( cursor)
pass
def AddMagazines(self,numMags):
self.magazines += numMags
class CurrentGame():
def __init__(self):
self.currentWeapon = 'nothing'
self.weaponDict = {}
#self.sound = Sounds()
self.sound = gSound
self.bulletholes = BulletHoles()
self.knife = Knife(Const.knifeKbar)
self.weaponDict[Const.knifeKbar] = self.knife
self.hand = HandTool(Const.HandToolConst)
self.weaponDict[Const.HandToolConst] = self.hand
#self.pistol = Pistol(Const.pistol1911,
##self.sound,
##self.bulletholes
#)
#self.weaponDict[Const.pistol1911] = self.pistol
#self.M4assaultRifle = AssaultRifle(Const.M4assaultRifle,
#self.sound,
#self.bulletholes)
#self.weaponDict[Const.M4assaultRifle] = self.M4assaultRifle
#self.Sar21assaultRifle = AssaultRifle(Const.SAR21Rifle,
#self.sound,
#self.bulletholes)
#self.weaponDict[Const.SAR21Rifle] = self.Sar21assaultRifle
self.AddRifleWeapon(Const.M4assaultRifle,10)
self.AddRifleWeapon(Const.SAR21Rifle,10)
self.AddRifleWeapon(Const.Ultimax100SAW,3)
self.AddRifleWeapon(Const.M134MiniGun,1)
self.AddRifleWeapon(Const.pistol1911,5)
self.AddRifleWeapon(Const.M32GRENADElauncher,6)
self.AddRifleWeapon(Const.STCPW,5)
self.AddRifleWeapon(Const.GLOCK,5)
self.AddRifleWeapon(Const.M249SAW,2)
self.AddRifleWeapon(Const.M107sniper,4)
self.AddRifleWeapon(Const.ClaymoreMine, 2)
self.AddRifleWeapon(Const.MP5, 5)
self.choose(Const.HandToolConst) #default to hand
self.hero = Hero()
gShootGallery.initAttack(self.hero)
#self.tb = TargetBoard()
def AddRifleWeapon(self,name,magazinesCarried):
self.weaponDict[name] = AssaultRifle(name,
magazinesCarried
#self.sound,
#self.bulletholes
)
pass
def choose(self,weapon):
self.currentWeapon = weapon
#print 'current weapon', self.currentWeapon
try:
self.cw = self.weaponDict[self.currentWeapon]
#print 'namee', self.cw.name
        except KeyError:
print '{ERROR} choose weapon'
def mousedown(self,x,y):
#print 'name m', self.cw.name
self.cw.mouse(x,y)
#self.tb.Hit(x, y)
def mousepos(self,x,y):
self.cw.mousepos(x,y)
def mouseup(self,x,y):
self.cw.mouseup(x,y)
def mousedrag(self,x,y):
self.cw.mousedrag(x,y)
def reloadWeapom(self):
self.cw.Reload()
def select(self):
self.cw.select()
def draw(self):
#gTargetBoard.Draw()
gShootGallery.Draw()
self.cw.draw()
self.hero.draw()
def SetSight(self,win):
self.cw.SetSights(win)
#image = pyglet.image.load(Const.folder+'reticle.png')
#cursor = pyglet.window.ImageMouseCursor(image, 25, 25)
#return cursor
def AddMagazines(self):
self.cw.AddMagazines(5)
def key9(self,whichkey):
self.choose(gShootGallery.key9(whichkey))
pass
#class CurrentGame():
class FPSWin(pyglet.window.Window):
def __init__(self):
super(FPSWin, self).__init__(resizable = True)
self.maximize()
#self.set_visible(visible=False)
self.ikey = False # allow i key to toggle
self.set_caption('SHOOTER the game by George Loo')
#self.set_fullscreen(True, screen=None)
#self.set_exclusive_mouse()
#self.set_size(1600, 900)
#print 'winsize',Const.winHeight, Const.winWidth
#self.set_location(300,50)
self.clear()
gShootGallery.setWinSize(self.width, self.height)
gShootGallery.init()
self.game = CurrentGame()
self.game.SetSight(self) #pass in window self
#gmw.set_visible(visible=True)
self.set_visible(visible=True)
def on_mouse_press(self,x, y, button, modifiers):
if button==pyglet.window.mouse.LEFT:
#print 'mouse left'
self.game.mousedown(x,y)
pass
#self.set_fullscreen(False, screen=None)
if button==pyglet.window.mouse.RIGHT:
#print 'mouse right'
pass
def on_mouse_release(self,x, y, button, modifiers):
if button==pyglet.window.mouse.LEFT:
#print 'mouse left up'
self.game.mouseup(x,y)
pass
if button==pyglet.window.mouse.RIGHT:
#print 'mouse right up'
pass
def on_mouse_motion(self, x, y, dx, dy):
##print 'motion'
self.game.mousepos(x,y)
#pass
#def on_resize(self, width, height):
#print 'resize',width, height
#self.set_size(width, height)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
if buttons==pyglet.window.mouse.LEFT:
#print 'drag'
self.game.mousedrag(x,y)
pass
def on_key_press(self, symbol, modifiers):
if symbol == key.F12:
#print 'F12'
pass
elif symbol == key._1:
#print '1',Const.knifeKbar
#self.game.choose(Const.knifeKbar)
self.game.key9(1)
self.game.SetSight(self)
elif symbol == key._2:
#print '2',Const.pistol1911
self.game.key9(2)
#self.game.choose(Const.pistol1911)
self.game.SetSight(self)
elif symbol == key._3:
#print '3'
#self.game.choose(Const.M4assaultRifle)
self.game.key9(3)
self.game.SetSight(self)
elif symbol == key._4:
#self.game.choose(Const.SAR21Rifle)
self.game.key9(4)
#print '4'
self.game.SetSight(self)
#self.set_mouse_cursor( self.game.GetSight())
elif symbol == key._5:
#self.game.choose(Const.Ultimax100SAW)
self.game.key9(5)
self.game.SetSight(self)
#print '5'
elif symbol == key._6:
self.game.key9(6)
#self.game.choose(Const.M134MiniGun)
self.game.SetSight(self)
#print '6'
elif symbol == key._7:
self.game.key9(7)
#self.game.choose(Const.M32GRENADElauncher)
self.game.SetSight(self)
#print '7'
elif symbol == key._8:
self.game.key9(8)
self.game.SetSight(self)
pass #print '8'
#gmw.hide()
elif symbol == key._9:
self.game.key9(9)
self.game.SetSight(self)
pass
#print '9'
elif symbol == key._0:
#print '0'
self.game.key9(0)
#self.game.choose(Const.HandToolConst)
self.game.SetSight(self)
pass
elif symbol == key.I:
if not self.ikey:
self.set_fullscreen(False, screen=None)
self.ikey = True
gmw.show()
elif self.ikey:
#gmw.hide()
self.set_fullscreen(True, screen=None)
self.ikey = False
#self.set_visible(visible=False)
#
elif symbol == key.Z:
#print 'Z'
self.game.reloadWeapom()
elif symbol == key.B:
#print 'B - selector'
self.game.select()
elif symbol == key.W:
self.game.hero.goUp()
elif symbol == key.A:
self.game.hero.goLeft()
elif symbol == key.S:
self.game.hero.goBack()
elif symbol == key.D:
self.game.hero.goRight()
def on_key_release(self, symbol, modifiers):
if symbol == key.W:
self.game.hero.stopMoving()
elif symbol == key.A:
self.game.hero.stopMoving()
elif symbol == key.S:
self.game.hero.stopMoving()
elif symbol == key.D:
self.game.hero.stopMoving()
elif symbol == key.F1:
gBattleRep.init() #reset stats
gBattleRep.report()
elif symbol == key.F12 and modifiers & key.MOD_SHIFT:
print 'shift F12'
gShootGallery.init()
elif symbol == key.F12:
gShootGallery.gamestage += 1
gShootGallery.init()
elif symbol == key.F: #first aid = F
if gBattleRep.herohit > 0:
gSound.Play(gSound.reliefSound)
gBattleRep.herohit -= 1 #first aid
gBattleRep.report()
if gBattleRep.herohit == 0:
gSound.Play(gSound.curedSound)
elif symbol == key.F10:
self.game.AddMagazines()
elif symbol == key.PAUSE:
gShootGallery.pause()
def on_draw(self):
self.clear()
self.game.draw()
def on_close(self):
gmw.close()
self.close()
if __name__ == "__main__":
#gmw = MessageWin('Messages')
#gmw2 = MessageWin('Main')
m = FPSWin()
pyglet.app.run()
| 31.114761
| 118
| 0.525176
| 68,236
| 0.867844
| 0
| 0
| 0
| 0
| 0
| 0
| 16,861
| 0.214443
|
4d246a042f4d01726d7da3a16c2ca45068a1a3cb
| 2,462
|
py
|
Python
|
exercises/networking_v2/roles/ansible-network.network-engine/lib/network_engine/plugins/template/__init__.py
|
rcalvaga/linklight
|
bb6364272c167c017cb2ee0790015143df29fa19
|
[
"MIT"
] | 1
|
2020-03-29T17:35:59.000Z
|
2020-03-29T17:35:59.000Z
|
exercises/networking_v2/roles/ansible-network.network-engine/lib/network_engine/plugins/template/__init__.py
|
rcalvaga/linklight
|
bb6364272c167c017cb2ee0790015143df29fa19
|
[
"MIT"
] | null | null | null |
exercises/networking_v2/roles/ansible-network.network-engine/lib/network_engine/plugins/template/__init__.py
|
rcalvaga/linklight
|
bb6364272c167c017cb2ee0790015143df29fa19
|
[
"MIT"
] | 1
|
2020-03-30T11:00:47.000Z
|
2020-03-30T11:00:47.000Z
|
# (c) 2018, Ansible by Red Hat, inc
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import collections
from ansible.module_utils.six import iteritems, string_types
from ansible.errors import AnsibleUndefinedVariable
class TemplateBase(object):
def __init__(self, templar):
self._templar = templar
def __call__(self, data, variables, convert_bare=False):
return self.template(data, variables, convert_bare)
def run(self, template, variables):
pass
def template(self, data, variables, convert_bare=False):
if isinstance(data, collections.Mapping):
templated_data = {}
for key, value in iteritems(data):
templated_key = self.template(key, variables, convert_bare=convert_bare)
templated_value = self.template(value, variables, convert_bare=convert_bare)
templated_data[templated_key] = templated_value
return templated_data
elif isinstance(data, collections.Iterable) and not isinstance(data, string_types):
return [self.template(i, variables, convert_bare=convert_bare) for i in data]
else:
data = data or {}
tmp_avail_vars = self._templar._available_variables
self._templar.set_available_variables(variables)
try:
resp = self._templar.template(data, convert_bare=convert_bare)
resp = self._coerce_to_native(resp)
except AnsibleUndefinedVariable:
resp = None
pass
finally:
self._templar.set_available_variables(tmp_avail_vars)
return resp
def _coerce_to_native(self, value):
if not isinstance(value, bool):
try:
value = int(value)
except Exception:
if value is None or len(value) == 0:
return None
pass
return value
def _update(self, d, u):
for k, v in iteritems(u):
if isinstance(v, collections.Mapping):
d[k] = self._update(d.get(k, {}), v)
else:
d[k] = v
return d
| 34.676056
| 92
| 0.620634
| 1,969
| 0.799756
| 0
| 0
| 0
| 0
| 0
| 0
| 262
| 0.106418
|
4d24ec942e343c870cf4c7c64a35ce9e7ec32816
| 748
|
py
|
Python
|
test/try_closures.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 21
|
2019-10-13T14:11:32.000Z
|
2021-12-14T02:42:12.000Z
|
test/try_closures.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 1
|
2020-01-07T13:14:46.000Z
|
2020-01-09T16:58:07.000Z
|
test/try_closures.py
|
RemuLang/sijuiacion-lang
|
e3b5be79fb7afadc0790311e612ddd430b3f0b9d
|
[
"MIT"
] | 1
|
2020-08-13T16:17:09.000Z
|
2020-08-13T16:17:09.000Z
|
from Redy.Opt import feature, constexpr
import timeit
class Closure(tuple):
def __call__(self, a):
c, f = self
return f(c, a)
def f1(x):
def g(y):
return x + y
return g
def fc(c, y):
return c + y
@feature(constexpr)
def f2(x):
return constexpr[Closure]((x, constexpr[fc]))
print(f1(1)(2))
print(f2(1)(2))
# 3
# 3
# mk closure
print(timeit.timeit("f(1)", globals=dict(f=f1)))
print(timeit.timeit("f(1)", globals=dict(f=f2)))
# 0.15244655999958923
# 0.16590227899905585
f1_ = f1(2)
f2_ = f2(2)
print(timeit.timeit("f(1)", globals=dict(f=f1_)))
print(timeit.timeit("f(1)", globals=dict(f=f2_)))
# 0.08070355000018026
# 0.20936105600048904
# So, use builtin closures instead of making our own
| 15.914894
| 52
| 0.639037
| 91
| 0.121658
| 0
| 0
| 80
| 0.106952
| 0
| 0
| 178
| 0.237968
|
4d2521ea6310ee9cc5f131827f5a83488f594d5f
| 66
|
py
|
Python
|
python/testData/refactoring/move/docstringTypes/before/src/b.py
|
jnthn/intellij-community
|
8fa7c8a3ace62400c838e0d5926a7be106aa8557
|
[
"Apache-2.0"
] | 2
|
2018-12-29T09:53:39.000Z
|
2018-12-29T09:53:42.000Z
|
python/testData/refactoring/move/docstringTypes/before/src/b.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/refactoring/move/docstringTypes/before/src/b.py
|
Cyril-lamirand/intellij-community
|
60ab6c61b82fc761dd68363eca7d9d69663cfa39
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
def f(x):
'''Does nothing.
:type x: a.C
'''
pass
| 9.428571
| 20
| 0.409091
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 42
| 0.636364
|
4d2554382ea5d2316a7d1a204e1adf6165ec8877
| 195
|
py
|
Python
|
datapack/validate.py
|
emorgan00/EasyDatapacks
|
535bf30a23e4e8fa22ff827bc6c223f91a0228ed
|
[
"MIT"
] | 35
|
2019-06-23T22:35:56.000Z
|
2022-02-23T18:09:25.000Z
|
datapack/validate.py
|
emorgan00/EasyDatapacks
|
535bf30a23e4e8fa22ff827bc6c223f91a0228ed
|
[
"MIT"
] | 5
|
2019-07-08T04:54:21.000Z
|
2022-03-24T12:44:19.000Z
|
datapack/validate.py
|
emorgan00/EasyDatapacks
|
535bf30a23e4e8fa22ff827bc6c223f91a0228ed
|
[
"MIT"
] | 5
|
2019-06-24T04:09:15.000Z
|
2022-02-22T03:50:41.000Z
|
# this should accept a command as a string, and return a string detailing the issue
# if <command> is not a valid vanilla minecraft command. None otherwise.
def check(command):
return None
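# Hedged usage sketch (added for illustration, not part of the original stub):
# callers treat a non-None return value as the validation error message.
# The command string below is purely hypothetical.
if __name__ == "__main__":
    issue = check("say hello world")
    if issue is not None:
        print("invalid command: " + issue)
    else:
        print("command looks valid")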
| 27.857143
| 83
| 0.753846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 155
| 0.794872
|
4d26e4038ffff6b5c711d810347580c7c6e22de3
| 44
|
py
|
Python
|
Python/Tutorial - 2/strings.py
|
JC2295/FCC_Tutorial_Projects
|
990e1221b2177acb9e4db0264adab518620404a0
|
[
"MIT"
] | null | null | null |
Python/Tutorial - 2/strings.py
|
JC2295/FCC_Tutorial_Projects
|
990e1221b2177acb9e4db0264adab518620404a0
|
[
"MIT"
] | null | null | null |
Python/Tutorial - 2/strings.py
|
JC2295/FCC_Tutorial_Projects
|
990e1221b2177acb9e4db0264adab518620404a0
|
[
"MIT"
] | null | null | null |
print("One")
print("Two")
print("Three")
| 6.285714
| 14
| 0.590909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 17
| 0.386364
|
4d272df6572584be280304452391a2a0947eefaa
| 4,220
|
py
|
Python
|
frontend/Two_Dim_System.py
|
Pugavkomm/NS-analyst
|
698af0e94f57b431fd77c17c49d4a23f11d21d3f
|
[
"MIT"
] | null | null | null |
frontend/Two_Dim_System.py
|
Pugavkomm/NS-analyst
|
698af0e94f57b431fd77c17c49d4a23f11d21d3f
|
[
"MIT"
] | null | null | null |
frontend/Two_Dim_System.py
|
Pugavkomm/NS-analyst
|
698af0e94f57b431fd77c17c49d4a23f11d21d3f
|
[
"MIT"
] | null | null | null |
"""Main application window for the two-dimensional system frontend."""
from frontend import main_window
from PyQt5 import QtWidgets
from frontend import input_system
from PyQt5.QtWidgets import QInputDialog, qApp
from qt_material import apply_stylesheet
style_sheets = ['dark_amber.xml',
'dark_blue.xml',
'dark_cyan.xml',
'dark_lightgreen.xml',
'dark_pink.xml',
'dark_purple.xml',
'dark_red.xml',
'dark_teal.xml',
'dark_yellow.xml',
'light_amber.xml',
'light_blue.xml',
'light_cyan.xml',
'light_cyan_500.xml',
'light_lightgreen.xml',
'light_pink.xml',
'light_purple.xml',
'light_red.xml',
'light_teal.xml',
'light_yellow.xml']
class Two_Dim_system(QtWidgets.QMainWindow, main_window.Ui_MainWindow, input_system.Ui_input_system):
    """Main application window assembled from the generated main-window and input-system UI classes."""
    def __init__(self):
        """Build the generated UI and wire up the widgets via InitUI()."""
super().__init__()
self.ui = main_window.Ui_MainWindow()
self.ui.setupUi(self)
self.InitUI()
    def InitUI(self):
        """Set up the UI and connect menu actions, theme switching and the input-system dialog."""
self.setupUi(self)
# self.statusBar = QStatusBar()
# self.setStatusBar(self.statusBar)
# self.menuFile.setStatusTip()
# self.menuFile.setStatusTip("test")
self.actionExit.triggered.connect(qApp.quit)
self.darkamber.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_amber.xml')))
self.lightamber.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_amber.xml')))
self.darkblue.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_blue.xml')))
self.lightblue.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_blue.xml')))
self.darkcyan.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_cyan.xml')))
self.lightcyan.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_cyan.xml')))
self.darklightgreen.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_lightgreen.xml')))
self.lightlightgreen.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_lightgreen.xml')))
self.darkpink.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_pink.xml')))
self.lightping.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_pink.xml')))
self.darkpurple.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_purple.xml')))
self.lightpurple.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_purple.xml')))
self.darkred.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_red.xml')))
self.lightred.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_red.xml')))
self.darkteal.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_teal.xml')))
self.lightteal.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_teal.xml')))
self.darkyellow.triggered.connect(lambda: self.__change_theme(style_sheets.index('dark_yellow.xml')))
self.lightyellow.triggered.connect(lambda: self.__change_theme(style_sheets.index('light_yellow.xml')))
self.actionInput_system.triggered.connect(self.__input_system)
def __input_system(self):
self.window = QtWidgets.QMainWindow()
self.ui = input_system.Ui_input_system()
self.ui.setupUi(self.window)
self.window.show()
    def __change_theme(self, number: int):
        """Persist the selected theme index and apply the matching stylesheet.

        Args:
            number (int): index into the style_sheets list.
        """
with open('config_theme', 'w') as file:
file.write(str(number))
apply_stylesheet(self, theme=style_sheets[number])
print('TEST')
| 45.869565
| 119
| 0.667536
| 3,336
| 0.790521
| 0
| 0
| 0
| 0
| 0
| 0
| 1,153
| 0.273223
|
4d283228992f8ac0459bba73af7effe988be2fc1
| 1,525
|
py
|
Python
|
get_board_array.py
|
SuperStormer/minesweeper-bot
|
b503752ef4f4c1650ea48609dcebf0757d5ad209
|
[
"MIT"
] | null | null | null |
get_board_array.py
|
SuperStormer/minesweeper-bot
|
b503752ef4f4c1650ea48609dcebf0757d5ad209
|
[
"MIT"
] | 2
|
2022-01-13T00:50:34.000Z
|
2022-03-11T23:26:44.000Z
|
get_board_array.py
|
SuperStormer/minesweeper-bot
|
b503752ef4f4c1650ea48609dcebf0757d5ad209
|
[
"MIT"
] | null | null | null |
import mss
import numpy as np
from PIL import Image
from config import BOARD_HEIGHT, BOARD_WIDTH
CELL_SIZE = 22
BOARD_X = 14
BOARD_Y = 111
COLOR_CODES = {
(0, 0, 255): 1,
(0, 123, 0): 2,
(255, 0, 0): 3,
(0, 0, 123): 4,
(123, 0, 0): 5,
(0, 123, 123): 6,
(0, 0, 0): 7,
(123, 123, 123): 8,
(189, 189, 189): 0 #unopened/opened blank
}
def get_cell_type(cell) -> int:
cell_type = COLOR_CODES[cell.getpixel((15, 16))]
#cell_type=COLOR_CODES[cell.getpixel((13,14))]
if cell_type == 0 and cell.getpixel((1, 16)) != (255, 255, 255):
cell_type = -1
return cell_type
def get_board_array() -> np.ndarray:
with mss.mss() as sct:
screenshot = sct.grab(sct.monitors[0])
img = Image.frombytes('RGB', screenshot.size, screenshot.bgra, 'raw', 'BGRX')
#board=img.crop((384,111,1044,463))
board = img.crop((BOARD_X, BOARD_Y, BOARD_X + CELL_SIZE * BOARD_WIDTH, BOARD_Y + CELL_SIZE * BOARD_HEIGHT))
width, height = board.size
cell_imgs = [
board.crop((i, j, i + CELL_SIZE, j + CELL_SIZE)) for j in range(0, height, CELL_SIZE) for i in range(0, width, CELL_SIZE)
]
cells = np.fromiter((get_cell_type(cell) for cell in cell_imgs), dtype=np.int8)
grid = np.reshape(cells, (BOARD_HEIGHT, BOARD_WIDTH))
        #surround grid with -1 (so you can make cell_surrondings with no errors)
return np.concatenate(
(
np.full((1, BOARD_WIDTH + 2), -1, dtype=np.int8), #top row of -1
np.insert(grid, (0, BOARD_WIDTH), -1, axis=1), #fill sides with -1
np.full((1, BOARD_WIDTH + 2), -1, dtype=np.int8) #bottom row of -1
)
)
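# Hedged usage sketch (added for illustration; the helper name is an assumption,
# not part of the original module): the -1 border added above lets callers slice
# any cell's 3x3 neighbourhood without bounds checks.
def cell_neighbourhood(padded_grid: np.ndarray, row: int, col: int) -> np.ndarray:
    # row/col index the unpadded board; the +1 border offset is absorbed by slicing
    return padded_grid[row:row + 3, col:col + 3]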
| 32.446809
| 123
| 0.662951
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 239
| 0.156721
|
4d293734f2f02cf252d19002878c81331dcad9c7
| 963
|
py
|
Python
|
Challenges/13/tests/test_stack_and_queue_brackets.py
|
makkahwi/data-structures-and-algorithms
|
06551786258bb7dabb9b0ab07c0f80ff78abca41
|
[
"MIT"
] | null | null | null |
Challenges/13/tests/test_stack_and_queue_brackets.py
|
makkahwi/data-structures-and-algorithms
|
06551786258bb7dabb9b0ab07c0f80ff78abca41
|
[
"MIT"
] | null | null | null |
Challenges/13/tests/test_stack_and_queue_brackets.py
|
makkahwi/data-structures-and-algorithms
|
06551786258bb7dabb9b0ab07c0f80ff78abca41
|
[
"MIT"
] | null | null | null |
import pytest
from stack_and_queue_brackets.stack_and_queue_brackets import validate_brackets
def test_a_valid():
actual = validate_brackets("[text]")
expected = True
assert actual == expected
def test_another_valid():
actual = validate_brackets("(1)[2](3)")
expected = True
assert actual == expected
def test_no_closing():
actual = validate_brackets("[{()]")
expected = False
assert actual == expected
def test_no_opening():
actual = validate_brackets("(])")
expected = False
assert actual == expected
def test_opening_dont_match_closing():
actual = validate_brackets("({])")
expected = False
assert actual == expected
def test_null():
with pytest.raises(Exception):
validate_brackets(None)
def test_no_brackets():
with pytest.raises(Exception):
validate_brackets("text")
def test_empty_string():
with pytest.raises(Exception):
validate_brackets("")
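# Hedged reference sketch (added for illustration, not part of the original test
# module): a stack-based validate_brackets consistent with the expectations above.
def _validate_brackets_sketch(text):
    # None, empty strings and bracket-free strings are rejected, as the tests expect
    if not text or not any(ch in "()[]{}" for ch in text):
        raise Exception("input must contain brackets")
    pairs = {")": "(", "]": "[", "}": "{"}
    stack = []
    for ch in text:
        if ch in "([{":
            stack.append(ch)
        elif ch in ")]}":
            if not stack or stack.pop() != pairs[ch]:
                return False
    return not stack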
| 20.0625
| 79
| 0.688474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 45
| 0.046729
|
4d2a55ccfddd9dd9215b0c629a81b67391bf257c
| 360
|
py
|
Python
|
tts/symbols.py
|
entn-at/tn2-wg
|
00f59da91a1e23020b20210ea62d838e20c049f2
|
[
"BSD-3-Clause"
] | 38
|
2019-07-03T21:40:57.000Z
|
2021-11-30T15:59:20.000Z
|
tts/symbols.py
|
entn-at/tn2-wg
|
00f59da91a1e23020b20210ea62d838e20c049f2
|
[
"BSD-3-Clause"
] | 4
|
2019-08-20T12:19:10.000Z
|
2021-07-29T11:20:59.000Z
|
tts/symbols.py
|
entn-at/tn2-wg
|
00f59da91a1e23020b20210ea62d838e20c049f2
|
[
"BSD-3-Clause"
] | 11
|
2019-07-04T09:17:27.000Z
|
2021-11-14T21:05:04.000Z
|
""" from https://github.com/keithito/tacotron """
_pad = '_'
#_punctuation = '!\'(),.:;? '
_punctuation = '!",.:;? '
_special = '-'
#_letters = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
_letters = "АБВГДЕЁЖЗИЙКЛМНОПРСТУФХЦЧШЩЪЫЬЭЮЯабвгдеёжзийклмнопрстуфхцчшщъыьэюя"
symbols = [_pad] + list(_special) + list(_punctuation) + list(_letters)
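# Hedged example (added for illustration, not part of the original module): text
# front ends built on this table typically map symbols to integer ids like so.
symbol_to_id = {s: i for i, s in enumerate(symbols)}
id_to_symbol = {i: s for i, s in enumerate(symbols)}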
| 32.727273
| 79
| 0.711111
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 294
| 0.690141
|
4d2be9274f40cf5428dff78da1531be1cba5e3f0
| 28
|
py
|
Python
|
tests/fixtures/pkg1/pkg2/pkg3/pkg4/pkg5/__init__.py
|
shashankrnr32/pytkdocs
|
bf04764f1608970643932329c9f6c8c63a0c5632
|
[
"0BSD"
] | 21
|
2021-02-20T05:20:52.000Z
|
2022-03-04T20:57:16.000Z
|
tests/fixtures/pkg1/pkg2/pkg3/pkg4/pkg5/__init__.py
|
shashankrnr32/pytkdocs
|
bf04764f1608970643932329c9f6c8c63a0c5632
|
[
"0BSD"
] | 84
|
2020-03-22T15:29:56.000Z
|
2021-02-09T21:47:11.000Z
|
tests/fixtures/pkg1/pkg2/pkg3/pkg4/pkg5/__init__.py
|
shashankrnr32/pytkdocs
|
bf04764f1608970643932329c9f6c8c63a0c5632
|
[
"0BSD"
] | 21
|
2020-04-09T13:56:23.000Z
|
2021-01-19T19:18:42.000Z
|
"""Hello from the abyss."""
| 14
| 27
| 0.607143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 27
| 0.964286
|
4d2c6196de9f0ffebba719e30abbaf48e28d2d23
| 5,719
|
py
|
Python
|
test/test_tdodbc.py
|
Teradata/PyTd
|
5e960ed4c380c4f8ae84d582ad779a87adce5ae1
|
[
"MIT"
] | 133
|
2015-07-27T22:12:58.000Z
|
2021-08-31T05:26:38.000Z
|
test/test_tdodbc.py
|
Teradata/PyTd
|
5e960ed4c380c4f8ae84d582ad779a87adce5ae1
|
[
"MIT"
] | 121
|
2015-07-30T18:03:03.000Z
|
2021-08-09T13:46:37.000Z
|
test/test_tdodbc.py
|
Teradata/PyTd
|
5e960ed4c380c4f8ae84d582ad779a87adce5ae1
|
[
"MIT"
] | 57
|
2015-07-27T10:41:08.000Z
|
2021-04-26T08:58:57.000Z
|
# The MIT License (MIT)
#
# Copyright (c) 2015 by Teradata
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import unittest
import os
import teradata
from teradata import tdodbc, util
class TdOdbcTest (unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.username = cls.password = util.setupTestUser(udaExec, dsn)
def testGlobals(self):
self.assertEqual(tdodbc.apilevel, "2.0")
self.assertEqual(tdodbc.threadsafety, 1)
self.assertEqual(tdodbc.paramstyle, "qmark")
def testSystemNotFound(self):
with self.assertRaises(tdodbc.DatabaseError) as cm:
tdodbc.connect(system="continuum.td.teradata.com",
username=self.username, password=self.password)
self.assertTrue("08004" in cm.exception.msg, cm.exception)
def testBadCredentials(self):
with self.assertRaises(tdodbc.DatabaseError) as cm:
tdodbc.connect(system=system, username="bad", password="bad")
self.assertEqual(cm.exception.code, 8017, cm.exception.msg)
def testConnect(self):
conn = tdodbc.connect(
system=system, username=self.username, password=self.password)
self.assertIsNotNone(conn)
conn.close()
def testConnectBadDriver(self):
with self.assertRaises(tdodbc.InterfaceError) as cm:
tdodbc.connect(
system=system, username=self.username,
password=self.password,
driver="BadDriver")
self.assertEqual(cm.exception.code, "DRIVER_NOT_FOUND")
def testCursorBasics(self):
with tdodbc.connect(system=system, username=self.username,
password=self.password, autoCommit=True) as conn:
self.assertIsNotNone(conn)
with conn.cursor() as cursor:
count = 0
for row in cursor.execute("SELECT * FROM DBC.DBCInfo"):
self.assertEqual(len(row), 2)
self.assertIsNotNone(row[0])
self.assertIsNotNone(row['InfoKey'])
self.assertIsNotNone(row['infokey'])
self.assertIsNotNone(row.InfoKey)
self.assertIsNotNone(row.infokey)
self.assertIsNotNone(row[1])
self.assertIsNotNone(row['InfoData'])
self.assertIsNotNone(row['infodata'])
self.assertIsNotNone(row.infodata)
self.assertIsNotNone(row.InfoData)
row[0] = "test1"
self.assertEqual(row[0], "test1")
self.assertEqual(row['InfoKey'], "test1")
self.assertEqual(row.infokey, "test1")
row['infokey'] = "test2"
self.assertEqual(row[0], "test2")
self.assertEqual(row['InfoKey'], "test2")
self.assertEqual(row.infokey, "test2")
row.infokey = "test3"
self.assertEqual(row[0], "test3")
self.assertEqual(row['InfoKey'], "test3")
self.assertEqual(row.InfoKey, "test3")
count += 1
self.assertEqual(cursor.description[0][0], "InfoKey")
self.assertEqual(cursor.description[0][1], tdodbc.STRING)
self.assertEqual(cursor.description[1][0], "InfoData")
self.assertEqual(cursor.description[1][1], tdodbc.STRING)
self.assertEqual(count, 3)
def testExecuteWithParamsMismatch(self):
with self.assertRaises(teradata.InterfaceError) as cm:
with tdodbc.connect(system=system, username=self.username,
password=self.password,
autoCommit=True) as conn:
self.assertIsNotNone(conn)
with conn.cursor() as cursor:
cursor.execute(
"CREATE TABLE testExecuteWithParamsMismatch (id INT, "
"name VARCHAR(128), dob TIMESTAMP)")
cursor.execute(
"INSERT INTO testExecuteWithParamsMismatch "
"VALUES (?, ?, ?)", (1, "TEST", ))
self.assertEqual(
cm.exception.code, "PARAMS_MISMATCH", cm.exception.msg)
configFiles = [os.path.join(os.path.dirname(__file__), 'udaexec.ini')]
udaExec = teradata.UdaExec(configFiles=configFiles, configureLogging=False)
dsn = 'ODBC'
odbcConfig = udaExec.config.section(dsn)
system = odbcConfig['system']
super_username = odbcConfig['username']
super_password = odbcConfig['password']
if __name__ == '__main__':
unittest.main()
| 43.656489
| 79
| 0.616541
| 4,164
| 0.728099
| 0
| 0
| 108
| 0.018884
| 0
| 0
| 1,615
| 0.282392
|
4d2d2acea9bb79c046b8abea693dc31ff18efd72
| 143
|
py
|
Python
|
submissions/abc035/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 1
|
2021-05-10T01:16:28.000Z
|
2021-05-10T01:16:28.000Z
|
submissions/abc035/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | 3
|
2021-05-11T06:14:15.000Z
|
2021-06-19T08:18:36.000Z
|
submissions/abc035/a.py
|
m-star18/atcoder
|
08e475810516602fa088f87daf1eba590b4e07cc
|
[
"Unlicense"
] | null | null | null |
import sys
input = sys.stdin.readline
w, h = map(int, input().split())
if w / h == 4 / 3:
ans = '4:3'
else:
ans = '16:9'
print(ans)
| 11.916667
| 32
| 0.538462
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 11
| 0.076923
|
4d2da9056c6d973976290183ad18c7e824e87fbe
| 1,029
|
py
|
Python
|
setup.py
|
JosiahBradley/mod2win
|
f3636faea8cce041be2d9933574aa1ccd4b818ac
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
JosiahBradley/mod2win
|
f3636faea8cce041be2d9933574aa1ccd4b818ac
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
JosiahBradley/mod2win
|
f3636faea8cce041be2d9933574aa1ccd4b818ac
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
long_description = ""
with open("README.md", "r") as fh:
long_description = fh.read()
setuptools.setup(
author='Josiah Bradley',
author_email='JosiahBradley@gmail.com',
name="mod2win",
url="https://github.com/JosiahBradley/mod2win",
version="0.0.1",
entry_points={
'console_scripts': [
'play = mod2win.levels.level_launcher:launch',
'compile = mod2win.levels.level_launcher:_compile',
'scrub = mod2win.levels.level_launcher:scrub',
'restore = mod2win.levels.level_launcher:restore',
'spiral = mod2win.levels.spiral_test:main',
]
},
package_dir={'': 'src'},
packages=setuptools.find_packages('src'),
include_package_data=True,
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache License",
"Operating System :: OS Independent",
],
)
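# Note added for clarity (not part of the original file): after `pip install .`,
# each console_scripts entry above becomes a shell command, e.g. running `play`
# invokes mod2win.levels.level_launcher:launch.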
| 31.181818
| 63
| 0.640428
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 506
| 0.49174
|
4d2f1c615c504ceda8bcf1bd0cf231a9e5310a56
| 4,914
|
py
|
Python
|
release/stubs.min/Autodesk/Revit/UI/Plumbing.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 182
|
2017-06-27T02:26:15.000Z
|
2022-03-30T18:53:43.000Z
|
release/stubs.min/Autodesk/Revit/UI/Plumbing.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 28
|
2017-06-27T13:38:23.000Z
|
2022-03-15T11:19:44.000Z
|
release/stubs.min/Autodesk/Revit/UI/Plumbing.py
|
htlcnn/ironpython-stubs
|
780d829e2104b2789d5f4d6f32b0ec9f2930ca03
|
[
"MIT"
] | 67
|
2017-06-28T09:43:59.000Z
|
2022-03-20T21:17:10.000Z
|
# encoding: utf-8
# module Autodesk.Revit.UI.Plumbing calls itself Plumbing
# from RevitAPIUI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
class IPipeFittingAndAccessoryPressureDropUIServer(IExternalServer):
""" Interface for external servers providing optional UI for pipe fitting and pipe accessory coefficient calculation. """
def GetDBServerId(self):
"""
GetDBServerId(self: IPipeFittingAndAccessoryPressureDropUIServer) -> Guid
Returns the Id of the corresponding DB server for which this server provides an
optional UI.
Returns: The Id of the DB server.
"""
pass
def ShowSettings(self,data):
"""
ShowSettings(self: IPipeFittingAndAccessoryPressureDropUIServer,data: PipeFittingAndAccessoryPressureDropUIData) -> bool
Shows the settings UI.
data: The input data of the calculation.
Returns: True if the user makes any changes in the UI,false otherwise.
"""
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
class PipeFittingAndAccessoryPressureDropUIData(object,IDisposable):
""" The input and output data used by external UI servers for storing UI settings. """
def Dispose(self):
""" Dispose(self: PipeFittingAndAccessoryPressureDropUIData) """
pass
def GetUIDataItems(self):
"""
GetUIDataItems(self: PipeFittingAndAccessoryPressureDropUIData) -> IList[PipeFittingAndAccessoryPressureDropUIDataItem]
Gets all UI data items stored in the UI data.
Returns: An array of UI data items.
"""
pass
def GetUnits(self):
"""
GetUnits(self: PipeFittingAndAccessoryPressureDropUIData) -> Units
Gets units.
Returns: The Units object.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIData,disposing: bool) """
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIData) -> bool
"""
class PipeFittingAndAccessoryPressureDropUIDataItem(object,IDisposable):
""" The input and output data used by external UI servers for initializing and storing the UI settings. """
def Dispose(self):
""" Dispose(self: PipeFittingAndAccessoryPressureDropUIDataItem) """
pass
def GetEntity(self):
"""
GetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> Entity
Returns the entity set by UI server.
or an invalid entity otherwise.
Returns: The returned Entity.
"""
pass
def GetPipeFittingAndAccessoryData(self):
"""
GetPipeFittingAndAccessoryData(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> PipeFittingAndAccessoryData
Gets the fitting data stored in the UI data item.
Returns: The fitting data stored in the UI data item.
"""
pass
def ReleaseUnmanagedResources(self,*args):
""" ReleaseUnmanagedResources(self: PipeFittingAndAccessoryPressureDropUIDataItem,disposing: bool) """
pass
def SetEntity(self,entity):
"""
SetEntity(self: PipeFittingAndAccessoryPressureDropUIDataItem,entity: Entity)
Stores the entity in the UI data item.
entity: The Entity to be stored.
"""
pass
def __enter__(self,*args):
""" __enter__(self: IDisposable) -> object """
pass
def __exit__(self,*args):
""" __exit__(self: IDisposable,exc_type: object,exc_value: object,exc_back: object) """
pass
def __init__(self,*args):
""" x.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signaturex.__init__(...) initializes x; see x.__class__.__doc__ for signature """
pass
def __repr__(self,*args):
""" __repr__(self: object) -> str """
pass
IsValidObject=property(lambda self: object(),lambda self,v: None,lambda self: None)
"""Specifies whether the .NET object represents a valid Revit entity.
Get: IsValidObject(self: PipeFittingAndAccessoryPressureDropUIDataItem) -> bool
"""
| 28.905882
| 215
| 0.71571
| 4,667
| 0.949735
| 0
| 0
| 0
| 0
| 0
| 0
| 3,656
| 0.743997
|
4d2f795e5817013dda3708c8ac386c1a237e9181
| 240
|
py
|
Python
|
src/base/apps.py
|
jhernandez18p/mobyapp
|
4add7bd3b3f1e933bbb6941674bf84f4c4462685
|
[
"MIT"
] | null | null | null |
src/base/apps.py
|
jhernandez18p/mobyapp
|
4add7bd3b3f1e933bbb6941674bf84f4c4462685
|
[
"MIT"
] | 7
|
2020-06-05T17:31:06.000Z
|
2022-03-11T23:16:34.000Z
|
src/base/apps.py
|
jhernandez18p/mobyapp
|
4add7bd3b3f1e933bbb6941674bf84f4c4462685
|
[
"MIT"
] | null | null | null |
from django.apps import AppConfig
from django.db.models.signals import post_migrate
from django.utils.translation import gettext_lazy as _
class SitesConfig(AppConfig):
name = 'src.base'
    verbose_name = _("Frontend module")
| 26.666667
| 54
| 0.766667
| 94
| 0.391667
| 0
| 0
| 0
| 0
| 0
| 0
| 30
| 0.125
|
4d31fdb12184ec34b1dcc98d224f9491db93ddd4
| 708
|
py
|
Python
|
migrations/versions/010_Add_uploader_link_to_upload.py
|
LCBRU/lbrc_upload
|
be42fef97b67c1f25329db52ae3a88eb293a1203
|
[
"MIT"
] | null | null | null |
migrations/versions/010_Add_uploader_link_to_upload.py
|
LCBRU/lbrc_upload
|
be42fef97b67c1f25329db52ae3a88eb293a1203
|
[
"MIT"
] | null | null | null |
migrations/versions/010_Add_uploader_link_to_upload.py
|
LCBRU/lbrc_upload
|
be42fef97b67c1f25329db52ae3a88eb293a1203
|
[
"MIT"
] | null | null | null |
from sqlalchemy import MetaData, Table, Index, Column, Integer
meta = MetaData()
def upgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
upload = Table("upload", meta, autoload=True)
uploader_id = Column("uploader_id", Integer)
uploader_id.create(upload)
idx_upload_uploader_id = Index("idx_upload_uploader_id", upload.c.uploader_id)
idx_upload_uploader_id.create(migrate_engine)
def downgrade(migrate_engine):
meta = MetaData(bind=migrate_engine)
upload = Table("upload", meta, autoload=True)
idx_upload_uploader_id = Index("idx_upload_uploader_id", upload.c.uploader_id)
idx_upload_uploader_id.drop(migrate_engine)
upload.c.uploader_id.drop()
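# Note added for clarity (not part of the original migration): sqlalchemy-migrate
# calls upgrade()/downgrade() with the bound engine when the repository version is
# moved forward or rolled back, so the two functions should mirror each other.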
| 28.32
| 82
| 0.75565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 77
| 0.108757
|
4d32026c6758449b672d084b5b5fc4c71016f623
| 3,380
|
py
|
Python
|
redbot/resource/active_check/base.py
|
Malvoz/redbot
|
0edef8d4efefddde49d36cd97e471fc187837169
|
[
"MIT"
] | null | null | null |
redbot/resource/active_check/base.py
|
Malvoz/redbot
|
0edef8d4efefddde49d36cd97e471fc187837169
|
[
"MIT"
] | null | null | null |
redbot/resource/active_check/base.py
|
Malvoz/redbot
|
0edef8d4efefddde49d36cd97e471fc187837169
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Subrequests to do things like range requests, content negotiation checks,
and validation.
This is the base class for all subrequests.
"""
from abc import ABCMeta, abstractmethod
from configparser import SectionProxy
from typing import List, Tuple, Type, Union, TYPE_CHECKING
from redbot.resource.fetch import RedFetcher
from redbot.speak import Note, levels, categories
from redbot.type import StrHeaderListType
if TYPE_CHECKING:
from redbot.resource import (
HttpResource,
) # pylint: disable=cyclic-import,unused-import
class SubRequest(RedFetcher, metaclass=ABCMeta):
"""
Base class for a subrequest of a "main" HttpResource, made to perform
additional behavioural tests on the resource.
"""
check_name = "undefined"
response_phrase = "undefined"
def __init__(self, config: SectionProxy, base_resource: "HttpResource") -> None:
self.config = config
self.base = base_resource # type: HttpResource
RedFetcher.__init__(self, config)
self.check_done = False
self.on("fetch_done", self._check_done)
@abstractmethod
def done(self) -> None:
"""The subrequest is done, process it. Must be overridden."""
raise NotImplementedError
def _check_done(self) -> None:
if self.preflight():
self.done()
self.check_done = True
self.emit("check_done")
def check(self) -> None:
modified_headers = self.modify_request_headers(list(self.base.request.headers))
RedFetcher.set_request(
self,
self.base.request.uri,
self.base.request.method,
modified_headers,
self.base.request.payload,
)
RedFetcher.check(self)
@abstractmethod
def modify_request_headers(
self, base_request_headers: StrHeaderListType
) -> StrHeaderListType:
"""Usually overridden; modifies the request headers."""
return base_request_headers
def add_base_note(
self, subject: str, note: Type[Note], **kw: Union[str, int]
) -> None:
"Add a Note to the base resource."
kw["response"] = self.response_phrase
self.base.add_note(subject, note, **kw)
def check_missing_hdrs(self, hdrs: List[str], note: Type[Note]) -> None:
"""
See if the listed headers are missing in the subrequest; if so,
set the specified note.
"""
missing_hdrs = []
for hdr in hdrs:
if (
hdr in self.base.response.parsed_headers
and hdr not in self.response.parsed_headers
):
missing_hdrs.append(hdr)
if missing_hdrs:
self.add_base_note("headers", note, missing_hdrs=", ".join(missing_hdrs))
self.add_note("headers", note, missing_hdrs=", ".join(missing_hdrs))
class MISSING_HDRS_304(Note):
category = categories.VALIDATION
level = levels.WARN
summary = "%(response)s is missing required headers."
text = """\
HTTP requires `304 Not Modified` responses to have certain headers, if they are also present in a
normal (e.g., `200 OK` response).
%(response)s is missing the following headers: `%(missing_hdrs)s`.
This can affect cache operation; because the headers are missing, caches might remove them from
their cached copies."""
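# Hedged sketch (assumed, not part of the original module): a concrete subrequest
# subclasses SubRequest, adjusts the request headers, and inspects the response in
# done(). The header value and checked header names below are illustrative only.
class ExampleConditionalCheck(SubRequest):
    check_name = "example-conditional"
    response_phrase = "The example conditional response"

    def modify_request_headers(
        self, base_request_headers: StrHeaderListType
    ) -> StrHeaderListType:
        # send a conditional request so the origin may answer with a 304
        return base_request_headers + [("If-None-Match", '"example-etag"')]

    def done(self) -> None:
        # flag headers present on the base response but missing from this one
        self.check_missing_hdrs(["etag", "date"], MISSING_HDRS_304)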
| 32.190476
| 97
| 0.657988
| 2,804
| 0.829586
| 0
| 0
| 376
| 0.111243
| 0
| 0
| 1,096
| 0.32426
|
4d3223ccf8b9ace60c35ba3ab835c0690408e671
| 92
|
py
|
Python
|
demo/demo14.py
|
LXG-Shadow/SongRecogn
|
e02363db5dc40b6128c46f19249044c94e5ba425
|
[
"Apache-2.0"
] | 22
|
2019-02-25T20:58:58.000Z
|
2021-07-15T01:45:00.000Z
|
demo/demo14.py
|
aynakeya/SongRecogn
|
e02363db5dc40b6128c46f19249044c94e5ba425
|
[
"Apache-2.0"
] | 4
|
2021-04-07T13:27:44.000Z
|
2021-06-15T17:32:04.000Z
|
demo/demo14.py
|
aynakeya/SongRecogn
|
e02363db5dc40b6128c46f19249044c94e5ba425
|
[
"Apache-2.0"
] | 4
|
2020-05-16T14:08:24.000Z
|
2021-06-07T08:59:07.000Z
|
import getopt
a = "asdf asdf"
option,args = getopt.getopt(a,"","")
print(option,type(args))
| 18.4
| 36
| 0.684783
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 15
| 0.163043
|
4d360ecaf65d937ea0be727ba4568099673793e8
| 41
|
py
|
Python
|
eyap/utils/ghtools/__init__.py
|
emin63/eyap
|
783bdede298e63bbafee81b50cd1e899c43f5847
|
[
"BSD-3-Clause"
] | null | null | null |
eyap/utils/ghtools/__init__.py
|
emin63/eyap
|
783bdede298e63bbafee81b50cd1e899c43f5847
|
[
"BSD-3-Clause"
] | 2
|
2017-07-17T03:50:32.000Z
|
2017-08-05T02:39:36.000Z
|
eyap/utils/ghtools/__init__.py
|
emin63/eyap
|
783bdede298e63bbafee81b50cd1e899c43f5847
|
[
"BSD-3-Clause"
] | null | null | null |
"""Additional GitHub specific tools.
"""
| 13.666667
| 36
| 0.707317
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 40
| 0.97561
|
4d36ab2ecf64dbe104d9ad83b84b202b59495ecf
| 13,674
|
py
|
Python
|
page_api.py
|
tomkludy/md_to_conf
|
3b03f1c68eea1f8a6d788afab0add63f6d4dcf46
|
[
"MIT"
] | null | null | null |
page_api.py
|
tomkludy/md_to_conf
|
3b03f1c68eea1f8a6d788afab0add63f6d4dcf46
|
[
"MIT"
] | null | null | null |
page_api.py
|
tomkludy/md_to_conf
|
3b03f1c68eea1f8a6d788afab0add63f6d4dcf46
|
[
"MIT"
] | null | null | null |
"""
# --------------------------------------------------------------------------------------------------
# Page APIs
# --------------------------------------------------------------------------------------------------
"""
import os
import tempfile
import re
import json
import collections
import mimetypes
import urllib
import urllib.parse
import common
from file_api import FILE_API
from child_pages import CHILD_PAGES
from page_cache import PAGE_CACHE
from globals import LOGGER
from globals import SPACE_KEY
from globals import CONFLUENCE_API_URL
from globals import SIMULATE
from globals import ANCESTOR
class _PageApi:
"""
APIs for dealing with pages in Confluence
"""
__IMAGE_LINK_PAGES = {}
def __add_images(self, page_id, html, filepath):
"""
Scan for images and upload as attachments or child pages if found
:param page_id: Confluence page id
:param html: html string
:param filepath: markdown file full path
:return: html with modified image reference
"""
source_folder = os.path.dirname(os.path.abspath(filepath))
# <img/> tags turn into attachments
for tag in re.findall('<img(.*?)/>', html):
orig_rel_path = re.search('src="(.*?)"', tag).group(1)
alt_text = re.search('alt="(.*?)"', tag).group(1)
rel_path = urllib.parse.unquote(orig_rel_path)
abs_path = os.path.join(source_folder, rel_path)
basename = os.path.basename(rel_path)
self.__upload_attachment(page_id, abs_path, alt_text)
if re.search('http.*', rel_path) is None:
if CONFLUENCE_API_URL.endswith('/wiki'):
html = html.replace('%s' % (orig_rel_path),
'/wiki/download/attachments/%s/%s' % (page_id, basename))
else:
html = html.replace('%s' % (orig_rel_path),
'/download/attachments/%s/%s' % (page_id, basename))
# <a href="<image>">[Name]</a> turns into a sub-page
ancestors = common.get_page_as_ancestor(page_id)
for ref in re.findall(r'<a href=\"([^\"]+)\">([^<]+)</a>', html):
if not ref[0].startswith(('http', '/')) and ref[0].endswith('.png'):
dirname = os.path.abspath(os.path.dirname(filepath))
rel_image_from_page = os.path.join(dirname, ref[0])
image = os.path.normpath(rel_image_from_page)
alt = ref[1]
if image in self.__IMAGE_LINK_PAGES:
page = self.__IMAGE_LINK_PAGES[image]
else:
file = tempfile.NamedTemporaryFile(mode='w', delete=False)
title = urllib.parse.unquote(os.path.basename(image))
title = "%s - Diagram" % (os.path.splitext(title)[0])
file.write('# %s\n' % title)
temp_dirname = os.path.abspath(os.path.dirname(file.name))
rel_image_from_temp = os.path.relpath(image, temp_dirname)
                    file.write('![%s](%s)\n' % (alt, rel_image_from_temp))
file.close()
title = FILE_API.get_title(file.name)
subhtml = FILE_API.get_html(file.name)
self.create_or_update_page(title, subhtml, ancestors, file.name)
os.remove(file.name)
page = PAGE_CACHE.get_page(title)
self.__IMAGE_LINK_PAGES[image] = page
CHILD_PAGES.mark_page_active(page.id)
html = html.replace(ref[0], page.link)
return html
def create_or_update_page(self, title, body, ancestors, filepath):
"""
Create a new page
:param title: confluence page title
:param body: confluence page content
:param ancestors: confluence page ancestor
:param filepath: markdown file full path
:return: created or updated page id
"""
page = PAGE_CACHE.get_page(title)
if page:
return self.update_page(page.id, title, body, page.version, ancestors, filepath)
else:
LOGGER.info('Creating page %s...', title)
url = '%s/rest/api/content/' % CONFLUENCE_API_URL
new_page = {'type': 'page',
'title': title,
'space': {'key': SPACE_KEY},
'body': {
'storage': {
'value': body,
'representation': 'storage'
}
},
'ancestors': ancestors
}
LOGGER.debug("data: %s", json.dumps(new_page))
response = common.make_request_post(url, data=json.dumps(new_page))
data = response.json()
space_name = data[u'space'][u'name']
page_id = data[u'id']
version = data[u'version'][u'number']
link = '%s%s' % (CONFLUENCE_API_URL, data[u'_links'][u'webui'])
LOGGER.info('Page created in %s with ID: %s.', space_name, page_id)
LOGGER.info('URL: %s', link)
# label the page
self.__label_page(page_id)
img_check = re.search(r'<img(.*?)\/>', body)
if img_check:
LOGGER.info('Attachments found, update procedure called.')
return self.update_page(page_id, title, body, version, ancestors, filepath)
else:
return page_id
def update_page(self, page_id, title, body, version, ancestors, filepath):
"""
Update a page
:param page_id: confluence page id
:param title: confluence page title
:param body: confluence page content
:param version: confluence page version
:param ancestors: confluence page ancestor
:param filepath: markdown file full path
:return: updated page id
"""
LOGGER.info('Updating page %s...', title)
# Add images and attachments
body = self.__add_images(page_id, body, filepath)
# See if the page actually needs to be updated or not
existing = PAGE_CACHE.get_page(title)
if existing:
if title == existing.title and \
body == existing.body and \
ancestors[0]['id'] == existing.ancestor:
LOGGER.info('No changes on the page; update not necessary')
return page_id
else:
                LOGGER.info('Changes detected; update necessary')
if title != existing.title:
LOGGER.debug('update required: title %s != %s', title, existing.title)
if body != existing.body:
LOGGER.debug('update required: body %s != %s', body, existing.body)
if ancestors[0]['id'] != existing.ancestor:
LOGGER.debug('update required: ancestor %s != %s',
ancestors[0]['id'], existing.ancestor)
PAGE_CACHE.forget_page(title)
url = '%s/rest/api/content/%s' % (CONFLUENCE_API_URL, page_id)
page_json = {
"id": page_id,
"type": "page",
"title": title,
"space": {"key": SPACE_KEY},
"body": {
"storage": {
"value": body,
"representation": "storage"
}
},
"version": {
"number": version + 1,
"minorEdit": True
},
'ancestors': ancestors
}
response = common.make_request_put(url, data=json.dumps(page_json))
data = response.json()
link = '%s%s' % (CONFLUENCE_API_URL, data[u'_links'][u'webui'])
LOGGER.info("Page updated successfully.")
LOGGER.info('URL: %s', link)
return data[u'id']
def __label_page(self, page_id):
"""
Attach a label to the page to indicate it was auto-generated
"""
LOGGER.info("Labeling page %s", page_id)
url = '%s/rest/api/content/%s/label' % (CONFLUENCE_API_URL, page_id)
page_json = [{ "name": "md_to_conf" }]
common.make_request_post(url, data=json.dumps(page_json))
def __get_attachment(self, page_id, filename):
"""
Get page attachment
:param page_id: confluence page id
:param filename: attachment filename
:return: attachment info in case of success, False otherwise
"""
url = '%s/rest/api/content/%s/child/attachment?filename=%s' \
'&expand=metadata.properties.hash' \
% (CONFLUENCE_API_URL, page_id, filename)
response = common.make_request_get(url)
data = response.json()
LOGGER.debug('data: %s', str(data))
if len(data[u'results']) >= 1:
data = data[u'results'][0]
att_id = data[u'id']
att_hash = None
props = data[u'metadata'][u'properties']
if u'hash' in props:
hash_prop = props[u'hash'][u'value']
if u'sha256' in hash_prop:
att_hash = hash_prop[u'sha256']
att_info = collections.namedtuple('AttachmentInfo', ['id', 'hash'])
attr_info = att_info(att_id, att_hash)
return attr_info
return False
def __upload_attachment(self, page_id, file, comment):
"""
Upload an attachment
:param page_id: confluence page id
:param file: attachment file
:param comment: attachment comment
:return: boolean
"""
if re.search('http.*', file):
return False
content_type = mimetypes.guess_type(file)[0]
filename = os.path.basename(file)
if not os.path.isfile(file):
LOGGER.error('File %s cannot be found --> skip ', file)
return False
sha = FILE_API.get_sha_hash(file)
file_to_upload = {
'comment': comment,
'file': (filename, open(file, 'rb'), content_type, {'Expires': '0'})
}
attachment = self.__get_attachment(page_id, filename)
if attachment:
if sha == attachment.hash:
LOGGER.info('File %s has not changed --> skip', file)
return True
else:
LOGGER.debug('File %s has changed', file)
url = '%s/rest/api/content/%s/child/attachment/%s/data' % \
(CONFLUENCE_API_URL, page_id, attachment.id)
else:
LOGGER.debug('File %s is new', file)
url = '%s/rest/api/content/%s/child/attachment/' % (CONFLUENCE_API_URL, page_id)
LOGGER.info('Uploading attachment %s...', filename)
response = common.make_request_upload(url, file_to_upload)
data = response.json()
LOGGER.debug('data: %s', str(data))
# depending on create or update, sometimes you get a collection
# and sometimes you get a single item
if u'results' in data:
data = data[u'results'][0]
attachment_id = data['id']
# Set the SHA hash metadata on the attachment so that it can be later compared
# first, get the current version of the property if it exists
url = '%s/rest/api/content/%s/property/hash' % (CONFLUENCE_API_URL, attachment_id)
response = common.make_request_get(url, False)
if response.status_code == 200:
data = response.json()
LOGGER.debug('data: %s', str(data))
version = data[u'version'][u'number']
else:
version = 0
        # then set the hash property
page_json = {
"value": {
"sha256": sha
},
"version": {
"number": version + 1,
"minorEdit": True
}
}
LOGGER.debug('data: %s', json.dumps(page_json))
response = common.make_request_put(url, data=json.dumps(page_json))
return True
def create_dir_landing_page(self, dir_landing_page_file, ancestors):
"""
Create landing page for a directory
:param dir_landing_page_file: the raw markdown file to use for landing page html generation
:param ancestors: the ancestor pages of the new landing page
:return: the created landing page id
"""
landing_page_title = FILE_API.get_title(dir_landing_page_file)
html = FILE_API.get_html(dir_landing_page_file)
if SIMULATE:
common.log_html(html, landing_page_title)
return []
return self.create_or_update_page(landing_page_title, html, \
ancestors, dir_landing_page_file)
def create_trash(self):
"""
Create a __ORPHAN__ folder under the root ancestor
"""
file = tempfile.NamedTemporaryFile(mode='w', delete=False)
file.write('''# __ORPHAN__
<p>~!Files under this folder are NOT present in the source repo and were moved here in lieu of deletion.!~</p>
If these files are no longer needed, it is safe to delete this folder.
''')
file.close()
title = FILE_API.get_title(file.name)
html = FILE_API.get_html(file.name)
root_ancestors = common.get_page_as_ancestor(ANCESTOR)
page_id = self.create_or_update_page(title, html, root_ancestors, file.name)
return page_id
PAGE_API = _PageApi()
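# ---------------------------------------------------------------------------
# Illustrative usage of the singleton above (hypothetical markdown path; not
# part of the original module): convert a markdown file to Confluence
# storage-format HTML and create or update the corresponding page under the
# configured root ancestor.
#
#     ancestors = common.get_page_as_ancestor(ANCESTOR)
#     title = FILE_API.get_title('docs/intro.md')
#     html = FILE_API.get_html('docs/intro.md')
#     page_id = PAGE_API.create_or_update_page(title, html, ancestors, 'docs/intro.md')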
| 36.270557
| 114
| 0.546365
| 13,036
| 0.953342
| 0
| 0
| 0
| 0
| 0
| 0
| 4,296
| 0.314173
|
4d37f40cf39b5e290df6e5f9680f28b3b0ec78f5
| 5,918
|
py
|
Python
|
testing/tests/data_handling/test_predict.py
|
JSKenyon/QuartiCal
|
2113855b080cfecc4a1c77cc9dad346ef3619716
|
[
"MIT"
] | null | null | null |
testing/tests/data_handling/test_predict.py
|
JSKenyon/QuartiCal
|
2113855b080cfecc4a1c77cc9dad346ef3619716
|
[
"MIT"
] | null | null | null |
testing/tests/data_handling/test_predict.py
|
JSKenyon/QuartiCal
|
2113855b080cfecc4a1c77cc9dad346ef3619716
|
[
"MIT"
] | 1
|
2022-03-18T14:30:04.000Z
|
2022-03-18T14:30:04.000Z
|
from copy import deepcopy
import pytest
from quartical.data_handling.predict import (parse_sky_models,
daskify_sky_model_dict,
get_support_tables)
import dask.array as da
import numpy as np
from numpy.testing import assert_array_almost_equal
expected_clusters = {"DIE": {"point": 22, "gauss": 24},
"B290": {"point": 1, "gauss": 2},
"C242": {"point": 0, "gauss": 1},
"G195": {"point": 0, "gauss": 1},
"H194": {"point": 0, "gauss": 2},
"I215": {"point": 0, "gauss": 1},
"R283": {"point": 1, "gauss": 0},
"V317": {"point": 0, "gauss": 1}}
@pytest.fixture(params=["", "@dE"], ids=["di", "dd"],
scope="module")
def raw_model_recipe(request, lsm_name):
return lsm_name + request.param
@pytest.fixture(scope="module")
def model_opts(base_opts, raw_model_recipe, beam_name):
model_opts = deepcopy(base_opts.input_model)
model_opts.recipe = raw_model_recipe
model_opts.beam = beam_name + "/JVLA-L-centred-$(corr)_$(reim).fits"
model_opts.beam_l_axis = "-X"
model_opts.beam_m_axis = "Y"
return model_opts
@pytest.fixture(scope="module")
def ms_opts(base_opts, freq_chunk, time_chunk):
ms_opts = deepcopy(base_opts.input_ms)
ms_opts.freq_chunk = freq_chunk
ms_opts.time_chunk = time_chunk
return ms_opts
@pytest.fixture(scope="function")
def sky_model_dict(recipe):
return parse_sky_models(recipe.ingredients.sky_models)
@pytest.fixture(scope="function")
def dask_sky_dict(sky_model_dict):
return daskify_sky_model_dict(sky_model_dict, 10)
@pytest.fixture(scope="module")
def support_tables(ms_name):
return get_support_tables(ms_name)
# -----------------------------parse_sky_models--------------------------------
@pytest.mark.predict
@pytest.mark.parametrize("source_fields", [
("point", ["radec", "stokes", "spi", "ref_freq"]),
("gauss", ["radec", "stokes", "spi", "ref_freq", "shape"])
])
def test_expected_fields(sky_model_dict, source_fields):
# Check that we have all the fields we expect.
source_type, fields = source_fields
check = True
for clusters in sky_model_dict.values():
for cluster in clusters.values():
for field in fields:
if source_type in cluster:
check &= field in cluster[source_type]
assert check
@pytest.mark.predict
@pytest.mark.parametrize("source_fields", [
("point", ["radec", "stokes", "spi", "ref_freq"]),
("gauss", ["radec", "stokes", "spi", "ref_freq", "shape"])
])
def test_nsource(sky_model_dict, source_fields):
# Check for the expected number of point sources.
source_type, fields = source_fields
expected_n_source = [s[source_type] for s in expected_clusters.values()]
for field in fields:
n_source = [len(cluster.get(source_type, {field: []})[field])
for clusters in sky_model_dict.values()
for cluster in clusters.values()]
if len(n_source) == 1:
expected_n_source = [sum(expected_n_source)]
assert n_source == expected_n_source
# -------------------------daskify_sky_model_dict------------------------------
@pytest.mark.predict
def test_chunking(dask_sky_dict):
# Check for consistent chunking.
check = True
for sky_model_name, sky_model in dask_sky_dict.items():
for cluster_name, cluster in sky_model.items():
for source_type, sources in cluster.items():
for arr in sources.values():
check &= all([c <= 10 for c in arr.chunks[0]])
assert check is True
# ----------------------------get_support_tables-------------------------------
@pytest.mark.predict
@pytest.mark.parametrize("table", ["ANTENNA", "DATA_DESCRIPTION", "FIELD",
"SPECTRAL_WINDOW", "POLARIZATION"])
def test_support_fields(support_tables, table):
# Check that we have all expected support tables.
assert table in support_tables
@pytest.mark.predict
@pytest.mark.parametrize("table", ["ANTENNA"])
def test_lazy_tables(support_tables, table):
# Check that the antenna table is lazily evaluated.
assert all([isinstance(dvar.data, da.Array)
for dvar in support_tables[table][0].data_vars.values()])
@pytest.mark.predict
@pytest.mark.parametrize("table", ["DATA_DESCRIPTION", "FIELD",
"SPECTRAL_WINDOW", "POLARIZATION"])
def test_nonlazy_tables(support_tables, table):
# Check that the expected tables are not lazily evaluated.
assert all([isinstance(dvar.data, np.ndarray)
for dvar in support_tables[table][0].data_vars.values()])
# ---------------------------------predict-------------------------------------
# NOTE: No coverage attempt is made for the predict internals copied from
# https://github.com/ska-sa/codex-africanus. This is because the majority
# of this functionality should be tested by codex-africanus. We do check that
# both the direction-independent predict and direction-dependent predict work
# for a number of different input values.
@pytest.mark.predict
def test_predict(predicted_xds_list, data_xds_list_w_model_col):
# Check that the predicted visibilities are consistent with the MeqTrees
# visibilities stored in MODEL_DATA.
for xds_ind in range(len(predicted_xds_list)):
predicted_vis = predicted_xds_list[xds_ind].MODEL_DATA.data
predicted_vis = predicted_vis.sum(axis=2) # Sum over directions.
expected_vis = data_xds_list_w_model_col[xds_ind].MODEL_DATA.data
assert_array_almost_equal(predicted_vis, expected_vis)
# -----------------------------------------------------------------------------
| 32.163043
| 79
| 0.622339
| 0
| 0
| 0
| 0
| 4,349
| 0.734877
| 0
| 0
| 1,765
| 0.298243
|
4d391ca815462113e85dde20f4caa4e28b604358
| 300
|
py
|
Python
|
HACKERRANK_Numpy/concatenated.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
HACKERRANK_Numpy/concatenated.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
HACKERRANK_Numpy/concatenated.py
|
StefaniaSferragatta/ADM2020-HW1
|
8f85ac1c8dd4bff52c5c17987c9e96b209a93830
|
[
"MIT"
] | null | null | null |
import numpy
N,M,P = map(int,input().split())
p_cols1 =numpy.array([input().split() for _ in range(N)],int)
p_cols1.shape = (N,P)
p_cols2 =numpy.array([input().split() for _ in range(M)],int)
p_cols2.shape = (M,P)
concatenated = numpy.concatenate((p_cols1, p_cols2), axis = 0)
print(concatenated)
| 25
| 62
| 0.686667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
4d3a0734d340535665b3ebc270eb897a3c7611c7
| 4,171
|
py
|
Python
|
Discord Status Changer.py
|
vragonx/DiscordStatusChanger
|
376a78e5653f99d266a0a45ac3ecc8d71159bd49
|
[
"Apache-2.0"
] | null | null | null |
Discord Status Changer.py
|
vragonx/DiscordStatusChanger
|
376a78e5653f99d266a0a45ac3ecc8d71159bd49
|
[
"Apache-2.0"
] | null | null | null |
Discord Status Changer.py
|
vragonx/DiscordStatusChanger
|
376a78e5653f99d266a0a45ac3ecc8d71159bd49
|
[
"Apache-2.0"
] | 1
|
2021-06-06T07:24:14.000Z
|
2021-06-06T07:24:14.000Z
|
from colorama import Fore, init, Style
import requests
import random
import ctypes
import time
import os
ctypes.windll.kernel32.SetConsoleTitleW('Discord Status Changer')
init(convert=True, autoreset=True)
SuccessCounter = 0
ErrorCounter = 0
os.system('cls')
print(Fore.RED + '\n[' + Fore.WHITE + Style.BRIGHT + '0' + Style.RESET_ALL + Fore.RED + '] ' + Fore.WHITE + Style.BRIGHT + 'Discord Status Changer by vragon')
print(Fore.GREEN + '\n[' + Fore.WHITE + Style.BRIGHT + '1' + Style.RESET_ALL + Fore.GREEN + '] ' + Fore.WHITE + Style.BRIGHT + 'Text')
print(Fore.GREEN + '[' + Fore.WHITE + Style.BRIGHT + '2' + Style.RESET_ALL + Fore.GREEN + '] ' + Fore.WHITE + Style.BRIGHT + 'Text including emoji')
try:
option = int(input(Fore.GREEN + '\n> ' + Fore.WHITE + Style.BRIGHT))
except ValueError as e:
print(' ')
print(Fore.RED + '[ERROR] ' + Fore.WHITE + Style.BRIGHT + str(e))
input()
quit()
if option == 1:
os.system('cls')
print(Fore.WHITE + Style.BRIGHT + '\nToken:')
token = str(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
print(' ')
def ChangeStatus():
global SuccessCounter
global ErrorCounter
try:
session = requests.Session()
headers = {
'authorization': token,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.306 Chrome/78.0.3904.130 Electron/7.1.11 Safari/537.36',
'content-type': 'application/json'
}
text = random.choice(['Text1', 'Text2', 'Text3'])
data = '{"custom_status":{"text":"' + text + '"}}'
r = session.patch('https://discordapp.com/api/v6/users/@me/settings', headers=headers, data=data)
if '"custom_status": {"text": "' in r.text:
print(Fore.GREEN + '[SUCCESS] ' + Fore.WHITE + Style.BRIGHT + 'Status changed: ' + str(text))
SuccessCounter += 1
ctypes.windll.kernel32.SetConsoleTitleW('Discord Status Changer | Success: ' + str(SuccessCounter) + ' | Errors: ' + str(ErrorCounter))
else:
print(r.text)
except:
pass
time.sleep(1)
while True:
ChangeStatus()
elif option == 2:
os.system('cls')
print(Fore.WHITE + Style.BRIGHT + '\nToken:')
token = str(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
print(Fore.WHITE + Style.BRIGHT + '\nEmoji name:')
EmojiName = str(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
print(Fore.WHITE + Style.BRIGHT + '\nEmoji ID:')
try:
EmojiID = int(input(Fore.GREEN + '> ' + Fore.WHITE + Style.BRIGHT))
except ValueError as e:
print(' ')
print(Fore.RED + '[ERROR] ' + Fore.WHITE + Style.BRIGHT + str(e))
input()
quit()
print(' ')
def ChangeStatus():
global SuccessCounter
global ErrorCounter
try:
session = requests.Session()
headers = {
'authorization': token,
'user-agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/0.0.306 Chrome/78.0.3904.130 Electron/7.1.11 Safari/537.36',
'content-type': 'application/json'
}
text = random.choice(['Text1', 'Text2', 'Text3'])
data = '{"custom_status":{"text":"' + text + '","emoji_id":"' + str(EmojiID) + '","emoji_name":"' + str(EmojiName) + '"}}'
r = session.patch('https://discordapp.com/api/v6/users/@me/settings', headers=headers, data=data)
if 'custom_status' in r.text:
print(Fore.GREEN + '[SUCCESS] ' + Fore.WHITE + Style.BRIGHT + 'Status changed: ' + str(text))
SuccessCounter += 1
ctypes.windll.kernel32.SetConsoleTitleW('Discord Status Changer | Success: ' + str(SuccessCounter) + ' | Errors: ' + str(ErrorCounter))
else:
print(r.text)
except:
pass
time.sleep(1)
while True:
ChangeStatus()
| 45.336957
| 176
| 0.562455
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,087
| 0.260609
|
4d3c620a15280505542a7dd73460b5056d95dccf
| 1,269
|
py
|
Python
|
hw1/feature_summary.py
|
doochi/gct634-ai613-2021
|
af12a1ea9c622fca17928f8431cc0983470f97db
|
[
"MIT"
] | 9
|
2021-09-04T04:11:47.000Z
|
2022-01-06T13:00:32.000Z
|
hw1/feature_summary.py
|
doochi/gct634-ai613-2021
|
af12a1ea9c622fca17928f8431cc0983470f97db
|
[
"MIT"
] | null | null | null |
hw1/feature_summary.py
|
doochi/gct634-ai613-2021
|
af12a1ea9c622fca17928f8431cc0983470f97db
|
[
"MIT"
] | 19
|
2021-09-12T10:13:09.000Z
|
2022-01-28T01:37:42.000Z
|
# GCT634 (2018) HW1
#
# Mar-18-2018: initial version
#
# Juhan Nam
#
import sys
import os
import numpy as np
import matplotlib.pyplot as plt
data_path = './dataset/'
mfcc_path = './mfcc/'
MFCC_DIM = 20
def mean_mfcc(dataset='train'):
f = open(data_path + dataset + '_list.txt','r')
if dataset == 'train':
mfcc_mat = np.zeros(shape=(MFCC_DIM, 1100))
else:
mfcc_mat = np.zeros(shape=(MFCC_DIM, 300))
i = 0
for file_name in f:
# load mfcc file
file_name = file_name.rstrip('\n')
file_name = file_name.replace('.wav','.npy')
mfcc_file = mfcc_path + file_name
mfcc = np.load(mfcc_file)
# mean pooling
        mfcc_mat[:, i] = np.mean(mfcc, axis=1)
i = i + 1
f.close()
return mfcc_mat
if __name__ == '__main__':
train_data = mean_mfcc('train')
valid_data = mean_mfcc('valid')
plt.figure(1)
plt.subplot(2,1,1)
plt.imshow(train_data, interpolation='nearest', origin='lower', aspect='auto')
plt.colorbar(format='%+2.0f dB')
plt.subplot(2,1,2)
plt.imshow(valid_data, interpolation='nearest', origin='lower', aspect='auto')
plt.colorbar(format='%+2.0f dB')
plt.show()
| 18.940299
| 82
| 0.597321
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 250
| 0.197006
|
4d3c97fcf24ccce0fd906a45948aebe7fed32f87
| 6,685
|
py
|
Python
|
package/kedro_viz/services/layers.py
|
pascalwhoop/kedro-viz
|
5fd8dd8033da5f3d37c80a7adb51b60fd8daa64d
|
[
"BSD-3-Clause-Clear",
"Apache-2.0"
] | 246
|
2019-07-08T15:27:34.000Z
|
2022-01-09T18:47:11.000Z
|
package/kedro_viz/services/layers.py
|
pascalwhoop/kedro-viz
|
5fd8dd8033da5f3d37c80a7adb51b60fd8daa64d
|
[
"BSD-3-Clause-Clear",
"Apache-2.0"
] | 222
|
2019-07-08T14:07:13.000Z
|
2022-01-06T19:19:27.000Z
|
package/kedro_viz/services/layers.py
|
pascalwhoop/kedro-viz
|
5fd8dd8033da5f3d37c80a7adb51b60fd8daa64d
|
[
"BSD-3-Clause-Clear",
"Apache-2.0"
] | 53
|
2019-07-14T14:06:47.000Z
|
2021-12-06T22:21:46.000Z
|
# Copyright 2021 QuantumBlack Visual Analytics Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, AND
# NONINFRINGEMENT. IN NO EVENT WILL THE LICENSOR OR OTHER CONTRIBUTORS
# BE LIABLE FOR ANY CLAIM, DAMAGES, OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF, OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#
# The QuantumBlack Visual Analytics Limited ("QuantumBlack") name and logo
# (either separately or in combination, "QuantumBlack Trademarks") are
# trademarks of QuantumBlack. The License does not grant you any right or
# license to the QuantumBlack Trademarks. You may not use the QuantumBlack
# Trademarks or any confusingly similar mark as a trademark for your product,
# or use the QuantumBlack Trademarks in any other manner that might cause
# confusion in the marketplace, including but not limited to in advertising,
# on websites, or on software.
#
# See the License for the specific language governing permissions and
# limitations under the License.
"""`kedro_viz.services.layers` defines layers-related logic."""
import logging
from collections import defaultdict
from typing import Dict, List, Set
from toposort import CircularDependencyError, toposort_flatten
from kedro_viz.models.graph import GraphNode
logger = logging.getLogger(__name__)
def sort_layers(
nodes: Dict[str, GraphNode], dependencies: Dict[str, Set[str]]
) -> List[str]:
"""Given a DAG represented by a dictionary of nodes, some of which have a `layer` attribute,
along with their dependencies, return the list of all layers sorted according to
the nodes' topological order, i.e. a layer should appear before another layer in the list
if its node is a dependency of the other layer's node, directly or indirectly.
For example, given the following graph:
node1(layer=a) -> node2 -> node4 -> node6(layer=d)
| ^
v |
node3(layer=b) -> node5(layer=c)
The layers ordering should be: [a, b, c, d]
In theory, this is a problem of finding the
[transitive closure](https://en.wikipedia.org/wiki/Transitive_closure) in a graph of layers
and then toposort them. The algorithm below follows a repeated depth-first search approach:
    * For every node, find all layers that depend on it in a depth-first search.
    * While traversing, build up a dictionary of {node_id -> layers} for the nodes
    that have already been visited.
* Turn the final {node_id -> layers} into a {layer -> layers} to represent the layers'
dependencies. Note: the key is a layer and the values are the parents of that layer,
just because that's the format toposort requires.
* Feed this layers dictionary to ``toposort`` and return the sorted values.
* Raise CircularDependencyError if the layers cannot be sorted topologically,
i.e. there are cycles among the layers.
Args:
nodes: A dictionary of {node_id -> node} represents the nodes in the graph.
dependencies: A dictionary of {node_id -> set(child_ids)}
represents the direct dependencies between nodes in the graph.
Returns:
The list of layers sorted based on topological order.
Raises:
CircularDependencyError: When the layers have cyclic dependencies.
"""
node_layers: Dict[str, Set[str]] = {} # map node_id to the layers that depend on it
def find_child_layers(node_id: str) -> Set[str]:
"""For the given node_id, find all layers that depend on it in a depth-first manner.
Build up the node_layers dependency dictionary while traversing so each node is visited
only once.
        Note: Python's default recursion depth limit is 1000, which means this algorithm won't
        work for pipelines with more than 1000 nodes. However, we can rewrite this using a stack
        if we run into this limit in practice.
"""
if node_id in node_layers:
return node_layers[node_id]
node_layers[node_id] = set()
# The layer of the current node can also be considered as depending on that node.
# This is to cater for the edge case where all nodes are completely disjoint from each other
# and no dependency graph for layers can be constructed,
# yet the layers still need to be displayed.
node_layer = getattr(nodes[node_id], "layer", None)
if node_layer is not None:
node_layers[node_id].add(node_layer)
# for each child node of the given node_id,
# mark its layer and all layers that depend on it as child layers of the given node_id.
for child_node_id in dependencies[node_id]:
child_node = nodes[child_node_id]
child_layer = getattr(child_node, "layer", None)
if child_layer is not None:
node_layers[node_id].add(child_layer)
node_layers[node_id].update(find_child_layers(child_node_id))
return node_layers[node_id]
# populate node_layers dependencies
for node_id in nodes:
find_child_layers(node_id)
# compute the layer dependencies dictionary based on the node_layers dependencies,
# represented as {layer -> set(parent_layers)}
layer_dependencies = defaultdict(set)
for node_id, child_layers in node_layers.items():
node_layer = getattr(nodes[node_id], "layer", None)
# add the node's layer as a parent layer for all child layers.
# Even if a child layer is the same as the node's layer, i.e. a layer is marked
# as its own parent, toposort still works so we don't need to check for that explicitly.
if node_layer is not None:
for layer in child_layers:
layer_dependencies[layer].add(node_layer)
# toposort the layer_dependencies to find the layer order.
# Note that for string, toposort_flatten will default to alphabetical order for tie-break.
try:
return toposort_flatten(layer_dependencies)
except CircularDependencyError:
logger.warning(
"Layers visualisation is disabled as circular dependency detected among layers."
)
return []
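# ---------------------------------------------------------------------------
# Minimal usage sketch (added for illustration, not part of the original module):
# exercises sort_layers with the toy graph from its docstring. SimpleNamespace
# objects stand in for GraphNode, since only an optional `layer` attribute is
# read via getattr above.
if __name__ == "__main__":
    from types import SimpleNamespace

    demo_nodes = {
        "node1": SimpleNamespace(layer="a"),
        "node2": SimpleNamespace(),
        "node3": SimpleNamespace(layer="b"),
        "node4": SimpleNamespace(),
        "node5": SimpleNamespace(layer="c"),
        "node6": SimpleNamespace(layer="d"),
    }
    demo_dependencies = {
        "node1": {"node2", "node3"},
        "node2": {"node4"},
        "node3": {"node5"},
        "node4": {"node6"},
        "node5": {"node4"},
        "node6": set(),
    }
    # Expected output: ['a', 'b', 'c', 'd']
    print(sort_layers(demo_nodes, demo_dependencies))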
| 48.093525
| 100
| 0.698579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 4,986
| 0.745849
|
4d3cec7b76ef0e1f26a6a2ea0b4008e98f8e6357
| 6,941
|
py
|
Python
|
fixed_width_gen.py
|
pradnyaalc/fixed_width_file_generation
|
989eb34f57a6061f89c4889ec1c3db3a45b86723
|
[
"Apache-2.0"
] | null | null | null |
fixed_width_gen.py
|
pradnyaalc/fixed_width_file_generation
|
989eb34f57a6061f89c4889ec1c3db3a45b86723
|
[
"Apache-2.0"
] | null | null | null |
fixed_width_gen.py
|
pradnyaalc/fixed_width_file_generation
|
989eb34f57a6061f89c4889ec1c3db3a45b86723
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# importing libraries
import json
from copy import deepcopy
from decimal import Decimal
import time
class FixedWidth:
def get_config(self, config_file):
"""
reads the json specification file and returns a dictionary
:param config_file: json
:return: dict
"""
with open(config_file) as json_file:
data = json.load(json_file)
return data
def __init__(self, config_file, **kwargs):
self.config = self.get_config(config_file)
self.fixed_width_encoding = self.config['FixedWidthEncoding']
self.header = self.config['IncludeHeader']
self.delimited_encoding = self.config['DelimitedEncoding']
self.data = {}
        # '\n' to append at the end of each fixed width line
self.line_end = kwargs.pop('line_end', '\n')
self.file = open(kwargs['data_file_name'], 'w', encoding=self.fixed_width_encoding)
# check for required attributes in the json specification
if any([x not in self.config.keys() for x in
('ColumnNames', 'Offsets', 'FixedWidthEncoding', 'IncludeHeader', 'DelimitedEncoding')]):
raise ValueError(
"Not all required attributes are provided for generating the fixed width file")
#check if the number of columns and the number of offsets are equal
if len(self.config['ColumnNames']) != len(self.config['Offsets']):
raise ValueError(
"Number of fields and the number of offsets should be equal"
)
for key, value in self.config.items():
# check the type of the attribute values in the config file
if isinstance(value, list):
if not all(isinstance(x, str) for x in value):
raise ValueError(
"The elements in %s have invalid type. Allowed: 'string'" % (key))
elif not isinstance(value, str):
raise ValueError(
"Invalid value type for %s. Allowed: 'string'" % (key))
# generate a list of columns along with their lengths/offsets
field_list = []
for i in range(len(self.config['ColumnNames'])):
field_list.append((self.config['ColumnNames'][i], int(self.config['Offsets'][i])))
self.fields = deepcopy(field_list)
def update(self, data_values):
self.data.update(data_values)
def validate_data(self):
"""
checks whether the given data is valid or invalid
:return: Boolean
"""
for field in self.fields:
field_name = field[0]
length = field[1]
# check if the required field names are present in the data
if field_name in self.data:
data = self.data[field_name]
# ensure value passed in is not too long for the field
field_data = self.format_field(field_name)
if len(str(field_data)) > length:
raise ValueError("%s is too long (limited to %d \
characters)." % (field_name, length))
else: # no value passed in
# if required but not provided
if field_name in self.config["ColumnNames"]:
raise ValueError("Field %s is required, but was \
not provided." % (field_name,))
return True
def format_field(self, field):
"""
format the data for each field and convert them into string
:param field: input the field_name
:return: string format of the data corresponding to field name
"""
data = self.data[field]
if data is None:
return ''
return str(data)
def build_line(self):
"""
Build fixed width line depending upon the lengths mentioned in config
:return: line: fixed width line
"""
self.validate_data()
line = ''
# add header if true
if self.header:
for x in self.fields:
dat = x[0]
justify = dat.ljust
dat = justify(x[1], " ")
line += dat
line += self.line_end
self.header = False
for x in self.fields:
field = x[0]
length = x[1]
if field in self.data:
dat = self.format_field(field)
else:
dat = ''
# left justify the string
justify = dat.ljust
dat = justify(length, " ")
line += dat
return line + self.line_end
def write_file(self):
"""
write the fixed width line into the file with specified encoding
:return:
"""
line = self.build_line()
self.file.write(line)
def close_file(self):
self.file.close()
def parser(self, data_file, csv_file):
"""
        Parse the given fixed width file and convert it into a csv file with the given encoding
        :param data_file: fixed width file
:param csv_file: csv file name to generate
:return:
"""
try:
read_file = open(data_file, 'r', encoding=self.fixed_width_encoding)
except IOError:
raise IOError("Could not read the file %s" % (data_file))
try:
write_file = open(csv_file, 'w', encoding=self.delimited_encoding)
except IOError:
raise IOError("Could not write to the file %s" % (csv_file))
for line in read_file:
parts = []
counter = 0
for field in self.fields:
parts.append(line[counter:counter + field[1]].strip())
counter += field[1]
write_file.write(",".join(parts) + "\n")
read_file.close()
write_file.close()
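# ---------------------------------------------------------------------------
# Illustrative spec.json consumed by the FixedWidth class above (hypothetical
# offsets and encodings; the real specification file is not part of this
# snippet). The keys mirror the attributes validated in __init__, and every
# value is a string or a list of strings, as the type checks require:
#
# {
#     "ColumnNames": ["f1", "f2", "f3", "f4", "f5", "f6", "f7", "f8", "f9", "f10"],
#     "Offsets": ["5", "12", "3", "2", "13", "13", "7", "10", "16", "20"],
#     "FixedWidthEncoding": "windows-1252",
#     "IncludeHeader": "True",
#     "DelimitedEncoding": "utf-8"
# }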
def main():
data = [{"f1": "Ms", "f2": "Michael", "f3": 32, "f4": "vr", "f5": Decimal('40.7128'),
"f6": Decimal('-74.005'), "f7": -100, "f8": Decimal('1.0001'), "f9": "abcdefg1234###q", "f10": "Pradnya"},
{"f1": "Mr", "f2": "Smith", "f3": 32, "f4": "r", "f5": Decimal('38.7128'),
"f6": Decimal('-64.005'), "f7": -130, "f8": Decimal('1.0001'), "f9": "abcdefg1234###q", "f10": "Alchetti"}]
config_file = "spec.json"
fx = FixedWidth(config_file, data_file_name='fixed_width_file.txt')
for each in data:
fx.update(each)
fx.write_file()
fx.close_file()
fx.parser("fixed_width_file.txt", "fixed_width_file_csv.csv")
while True:
print("Done converting and parsing")
#time.sleep(300)
if __name__ == '__main__':
main()
| 33.370192
| 121
| 0.53998
| 5,968
| 0.859818
| 0
| 0
| 0
| 0
| 0
| 0
| 2,518
| 0.362772
|
4d3cfc02ebd8ee1182122794d381b0a0a452d148
| 855
|
py
|
Python
|
52digest.py
|
Ferdandez/homework
|
07df8e0c63e93773e7fc354bfb4e6ae301d49124
|
[
"MIT"
] | null | null | null |
52digest.py
|
Ferdandez/homework
|
07df8e0c63e93773e7fc354bfb4e6ae301d49124
|
[
"MIT"
] | null | null | null |
52digest.py
|
Ferdandez/homework
|
07df8e0c63e93773e7fc354bfb4e6ae301d49124
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# 52digest.py
import re
import sys
# Write a program that performs an EcoRI digest on the SARS-COV2 genome
# The program should have 2 arguments
# 1. The genome file
# 2. The restriction pattern
# The output should be the sizes of the restriction fragments
originseen = False
seq = ''
digest = sys.argv[2]
filename = sys.argv[1]
with open(filename) as fp:
for line in fp.readlines():
if line.startswith('ORIGIN'): originseen = True
if originseen:
words = line.split()
seq += ''.join(words[1:])
#print(len(seq))
count = 0
k = len(digest)
for i in range(len(seq) - k + 1):
    scope = seq[i:i+k]
    if scope == digest:
        print(count)
        count = 0
    count += 1
"""
python3 52digest.py ../Data/sars-cov2.gb gaattc
1160
10573
5546
448
2550
2592
3569
2112
1069
"""
| 19
| 71
| 0.679532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 402
| 0.470175
|
4d3d62f955634f9c834d309435153be67f95acc0
| 2,394
|
py
|
Python
|
yt_dlp/extractor/willow.py
|
mrBliss/yt-dlp
|
aecd021656b672dbb617e5bae54a8986f9c4ebaf
|
[
"Unlicense"
] | 80
|
2021-05-25T11:33:49.000Z
|
2022-03-29T20:36:53.000Z
|
yt_dlp/extractor/willow.py
|
mrBliss/yt-dlp
|
aecd021656b672dbb617e5bae54a8986f9c4ebaf
|
[
"Unlicense"
] | 53
|
2017-04-12T19:53:18.000Z
|
2022-02-22T10:33:13.000Z
|
yt_dlp/extractor/willow.py
|
mrBliss/yt-dlp
|
aecd021656b672dbb617e5bae54a8986f9c4ebaf
|
[
"Unlicense"
] | 22
|
2021-05-07T05:01:27.000Z
|
2022-03-26T19:10:54.000Z
|
# coding: utf-8
from ..utils import ExtractorError
from .common import InfoExtractor
class WillowIE(InfoExtractor):
_VALID_URL = r'https?://(www\.)?willow\.tv/videos/(?P<id>[0-9a-z-_]+)'
_GEO_COUNTRIES = ['US']
_TESTS = [{
'url': 'http://willow.tv/videos/d5winning-moment-eng-vs-ind-streaming-online-4th-test-india-tour-of-england-2021',
'info_dict': {
'id': '169662',
'display_id': 'd5winning-moment-eng-vs-ind-streaming-online-4th-test-india-tour-of-england-2021',
'ext': 'mp4',
'title': 'Winning Moment: 4th Test, England vs India',
'thumbnail': 'https://aimages.willow.tv/ytThumbnails/6748_D5winning_moment.jpg',
'duration': 233,
'timestamp': 1630947954,
'upload_date': '20210906',
'location': 'Kennington Oval, London',
'series': 'India tour of England 2021',
},
'params': {
'skip_download': True, # AES-encrypted m3u8
},
}, {
'url': 'http://willow.tv/videos/highlights-short-ind-vs-nz-streaming-online-2nd-t20i-new-zealand-tour-of-india-2021',
'only_matching': True,
}]
def _real_extract(self, url):
video_id = self._match_id(url)
webpage = self._download_webpage(url, video_id)
video_data = self._parse_json(self._html_search_regex(
r'var\s+data_js\s*=\s*JSON\.parse\(\'(.+)\'\)', webpage,
'data_js'), video_id)
video = next((v for v in video_data.get('trending_videos') or []
if v.get('secureurl')), None)
if not video:
raise ExtractorError('No videos found')
formats = self._extract_m3u8_formats(video['secureurl'], video_id, 'mp4')
self._sort_formats(formats)
return {
'id': str(video.get('content_id')),
'display_id': video.get('video_slug'),
'title': video.get('video_name') or self._html_search_meta('twitter:title', webpage),
'formats': formats,
'thumbnail': video.get('yt_thumb_url') or self._html_search_meta(
'twitter:image', webpage, default=None),
'duration': video.get('duration_seconds'),
'timestamp': video.get('created_date'),
'location': video.get('venue'),
'series': video.get('series_name'),
}
| 40.576271
| 125
| 0.581036
| 2,306
| 0.963241
| 0
| 0
| 0
| 0
| 0
| 0
| 1,059
| 0.442356
|
4d3d9898f84561406d75802e9d8427f790dcd648
| 32
|
py
|
Python
|
vesicashapi/vesicash.py
|
vesicash/vesicash-python-sdk
|
0a665e302b88c4eeb316a635c5485c9c3c1fffeb
|
[
"Apache-2.0"
] | null | null | null |
vesicashapi/vesicash.py
|
vesicash/vesicash-python-sdk
|
0a665e302b88c4eeb316a635c5485c9c3c1fffeb
|
[
"Apache-2.0"
] | null | null | null |
vesicashapi/vesicash.py
|
vesicash/vesicash-python-sdk
|
0a665e302b88c4eeb316a635c5485c9c3c1fffeb
|
[
"Apache-2.0"
] | 1
|
2021-04-20T14:54:40.000Z
|
2021-04-20T14:54:40.000Z
|
""" Entry point defined here """
| 32
| 32
| 0.65625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 32
| 1
|
4d3e1930813427009e7819522279ea71f06fd637
| 4,044
|
py
|
Python
|
src/dissregarded/NetArivalsDeparturesHoures.py
|
sebastian-konicz/WRM
|
e60aafada7bb85df955a7e1357f33fe5846f4e6c
|
[
"MIT"
] | 1
|
2019-11-02T18:24:19.000Z
|
2019-11-02T18:24:19.000Z
|
src/dissregarded/NetArivalsDeparturesHoures.py
|
sebastian-konicz/WRM
|
e60aafada7bb85df955a7e1357f33fe5846f4e6c
|
[
"MIT"
] | 7
|
2020-03-24T17:48:28.000Z
|
2022-03-12T00:05:13.000Z
|
src/dissregarded/NetArivalsDeparturesHoures.py
|
sebastian-konicz/WRM
|
e60aafada7bb85df955a7e1357f33fe5846f4e6c
|
[
"MIT"
] | null | null | null |
import folium
from folium.plugins import MarkerCluster
import pandas as pd
import datetime
from pathlib import Path
# pd.options.display.max_columns = 50
def main(dir):
# Loading Data Set
print("Loading dataset")
RentalData = pd.read_csv(dir + r'\data\processed\RentalData2015.csv')
    # Changing the StartDate and EndDate to datetime format
RentalData["StartDate"] = pd.to_datetime(RentalData["StartDate"])
RentalData["EndDate"] = pd.to_datetime(RentalData["EndDate"])
RentalData['hour_start'] = RentalData["StartDate"].map(lambda x: x.hour)
    RentalData['hour_end'] = RentalData["EndDate"].map(lambda x: x.hour)
# Excluding form computation trips made between the same station
SameTrips = RentalData[RentalData['StartStation'] == RentalData['EndStation']]
SameTrips = SameTrips.reset_index(drop=True)
print(SameTrips.tail())
RentalData = RentalData[RentalData['StartStation'] != RentalData['EndStation']]
RentalData = RentalData.reset_index(drop=True)
print(RentalData.tail())
def get_trip_counts_by_hour(hour):
        # Locations of docking stations
locations = RentalData.groupby("s_number").first()
locations = locations[["StartStation", "s_lat", "s_lng"]]
# Time of day
subset_start = RentalData[(RentalData['hour_start'] >= hour) & (RentalData['hour_start'] <= 9)]
        subset_end = RentalData[(RentalData['hour_end'] >= hour) & (RentalData['hour_end'] <= 9)]
# Counting trips FROM docking station (departures)
departure_counts = subset_start.groupby("StartStation").count().iloc[:, [0]]
departure_counts.columns = ['DepartureCount']
# Counting trips TO docking station (arrivals)
arrival_counts = subset_end.groupby("EndStation").count().iloc[:, [0]]
arrival_counts.columns = ["ArrivalCount"]
# Joining departure counts and arrival counts
trip_counts = departure_counts.join(arrival_counts)
# Merging with locations to get latitude and longitude of station
trip_counts = pd.merge(trip_counts, locations, on="StartStation")
# trip_counts.to_csv(dir + r'\data\processed\TripCounts.csv', encoding='utf-8', index=False)
return trip_counts
def plot_station_counts(trip_counts):
# generate a new map
folium_map = folium.Map(location=[51.099783, 17.03082], zoom_start=13, tiles="CartoDB dark_matter")
        # For each row in the data, add a circle marker
for index, row in trip_counts.iterrows():
# Calculate net departures
net_departures = (row["DepartureCount"] - row["ArrivalCount"])
# Popup message that is shown on click.
popuptext = "{}<br> total departures: {}<br> total arrivals: {}<br> net departures: {}"
popuptext = popuptext.format(row["StartStation"], row["DepartureCount"], row["ArrivalCount"], net_departures)
popup = folium.Popup(html=popuptext, max_width=250, min_width=150)
# Radius of circles
radius = abs(net_departures / 10)
# Color of the marker
if net_departures > 0:
# color="#FFCE00" # orange / # color="#007849" # green
color = "#E37222" # tangerine
else:
# color="#0375B4" # blue / # color="#FFCE00" # yellow
color = "#0A8A9F" # teal
# add marker to the map
folium.CircleMarker(location=(row["s_lat"], row["s_lng"]),
radius=radius,
color=color,
popup=popup,
fill=True).add_to(folium_map)
# Saving map to folder
folium_map.save(dir + r"\images\sites\NetArivalDepartures2015.html")
return folium_map
trip_counts = get_trip_counts_by_hour(5)
folium_map = plot_station_counts(trip_counts)
if __name__ == "__main__":
project_dir = str(Path(__file__).resolve().parents[2])
main(project_dir)
| 42.568421
| 121
| 0.638477
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,455
| 0.359792
|
4d3ee1ccb8692f8cfb3b7d31686fa015b7d46982
| 5,470
|
py
|
Python
|
bin/lineage_parser.py
|
brianlee99/UVP
|
5b7ff26c09d84760d4220268f34fb4814848eb4a
|
[
"MIT"
] | null | null | null |
bin/lineage_parser.py
|
brianlee99/UVP
|
5b7ff26c09d84760d4220268f34fb4814848eb4a
|
[
"MIT"
] | null | null | null |
bin/lineage_parser.py
|
brianlee99/UVP
|
5b7ff26c09d84760d4220268f34fb4814848eb4a
|
[
"MIT"
] | null | null | null |
#! /usr/bin/python
import sys
""" This script accepts the final annotation file and the lineage marker SNPs file """
""" and infers the lineage and possible sublineage classification of the isolate """
""" it requires a sample ID name (string) and an output file name(string) """
"""
Author: Matthew Ezewudo
CPTR ReSeqTB Project - Critical Path Institute
"""
input1 = sys.argv[1]
input2 = sys.argv[2]
input3 = sys.argv[3]
input4 = sys.argv[4]
fh1 = open(input1, 'r')
sublinn = ""
(lineage,position,ref,alt) = ([],[],[],[])
prevlin = []
prevsub = []
tribes = ["lineages","Indo-Oceanic","East-Asian","East-African-Indian","Euro-American","West-Africa 1","West-Africa 2","Ethiopian"]
(concord,discord,concord1,discord1,count) = (0,0,0,0,0)
discordance = False
sublinneage = False
linfour = ""
hrv37 = ""
BOV = ""
BOV_AFRI = ""
for lines in fh1:
if lines.startswith('#'):
continue
fields = lines.rstrip("\r\n").split("\t")
lineage.append(fields[0])
position.append(fields[1])
ref.append(fields[2])
alt.append(fields[3])
fh1.close()
fh2 = open(input2,'r')
for lines in fh2:
count += 1
fields = lines.rstrip("\r\n").split("\t")
if fields[2] == '931123':
linfour = fields[2]
if fields[2] == '1759252':
hrv37 = fields[2]
if fields[2] == '2831482':
BOV = fields[2]
if fields[2] == '1882180':
BOV_AFRI = '1882180'
if fields[2] in position:
ind = position.index(fields[2])
if alt[ind] == fields[4]:
if len(lineage[ind]) > 1:
sublin = lineage[ind]
prevsub.append(sublin)
sublinn = prevsub[0]
print "SNP" + " " + position[ind] + " " + "suggests sub-lineage: " + lineage[ind]
if prevsub[0] != sublin:
discord += 1
else:
concord +=1
for i in range(0,len(prevsub)):
if len(sublinn) < len(prevsub[i]) :
sublinn = prevsub[i]
else:
lin = lineage[ind]
prevlin.append(lin)
print "SNP" + " " + position[ind] + " " + "suggests lineage: " + lineage[ind]
if prevlin[0] != lin:
discord1 += 1
else:
concord1 += 1
fh2.close()
fh3 = open(input3,'w')
print >> fh3, "Sample ID" + "\t" + "Lineage" + "\t" + "Lineage Name" + "\t" + "Sublineage"
split_first = ['NA']
if len(prevsub) > 0:
split_first = sublinn.split(".")
sublinneage = True
if len(prevlin) == 0:
if len(BOV) > 0:
print "Lineage: " + "BOV"
print >> fh3, input4 + "\t" + "BOV" + "\t" + "Bovis" + "\t" + "NA"
if len(BOV) == 0 or len(BOV_AFRI) == 0:
for i in range(0,len(prevsub)):
split_lin = prevsub[i].split(".")
if split_lin[0] != split_first[0]:
discordance = True
if split_lin[1] != split_first[1]:
discordance = True
if discordance:
print "no precise lineage inferred"
print >> fh3, "no precise lineage inferred"
sys.exit(1)
else:
if len(split_first) > 1:
print "Lineage: " + split_first[0] + " : " + tribes[int(split_first[0])]
print "Sub-lineage: " + sublinn
print >> fh3, input4 + "\t" + split_first[0] + "\t" + tribes[int(split_first[0])] + "\t" + sublinn
elif len(linfour) < 2:
print "Absence of SNP 931123 suggests lineage 4"
print "Lineage: " + "4" + " : " + "Euro-American"
if len(hrv37) > 2:
print >> fh3, input4 + "\t" + "4" + "\t" + "Euro American" + "\t" + "NA"
elif len(hrv37) < 2:
print "Absence of SNP 1759252 suggests sublineage 4.9"
print >> fh3, input4 + "\t" + "4" + "\t" + "Euro American" + "\t" + "4.9"
else:
print "No Informative SNPs detected"
print >> fh3, "No Informative SNPs detected"
else:
if len(prevlin) > 1:
for j in range(0,len(prevlin)):
if prevlin[0] != prevlin[j]:
discordance = True
if discordance == True:
print "no concordance between predicted lineage and sublineage(s)"
print >> fh3, "no concordance between predicted lineage and sublineage(s)"
sys.exit(1)
else:
if len(sublinn) < 1:
print "Lineage: " + prevlin[0] + " " + tribes[int(prevlin[0])]
print >> fh3, input4 + "\t" + prevlin[0] + "\t" + tribes[int(prevlin[0])] + "\t" + "NA"
elif len(sublinn) > 1:
for i in range(0,len(prevsub)):
split_lin = prevsub[i].split(".")
if split_lin[0] != prevlin[0] and split_lin[0] != 'BOV_AFRI':
discordance = True
if split_lin[0] != split_first[0]:
discordance = True
if discordance:
print "no precise lineage inferred"
print >> fh3, "no precise lineage inferred"
sys.exit(1)
else:
print "Lineage: " + prevlin[0] + " " + tribes[int(prevlin[0])]
if sublinn.startswith('BOV_A'):
print >> fh3, input4 + "\t" + prevlin[0] + "\t" + tribes[int(prevlin[0])] + "\t" + "NA"
else:
print "Sub-lineage: " + sublinn
print >> fh3, input4 + "\t" + prevlin[0] + "\t" + tribes[int(prevlin[0])] + "\t" + sublinn
| 36.466667
| 131
| 0.518464
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,364
| 0.24936
|
4d3ef71c75b3b75d2218b109a8f46905d02e164e
| 10,038
|
py
|
Python
|
rdr_service/offline/metrics_export.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
rdr_service/offline/metrics_export.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
rdr_service/offline/metrics_export.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
from rdr_service import clock, config
from rdr_service.code_constants import EHR_CONSENT_QUESTION_CODE, PPI_SYSTEM, RACE_QUESTION_CODE, UNMAPPED
from rdr_service.dao.code_dao import CodeDao
from rdr_service.dao.database_utils import get_sql_and_params_for_array, replace_isodate
from rdr_service.dao.hpo_dao import HPODao
from rdr_service.field_mappings import NON_EHR_QUESTIONNAIRE_MODULE_FIELD_NAMES
from rdr_service.model.base import get_column_name
from rdr_service.model.participant_summary import ParticipantSummary
from rdr_service.offline.metrics_config import ANSWER_FIELD_TO_QUESTION_CODE
from rdr_service.offline.sql_exporter import SqlExporter
# from rdr_service.offline.metrics_pipeline import MetricsPipeline
from rdr_service.participant_enums import QuestionnaireStatus, TEST_EMAIL_PATTERN, TEST_HPO_NAME
# TODO: filter out participants that have withdrawn in here
_PARTICIPANTS_CSV = "participants_%d.csv"
_HPO_IDS_CSV = "hpo_ids_%d.csv"
_ANSWERS_CSV = "answers_%d.csv"
_ALL_CSVS = [_PARTICIPANTS_CSV, _HPO_IDS_CSV, _ANSWERS_CSV]
_QUEUE_NAME = "metrics-pipeline"
_PARTICIPANT_SQL_TEMPLATE = """
SELECT p.participant_id, ps.date_of_birth date_of_birth,
(SELECT ISODATE[MIN(bo.created)] FROM biobank_order bo
WHERE bo.participant_id = p.participant_id
AND bo.order_status is null or bo.order_status <> 2) first_order_date,
(SELECT ISODATE[MIN(bs.confirmed)] FROM biobank_stored_sample bs
WHERE bs.biobank_id = p.biobank_id) first_samples_arrived_date,
(SELECT ISODATE[MIN(pm.finalized)] FROM physical_measurements pm
WHERE pm.participant_id = p.participant_id
AND pm.finalized is not null
AND pm.status is null or pm.status <> 2) first_physical_measurements_date,
(SELECT ISODATE[MIN(bss.confirmed)] FROM biobank_stored_sample bss
WHERE bss.biobank_id = p.biobank_id
AND bss.test IN {}) first_samples_to_isolate_dna_date, {}
FROM participant p, participant_summary ps
WHERE p.participant_id = ps.participant_id
AND p.participant_id % :num_shards = :shard_number
AND p.hpo_id != :test_hpo_id
AND p.withdrawal_status != 2
AND NOT ps.email LIKE :test_email_pattern
AND p.is_test_participant != TRUE
"""
# Find HPO ID changes in participant history.
_HPO_ID_QUERY = """
SELECT ph.participant_id participant_id, hpo.name hpo,
ISODATE[ph.last_modified] last_modified
FROM participant_history ph, hpo, participant p
WHERE ph.participant_id % :num_shards = :shard_number
AND ph.hpo_id = hpo.hpo_id
AND ph.participant_id = p.participant_id
AND ph.hpo_id != :test_hpo_id
AND p.hpo_id != :test_hpo_id
AND p.withdrawal_status != 2
AND p.is_test_participant != TRUE
AND NOT EXISTS
(SELECT * FROM participant_history ph_prev
WHERE ph_prev.participant_id = ph.participant_id
AND ph_prev.version = ph.version - 1
AND ph_prev.hpo_id = ph.hpo_id)
AND NOT EXISTS
(SELECT * FROM participant_summary ps
WHERE ps.participant_id = ph.participant_id
AND ps.email LIKE :test_email_pattern)
"""
_ANSWER_QUERY = """
SELECT qr.participant_id participant_id, ISODATE[qr.created] start_time,
qc.value question_code,
(SELECT CASE WHEN ac.mapped THEN ac.value ELSE :unmapped END FROM code ac
WHERE ac.code_id = qra.value_code_id) answer_code,
qra.value_string answer_string
FROM questionnaire_response_answer qra, questionnaire_response qr, questionnaire_question qq,
code qc, participant p
WHERE qra.questionnaire_response_id = qr.questionnaire_response_id
AND qra.question_id = qq.questionnaire_question_id
AND qq.code_id = qc.code_id
AND qq.code_id in ({})
AND qr.participant_id % :num_shards = :shard_number
AND qr.participant_id = p.participant_id
AND p.hpo_id != :test_hpo_id
AND p.withdrawal_status != 2
AND p.is_test_participant != TRUE
AND NOT EXISTS
(SELECT * FROM participant_summary ps
WHERE ps.participant_id = p.participant_id
AND ps.email LIKE :test_email_pattern)
ORDER BY qr.participant_id, qr.created, qc.value
"""
def _get_params(num_shards, shard_number):
test_hpo = HPODao().get_by_name(TEST_HPO_NAME)
return {
"num_shards": num_shards,
"shard_number": shard_number,
"test_hpo_id": test_hpo.hpoId,
"test_email_pattern": TEST_EMAIL_PATTERN,
}
def _get_participant_sql(num_shards, shard_number):
module_time_fields = [
"(CASE WHEN ps.{0} = :submitted THEN ISODATE[ps.{1}] ELSE NULL END) {1}".format(
get_column_name(ParticipantSummary, field_name), get_column_name(ParticipantSummary, field_name + "Time")
)
for field_name in NON_EHR_QUESTIONNAIRE_MODULE_FIELD_NAMES
]
modules_sql = ", ".join(module_time_fields)
dna_tests_sql, params = get_sql_and_params_for_array(config.getSettingList(config.DNA_SAMPLE_TEST_CODES), "dna")
params.update(_get_params(num_shards, shard_number))
params["submitted"] = int(QuestionnaireStatus.SUBMITTED)
return replace_isodate(_PARTICIPANT_SQL_TEMPLATE.format(dna_tests_sql, modules_sql)), params
def _get_hpo_id_sql(num_shards, shard_number):
return replace_isodate(_HPO_ID_QUERY), _get_params(num_shards, shard_number)
def _get_answer_sql(num_shards, shard_number):
code_dao = CodeDao()
code_ids = []
question_codes = list(ANSWER_FIELD_TO_QUESTION_CODE.values())
question_codes.append(RACE_QUESTION_CODE)
question_codes.append(EHR_CONSENT_QUESTION_CODE)
for code_value in question_codes:
code = code_dao.get_code(PPI_SYSTEM, code_value)
code_ids.append(str(code.codeId))
params = _get_params(num_shards, shard_number)
params["unmapped"] = UNMAPPED
return replace_isodate(_ANSWER_QUERY.format((",".join(code_ids)))), params
class MetricsExport(object):
"""Exports data from the database needed to generate metrics.
Exports are performed in a chain of tasks, each of which can run for up to 10 minutes.
A configurable number of shards allows each data set being exported to be broken up into pieces
that can complete in time; sharded output also makes MapReduce on the result run faster.
When the last task is done, the MapReduce pipeline for metrics is kicked off.
"""
@classmethod
def _export_participants(self, bucket_name, filename_prefix, num_shards, shard_number):
sql, params = _get_participant_sql(num_shards, shard_number)
SqlExporter(bucket_name).run_export(
filename_prefix + _PARTICIPANTS_CSV % shard_number, sql, params, backup=True
)
@classmethod
def _export_hpo_ids(self, bucket_name, filename_prefix, num_shards, shard_number):
sql, params = _get_hpo_id_sql(num_shards, shard_number)
SqlExporter(bucket_name).run_export(filename_prefix + _HPO_IDS_CSV % shard_number, sql, params, backup=True)
@classmethod
def _export_answers(self, bucket_name, filename_prefix, num_shards, shard_number):
sql, params = _get_answer_sql(num_shards, shard_number)
SqlExporter(bucket_name).run_export(filename_prefix + _ANSWERS_CSV % shard_number, sql, params, backup=True)
@staticmethod
def start_export_tasks(bucket_name, num_shards):
"""Entry point to exporting data for use by the metrics pipeline. Begins the export of
the first shard of the participant data."""
filename_prefix = "%s/" % clock.CLOCK.now().isoformat()
# TODO: Do we need to convert this to a Cloud task?
# deferred.defer(
MetricsExport._start_participant_export(bucket_name, filename_prefix, num_shards, 0)
# )
@staticmethod
def _start_export(
bucket_name,
filename_prefix,
num_shards,
shard_number,
export_methodname,
next_shard_methodname,
next_type_methodname,
finish_methodname=None,
):
getattr(MetricsExport, export_methodname)(bucket_name, filename_prefix, num_shards, shard_number)
shard_number += 1
if shard_number == num_shards:
if next_type_methodname:
                # Previously queued via deferred.defer; call the next export type directly.
                getattr(MetricsExport, next_type_methodname)(bucket_name, filename_prefix, num_shards, 0)
else:
getattr(MetricsExport, finish_methodname)(bucket_name, filename_prefix, num_shards)
else:
            # Previously queued via deferred.defer; continue with the next shard directly.
            getattr(MetricsExport, next_shard_methodname)(bucket_name, filename_prefix, num_shards, shard_number)
@classmethod
def _start_participant_export(cls, bucket_name, filename_prefix, num_shards, shard_number):
MetricsExport._start_export(
bucket_name,
filename_prefix,
num_shards,
shard_number,
"_export_participants",
"_start_participant_export",
"_start_hpo_id_export",
)
@classmethod
def _start_hpo_id_export(cls, bucket_name, filename_prefix, num_shards, shard_number):
MetricsExport._start_export(
bucket_name,
filename_prefix,
num_shards,
shard_number,
"_export_hpo_ids",
"_start_hpo_id_export",
"_start_answers_export",
)
@classmethod
def _start_answers_export(cls, bucket_name, filename_prefix, num_shards, shard_number):
MetricsExport._start_export(
bucket_name,
filename_prefix,
num_shards,
shard_number,
"_export_answers",
"_start_answers_export",
None,
"_start_metrics_pipeline",
)
# @classmethod
# def _start_metrics_pipeline(cls, bucket_name, filename_prefix, num_shards):
# input_files = []
# for csv_filename in _ALL_CSVS:
# input_files.extend([filename_prefix + csv_filename % shard for shard
# in range(0, num_shards)])
# pipeline = MetricsPipeline(bucket_name, clock.CLOCK.now(), input_files)
# pipeline.start(queue_name=_QUEUE_NAME)
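# ---------------------------------------------------------------------------
# Illustrative entry point (hypothetical bucket name and shard count; not part
# of the original module): kicks off the chained, sharded export described in
# the MetricsExport docstring. Each shard of participants, HPO ids and answers
# is written by SqlExporter to <bucket>/<timestamp>/participants_<shard>.csv,
# hpo_ids_<shard>.csv and answers_<shard>.csv respectively.
#
#     MetricsExport.start_export_tasks("metrics-export-bucket", 10)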
| 41.139344
| 117
| 0.724846
| 4,298
| 0.428173
| 0
| 0
| 3,362
| 0.334927
| 0
| 0
| 4,500
| 0.448296
|
4d3f64220d8ff34f2c9e4737de791b02b5323b50
| 701
|
py
|
Python
|
fundata/dota2/player/player_detail.py
|
mengyuanhumy/fundata
|
e4090edf967e2ad7f7efadd64a7fc2ae8dc1ed32
|
[
"MIT"
] | null | null | null |
fundata/dota2/player/player_detail.py
|
mengyuanhumy/fundata
|
e4090edf967e2ad7f7efadd64a7fc2ae8dc1ed32
|
[
"MIT"
] | null | null | null |
fundata/dota2/player/player_detail.py
|
mengyuanhumy/fundata
|
e4090edf967e2ad7f7efadd64a7fc2ae8dc1ed32
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from ...client import get_api_client
def get_player_detail_stats(player_id):
    """Player general statistics.
    Parameter player_id: the player ID, int
    Returns data in dict format
"""
if get_player_data_status(player_id)==2:
client=get_api_client()
uri="/fundata-dota2-free/v2/player/"+str(player_id)+"/detail_stats"
return client.api(uri,{})
else:
print("player_id=%i has no data"%player_id)
return 0
def get_player_data_status(player_id):
    """Player data status.
    Parameter player_id: the player ID, int
    Returns data in dict format: status is the state of the statistics data, 2 means data exists, 1 means no data
"""
client=get_api_client()
uri="/fundata-dota2-free/v2/player/"+str(player_id)+"/data_status"
res=client.api(uri,{})
if res["retcode"]==200 and res["data"]["status"]==2:
return 2
else:
return 1
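# Illustrative usage (hypothetical player_id; not part of the original module):
#
#     stats = get_player_detail_stats(123456789)
#     if stats:
#         print(stats)
#
# get_player_detail_stats returns 0 when the player has no data (status != 2).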
| 22.612903
| 69
| 0.71184
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 386
| 0.483104
|
4d40d6894572ebb56bff51cbd51d17f087ba2234
| 2,454
|
py
|
Python
|
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
|
panfeiyy/ambari
|
24077510723ede93d3024784f0b04422adaf56d6
|
[
"Apache-2.0"
] | 16
|
2018-05-24T10:28:24.000Z
|
2021-08-05T03:13:26.000Z
|
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
|
panfeiyy/ambari
|
24077510723ede93d3024784f0b04422adaf56d6
|
[
"Apache-2.0"
] | 3
|
2021-05-09T12:37:16.000Z
|
2022-03-02T10:13:24.000Z
|
ambari-server/src/main/resources/stacks/BIGTOP/0.8/services/YARN/package/scripts/resourcemanager.py
|
panfeiyy/ambari
|
24077510723ede93d3024784f0b04422adaf56d6
|
[
"Apache-2.0"
] | 17
|
2018-07-06T08:57:00.000Z
|
2021-11-04T11:00:36.000Z
|
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Ambari Agent
"""
import sys
from resource_management import *
from yarn import yarn
from service import service
class Resourcemanager(Script):
def install(self, env):
self.install_packages(env)
self.configure(env)
def configure(self, env):
import params
env.set_params(params)
yarn(name='resourcemanager')
def start(self, env):
import params
env.set_params(params)
self.configure(env) # FOR SECURITY
service('resourcemanager',
action='start'
)
def stop(self, env):
import params
env.set_params(params)
service('resourcemanager',
action='stop'
)
def status(self, env):
import status_params
env.set_params(status_params)
check_process_status(status_params.resourcemanager_pid_file)
pass
def refreshqueues(self, env):
import params
self.configure(env)
env.set_params(params)
service('resourcemanager',
action='refreshQueues'
)
def decommission(self, env):
import params
env.set_params(params)
rm_kinit_cmd = params.rm_kinit_cmd
yarn_user = params.yarn_user
conf_dir = params.hadoop_conf_dir
user_group = params.user_group
yarn_refresh_cmd = format("{rm_kinit_cmd} yarn --config {conf_dir} rmadmin -refreshNodes")
File(params.exclude_file_path,
content=Template("exclude_hosts_list.j2"),
owner=yarn_user,
group=user_group
)
if params.update_exclude_file_only == False:
Execute(yarn_refresh_cmd,
environment= {'PATH' : params.execute_path },
user=yarn_user)
pass
pass
if __name__ == "__main__":
Resourcemanager().execute()
| 24.058824
| 94
| 0.711899
| 1,516
| 0.617767
| 0
| 0
| 0
| 0
| 0
| 0
| 990
| 0.403423
|
4d4284d5a0b58c47616dd4e99223550ab8085447
| 166
|
py
|
Python
|
src/core/command.py
|
cfmcdonald-78/Hexcrawler
|
79ca4ab9327abf08de1743612c23eb89aa53a2b9
|
[
"MIT"
] | null | null | null |
src/core/command.py
|
cfmcdonald-78/Hexcrawler
|
79ca4ab9327abf08de1743612c23eb89aa53a2b9
|
[
"MIT"
] | null | null | null |
src/core/command.py
|
cfmcdonald-78/Hexcrawler
|
79ca4ab9327abf08de1743612c23eb89aa53a2b9
|
[
"MIT"
] | 1
|
2021-12-01T01:38:12.000Z
|
2021-12-01T01:38:12.000Z
|
'''
Created on Jul 19, 2012
@author: Chris
'''
class Command(object):
def validate(self, game):
pass
def execute(self, game):
pass
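# Usage sketch (editor's addition, not part of the original module): a hypothetical concrete
# command illustrating the validate/execute contract defined by the base class above.
class EndTurnCommand(Command):
def validate(self, game):
return game is not None
def execute(self, game):
print("ending turn for", game)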
| 12.769231
| 29
| 0.554217
| 117
| 0.704819
| 0
| 0
| 0
| 0
| 0
| 0
| 47
| 0.283133
|
4d42f4da01153d9efccca4d19cc6efc9b683c41b
| 8,039
|
py
|
Python
|
gui/trimGui.py
|
lhalb/gfmanager
|
449f071b3239faa672b7f06122dfc9bc23e68d79
|
[
"MIT"
] | 1
|
2022-01-18T12:53:17.000Z
|
2022-01-18T12:53:17.000Z
|
gui/trimGui.py
|
lhalb/gfmanager
|
449f071b3239faa672b7f06122dfc9bc23e68d79
|
[
"MIT"
] | null | null | null |
gui/trimGui.py
|
lhalb/gfmanager
|
449f071b3239faa672b7f06122dfc9bc23e68d79
|
[
"MIT"
] | null | null | null |
from PyQt5 import QtGui, QtWidgets
import seaborn as sns
from gui import trimming as tri
from gui import boxes as BOX
import matplotlib.image as mpimg
from math import floor, ceil
class TrimDialog(QtWidgets.QDialog, tri.Ui_Dialog):
def __init__(self, data=None):
super(TrimDialog, self).__init__()
self.setupUi(self)
# Set the window icon
icon = QtGui.QIcon(":/img/icons/trim.png")
self.setWindowIcon(icon)
self.trim_data = data
self.cal_val = 1
# Fill Combo-Box
additems = [i for i in data.columns if i not in ['pos-x', 'pos-y', 'edge']]
self.cb_cols.addItems(additems)
# Init Data-Plot
self.plt_data = self.dataPlot.canvas
self.data_ax = self.plt_data.fig.add_subplot(111)
self.init_slider()
self.update_violin()
self.vline = self.data_ax.axvline(data[self.cb_cols.currentText()].min(), color='r')
self.vline_max = self.data_ax.axvline(ceil(data[self.cb_cols.currentText()].max()), color='b')
# Init Image-Plot
self.imagePlot.hide()
self.plt_image = self.imagePlot.canvas
self.image_ax = self.plt_image.fig.add_subplot(111)
self.scat, = self.image_ax.plot([], [], marker='o', ms=5, ls='', color='r')
# Connect the signal/slot triggers last
self.setup_triggers()
def setup_triggers(self):
self.sliderTRIM_min.valueChanged.connect(self.update_data)
self.sliderTRIM_max.valueChanged.connect(self.update_data)
self.but_openImage.clicked.connect(self.load_image)
self.cb_cols.currentTextChanged.connect(self.update_element)
self.cb_edgeGrains.clicked.connect(self.update_element)
self.txt_kalwert.returnPressed.connect(self.update_cal_val)
self.lab_cut_min.editingFinished.connect(self.set_min_slider)
self.lab_cut_max.editingFinished.connect(self.set_max_slider)
self.but_cut_min.clicked.connect(self.manipulate_max)
self.but_cut_max.clicked.connect(self.manipulate_min)
def update_element(self):
self.update_violin()
self.init_vline()
bool_max = self.but_cut_min.isChecked()
bool_min = self.but_cut_max.isChecked()
self.init_slider(h_max=bool_max, h_min=bool_min)
self.update_scatter_data()
def update_violin(self):
self.data_ax.clear()
curr_text = self.cb_cols.currentText()
if self.cb_edgeGrains.isChecked():
corr_data = self.trim_data[self.trim_data['edge'] == 0]
else:
corr_data = self.trim_data
data = corr_data[curr_text]
sns.violinplot(x=data, ax=self.data_ax, cut=0)
self.plt_data.fig.tight_layout()
self.plt_data.draw_idle()
def init_vline(self):
curr_text = self.cb_cols.currentText()
min_val = self.trim_data[curr_text].min()
max_val = self.trim_data[curr_text].max()
self.vline = self.data_ax.axvline(min_val, color='r')
self.vline_max = self.data_ax.axvline(max_val, color='b')
self.plt_data.draw_idle()
def init_slider(self, h_min=False, h_max=False):
sli_min = self.sliderTRIM_min
sli_max = self.sliderTRIM_max
curr_text = self.cb_cols.currentText()
if self.cb_edgeGrains.isChecked():
data = self.trim_data[self.trim_data['edge'] == 0]
else:
data = self.trim_data
min_val = floor(data[curr_text].min())
max_val = ceil(data[curr_text].max())
# If the split point should sit at the start of the data (only the max slider is active)
if h_min and not h_max:
half_min = min_val
# If the split point should sit at the end of the data (only the min slider is active)
elif h_max and not h_min:
half_min = max_val
else:
half_min = floor((max_val-min_val)/2)
half_max = half_min + 1
sli_min.setMinimum(min_val)
sli_min.setMaximum(half_min)
if half_min != min_val and half_max != max_val:
if half_min > 10:
ticks = 10
else:
ticks = half_min - min_val
sli_min.setTickInterval(int((half_min-min_val)/ticks))
sli_max.setTickInterval(int((max_val-half_min)/ticks))
sli_max.setMinimum(half_max)
sli_max.setMaximum(max_val)
sli_min.setValue(min_val)
sli_max.setValue(max_val)
self.lab_cut_min.setText(str(min_val))
self.lab_cut_max.setText(str(max_val))
def update_vline_max(self):
act_val = self.sliderTRIM_max.value()
self.vline_max.set_xdata(act_val)
self.plt_data.draw_idle()
self.lab_cut_max.setText(str(act_val))
def update_vline(self):
act_val = self.sliderTRIM_min.value()
self.vline.set_xdata(act_val)
self.plt_data.draw_idle()
self.lab_cut_min.setText(str(act_val))
def load_image(self):
fname = QtWidgets.QFileDialog.getOpenFileName(self, 'Bilddatei laden',
filter='Bilddateien (*.png *.jpeg *.jpg *.bmp)')[0]
# If the user cancels the file selection dialog
if not fname:
return
img = mpimg.imread(fname)
y_max = img.shape[0]
x_max = img.shape[1]
x_cal = self.trim_data['pos-x'].max()/x_max
y_cal = self.trim_data['pos-y'].max() / y_max
self.cal_val = max(x_cal, y_cal)
self.txt_kalwert.setText(str(self.cal_val))
self.image_ax.imshow(img, origin='upper', extent=None)
self.plt_image.draw_idle()
self.plt_image.fig.tight_layout()
self.show_image_widget()
def update_cal_val(self):
self.cal_val = float(self.txt_kalwert.text())
def show_image_widget(self):
self.imagePlot.show()
def get_excluded_values(self):
data = self.trim_data
thresh_1 = self.sliderTRIM_min.value()
thresh_2 = self.sliderTRIM_max.value()
curr_text = self.cb_cols.currentText()
cond_1 = (data['edge'] == 1)
cond_2 = (data[curr_text] <= thresh_1) | (data[curr_text] >= thresh_2)
if self.cb_edgeGrains.isChecked():
cut_data = data.loc[cond_1 | cond_2]
else:
cut_data = data.loc[cond_2]
x_data = cut_data['pos-x'].values / self.cal_val
y_data = cut_data['pos-y'].values / self.cal_val
return x_data, y_data
def update_scatter_data(self):
x, y = self.get_excluded_values()
self.scat.set_xdata(x)
self.scat.set_ydata(y)
self.plt_image.draw_idle()
def update_data(self):
self.update_vline()
self.update_vline_max()
self.update_scatter_data()
def set_min_slider(self):
try:
val = int(self.lab_cut_min.text())
except ValueError:
BOX.show_error_box('Falscher Wert eingegeben.')
return
self.sliderTRIM_min.setValue(val)
self.update_data()
def set_max_slider(self):
try:
val = int(self.lab_cut_max.text())
except ValueError:
BOX.show_error_box('Falscher Wert eingegeben.')
return
self.sliderTRIM_max.setValue(val)
self.update_data()
def manipulate_max(self):
if self.but_cut_min.isChecked():
self.sliderTRIM_max.hide()
self.lab_cut_max.hide()
self.but_cut_max.hide()
self.init_slider(h_max=True)
else:
self.sliderTRIM_max.show()
self.lab_cut_max.show()
self.but_cut_max.show()
self.init_slider()
def manipulate_min(self):
if self.but_cut_max.isChecked():
self.sliderTRIM_min.hide()
self.lab_cut_min.hide()
self.but_cut_min.hide()
self.init_slider(h_min=True)
else:
self.sliderTRIM_min.show()
self.lab_cut_min.show()
self.but_cut_min.show()
self.init_slider()
| 32.812245
| 105
| 0.619729
| 7,853
| 0.976863
| 0
| 0
| 0
| 0
| 0
| 0
| 489
| 0.060828
|
4d47790f7b2c6a08485b7da418683620a521d5cf
| 1,178
|
py
|
Python
|
__init__.py
|
hankangkangjim/djlog
|
cfdac281be811adc3fc1b91672c0230cc575722f
|
[
"MIT"
] | null | null | null |
__init__.py
|
hankangkangjim/djlog
|
cfdac281be811adc3fc1b91672c0230cc575722f
|
[
"MIT"
] | null | null | null |
__init__.py
|
hankangkangjim/djlog
|
cfdac281be811adc3fc1b91672c0230cc575722f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf import settings
def _defaultdict(_dict, key):
if key not in _dict:
_dict[key] = {}
return _dict[key]
def update_logging(log_config):
if not isinstance(log_config, dict):
raise TypeError('settings.LOGGING must be a dict')
loggers = _defaultdict(log_config, 'loggers')
handlers = _defaultdict(log_config, 'handlers')
filters = _defaultdict(log_config, 'filters')
default_logger = loggers.get('django.request', {})
loggers['django.request'] = {
'handlers': ['catch_except'] + default_logger.get('handlers', []),
'level': default_logger.get('level', 'ERROR'),
'propagate': default_logger.get('propagate', False),
}
handlers['catch_except'] = {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': getattr(settings, '', 'djlog.handler.CatchExceptionHandler')
}
if 'require_debug_false' not in filters:
filters['require_debug_false'] = {
'()': 'django.utils.log.RequireDebugFalse'
}
return log_config
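# Usage sketch (editor's addition, not part of the original package). In a real project the
# call would live in settings.py as LOGGING = update_logging(LOGGING); the lines below only
# exercise the helper standalone against a minimally configured settings object.
settings.configure(DEBUG=False)
example_logging = update_logging({'version': 1, 'disable_existing_loggers': False})
print(example_logging['handlers']['catch_except'])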
| 29.45
| 78
| 0.618846
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 379
| 0.321732
|
4d48c769d2260ad0c3c0d32734d1d092109092b3
| 6,697
|
py
|
Python
|
manilaclient/tests/unit/common/test_httpclient.py
|
mail2nsrajesh/python-manilaclient
|
37bf2d9b4be277ece01e9ff782234d264ed4fd56
|
[
"CNRI-Python",
"Apache-1.1"
] | null | null | null |
manilaclient/tests/unit/common/test_httpclient.py
|
mail2nsrajesh/python-manilaclient
|
37bf2d9b4be277ece01e9ff782234d264ed4fd56
|
[
"CNRI-Python",
"Apache-1.1"
] | null | null | null |
manilaclient/tests/unit/common/test_httpclient.py
|
mail2nsrajesh/python-manilaclient
|
37bf2d9b4be277ece01e9ff782234d264ed4fd56
|
[
"CNRI-Python",
"Apache-1.1"
] | null | null | null |
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
import requests
import manilaclient
from manilaclient.common import httpclient
from manilaclient import exceptions
from manilaclient.tests.unit import utils
fake_user_agent = "fake"
fake_response = utils.TestResponse({
"status_code": 200,
"text": '{"hi": "there"}',
})
mock_request = mock.Mock(return_value=(fake_response))
bad_400_response = utils.TestResponse({
"status_code": 400,
"text": '{"error": {"message": "n/a", "details": "Terrible!"}}',
})
bad_400_request = mock.Mock(return_value=(bad_400_response))
bad_401_response = utils.TestResponse({
"status_code": 401,
"text": '{"error": {"message": "FAILED!", "details": "DETAILS!"}}',
})
bad_401_request = mock.Mock(return_value=(bad_401_response))
bad_500_response = utils.TestResponse({
"status_code": 500,
"text": '{"error": {"message": "FAILED!", "details": "DETAILS!"}}',
})
bad_500_request = mock.Mock(return_value=(bad_500_response))
retry_after_response = utils.TestResponse({
"status_code": 413,
"text": '',
"headers": {
"retry-after": "5"
},
})
retry_after_mock_request = mock.Mock(return_value=retry_after_response)
retry_after_no_headers_response = utils.TestResponse({
"status_code": 413,
"text": '',
})
retry_after_no_headers_mock_request = mock.Mock(
return_value=retry_after_no_headers_response)
retry_after_non_supporting_response = utils.TestResponse({
"status_code": 403,
"text": '',
"headers": {
"retry-after": "5"
},
})
retry_after_non_supporting_mock_request = mock.Mock(
return_value=retry_after_non_supporting_response)
def get_authed_client(retries=0):
cl = httpclient.HTTPClient("http://example.com", "token", fake_user_agent,
retries=retries, http_log_debug=True,
api_version=manilaclient.API_MAX_VERSION)
return cl
class ClientTest(utils.TestCase):
def setUp(self):
super(ClientTest, self).setUp()
self.max_version = manilaclient.API_MAX_VERSION
self.max_version_str = self.max_version.get_string()
def test_get(self):
cl = get_authed_client()
@mock.patch.object(requests, "request", mock_request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
headers = {
"X-Auth-Token": "token",
"User-Agent": fake_user_agent,
cl.API_VERSION_HEADER: self.max_version_str,
'Accept': 'application/json',
}
mock_request.assert_called_with(
"GET",
"http://example.com/hi",
headers=headers,
**self.TEST_REQUEST_BASE)
# Automatic JSON parsing
self.assertEqual(body, {"hi": "there"})
test_get_call()
def test_get_retry_500(self):
cl = get_authed_client(retries=1)
self.requests = [bad_500_request, mock_request]
def request(*args, **kwargs):
next_request = self.requests.pop(0)
return next_request(*args, **kwargs)
@mock.patch.object(requests, "request", request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
test_get_call()
self.assertEqual(self.requests, [])
def test_retry_limit(self):
cl = get_authed_client(retries=1)
self.requests = [bad_500_request, bad_500_request, mock_request]
def request(*args, **kwargs):
next_request = self.requests.pop(0)
return next_request(*args, **kwargs)
@mock.patch.object(requests, "request", request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
self.assertRaises(exceptions.ClientException, test_get_call)
self.assertEqual(self.requests, [mock_request])
def test_get_no_retry_400(self):
cl = get_authed_client(retries=0)
self.requests = [bad_400_request, mock_request]
def request(*args, **kwargs):
next_request = self.requests.pop(0)
return next_request(*args, **kwargs)
@mock.patch.object(requests, "request", request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
self.assertRaises(exceptions.BadRequest, test_get_call)
self.assertEqual(self.requests, [mock_request])
def test_get_retry_400_socket(self):
cl = get_authed_client(retries=1)
self.requests = [bad_400_request, mock_request]
def request(*args, **kwargs):
next_request = self.requests.pop(0)
return next_request(*args, **kwargs)
@mock.patch.object(requests, "request", request)
@mock.patch('time.time', mock.Mock(return_value=1234))
def test_get_call():
resp, body = cl.get("/hi")
test_get_call()
self.assertEqual(self.requests, [])
def test_get_with_retries_none(self):
cl = get_authed_client(retries=None)
@mock.patch.object(requests, "request", bad_401_request)
def test_get_call():
resp, body = cl.get("/hi")
self.assertRaises(exceptions.Unauthorized, test_get_call)
def test_post(self):
cl = get_authed_client()
@mock.patch.object(requests, "request", mock_request)
def test_post_call():
cl.post("/hi", body=[1, 2, 3])
headers = {
"X-Auth-Token": "token",
"Content-Type": "application/json",
'Accept': 'application/json',
"X-Openstack-Manila-Api-Version": self.max_version_str,
"User-Agent": fake_user_agent
}
mock_request.assert_called_with(
"POST",
"http://example.com/hi",
headers=headers,
data='[1, 2, 3]',
**self.TEST_REQUEST_BASE)
test_post_call()
| 32.043062
| 78
| 0.626101
| 4,265
| 0.636852
| 0
| 0
| 2,170
| 0.324026
| 0
| 0
| 1,377
| 0.205614
|
4d4909137a8281abf00add12e7109af6453220fd
| 1,421
|
py
|
Python
|
intro_to_algos_2020_mit/ps3/tests.py
|
venu-gopal-myneni/assignments
|
871148ccaa6291539623fc7d3f9704cb497fbcb6
|
[
"MIT"
] | 1
|
2022-02-26T13:52:31.000Z
|
2022-02-26T13:52:31.000Z
|
assignments/ps3-template/tests.py
|
tallamjr/mit-6006
|
c2aa6bb48edef5800c0779ba2eebd697d44249b5
|
[
"MIT"
] | null | null | null |
assignments/ps3-template/tests.py
|
tallamjr/mit-6006
|
c2aa6bb48edef5800c0779ba2eebd697d44249b5
|
[
"MIT"
] | null | null | null |
import unittest
from count_anagram_substrings import count_anagram_substrings
tests = (
(
(
'esleastealaslatet',
('tesla',),
),
(3,),
),
(
(
'lrldrrrllddrrlllrddd',
('ldl', 'rld'),
),
(1, 3),
),
(
(
'kkkkkvvuvkvkkkvuuvkuukkuvvkukkvkkvuvukuk',
('vkuk', 'uvku', 'kukk'),
),
(5, 6, 1),
),
(
(
'trhtrthtrthhhrtthrtrhhhtrrrhhrthrrrttrrttrthhrrrrtrtthhhhrrrtrtthrttthrthhthrhrh',
('rrrht', 'tttrr', 'rttrr', 'rhrrr'),
),
(6, 5, 6, 1),
),
(
(
'hjjijjhhhihhjjhjjhijjihjjihijiiihhihjjjihjjiijjijjhhjijjiijhjihiijjiiiijhihihhiihhiiihhiijhhhiijhijj',
('jihjhj', 'hhjiii', 'ihjhhh', 'jjjiji'),
),
(10, 6, 2, 2),
),
)
def check(test):
args, staff_sol = test
student_sol = count_anagram_substrings(*args)
return staff_sol == student_sol
class TestCases(unittest.TestCase):
def test_01(self): self.assertTrue(check(tests[ 0]))
def test_02(self): self.assertTrue(check(tests[ 1]))
def test_03(self): self.assertTrue(check(tests[ 2]))
def test_04(self): self.assertTrue(check(tests[ 3]))
def test_05(self): self.assertTrue(check(tests[ 4]))
if __name__ == '__main__':
res = unittest.main(verbosity = 3, exit = False)
| 25.375
| 115
| 0.553835
| 320
| 0.225194
| 0
| 0
| 0
| 0
| 0
| 0
| 372
| 0.261787
|
4d49fb464c7f3d5acfcbeab36ee17a5c9322cb65
| 9,387
|
py
|
Python
|
sim2real-policies/sim2real_policies/sys_id/universal_policy_online_system_identification/osi_class.py
|
eugval/sim2real_dynamics_simulation
|
2ed175803faa38792f6becc2dc91f44ae71ed9c2
|
[
"MIT"
] | 16
|
2020-07-28T14:35:44.000Z
|
2021-11-28T01:50:51.000Z
|
sim2real-policies/sim2real_policies/sys_id/universal_policy_online_system_identification/osi_class.py
|
eugval/sim2real_dynamics_simulation
|
2ed175803faa38792f6becc2dc91f44ae71ed9c2
|
[
"MIT"
] | 1
|
2020-11-26T07:58:30.000Z
|
2020-12-01T04:40:28.000Z
|
sim2real-policies/sim2real_policies/sys_id/universal_policy_online_system_identification/osi_class.py
|
eugval/sim2real_dynamics_simulation
|
2ed175803faa38792f6becc2dc91f44ae71ed9c2
|
[
"MIT"
] | 2
|
2020-10-18T01:38:49.000Z
|
2021-12-31T10:56:41.000Z
|
"""
System Identification (SI)
https://arxiv.org/abs/1702.02453
Examples of two types:
1. Off-line SI: in sim2real_policies.sys_id.common.utils
2. On-line SI
"""
from sim2real_policies.sys_id.common.operations import *
from sim2real_policies.sys_id.common.utils import *
from sim2real_policies.utils.rl_utils import load, load_model
from sim2real_policies.utils.choose_env import choose_env
class OSI(object):
"""
The class of online system identification
Args:
Projection (bool): whether a projection module is used to reduce the dimension of the predicted dynamics parameters
CAT_INTERNAL (bool): whether to concatenate the internal state to the external observation
context_dim (int): the compressed context dimension produced by the projection module
"""
def __init__(self, env_name='SawyerReach', length=3, context_dim=3, Projection=True, CAT_INTERNAL=False):
self.cat_internal = CAT_INTERNAL
env, environment_params, environment_wrappers, environment_wrapper_arguments = choose_env(env_name)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]
print('Env name: ', env_name)
print('Dimension of env state: ', state_dim)
print('Dimension of env action: ', action_dim)
self.params_dim = env.get_randomised_parameter_dimensions()
print('Dimension of randomised parameters: ', self.params_dim)
data_dim = length*(state_dim+action_dim)
if CAT_INTERNAL:
internal_state_dim = env.get_internal_state_dimension()
print('Dimension of internal state: ', internal_state_dim)
data_dim = length*(state_dim+action_dim+internal_state_dim)
else:
data_dim = length*(state_dim+action_dim)
self.osi_model = OSINetork(input_dim = data_dim, output_dim = self.params_dim)
self.env_name = env_name
self.length = length # trajectory length for prediction
if Projection:
self.proj_net = load_model(path = '../../../../data/pup_td3/model/pup_td3_projection', input_dim=self.params_dim, output_dim=context_dim)
self.policy=load(path = '../../../../data/pup_td3/model/pup_td3', alg='TD3', state_dim = state_dim+context_dim, action_dim = action_dim)
self.save_path = '../../../../../data/pup_td3/model/osi'
else:
self.proj_net = None
self.policy=load(path = '../../../../data/up_td3/model/up_td3', alg='TD3', state_dim = state_dim+self.params_dim, action_dim = action_dim)
self.save_path = '../../../../../data/up_td3/model/osi'
def predict(self, traj):
traj_input = stack_data(traj, self.length)
print(traj_input)
output = self.osi_model(traj_input).detach().numpy()
print('out: ', output)
return output
def load_model(self):
self.osi_model.load_state_dict(torch.load(self.save_path, map_location='cuda:0'))
self.osi_model.eval()
def osi_train(self, itr = 20):
# update with true dynamics parameters from simulator
print('Started OSI training stage I.'+'\n'+'--------------------------------------------------')
params, raw_history = self.online_history_collection(itr=10, PRED_PARAM=False, CAT_INTERNAL=self.cat_internal)
label, data = self.generate_data(params, raw_history)
self.osi_update(data, label, epoch=5)
print('Finished OSI training stage I.')
print('Started OSI training stage II.'+'\n'+'--------------------------------------------------')
# update with predicted dynamics parameters from simulator
losses = []
for _ in range(itr): # while not converge
params, raw_history = self.online_history_collection(PRED_PARAM=True, CAT_INTERNAL = self.cat_internal)
label, data = self.generate_data(params, raw_history)
loss = self.osi_update(data, label, epoch=5)
losses.append(loss)
plot(losses, name='osi_train')
print('Finished OSI training stage II.')
def generate_data(self, params, raw_history):
"""
Generate a training dataset from the raw history trajectories;
self.length is the number of consecutive (state, action) pairs stacked into each training sample (e.g. [s, a, s, a] for length 2)
"""
assert len(params) == len(raw_history)
label=[]
data=[]
for param, epi in zip(params, raw_history):
for i in range(0, len(epi)-self.length):
data.append(epi[i:i+self.length].reshape(-1)) # [s,a,s,a] for length=2
label.append(param)
assert len(label)==len(data)
return label, data
def online_history_collection(self, itr=30, max_steps=30, PRED_PARAM=False, CAT_INTERNAL=False):
""" collect random simulation parameters and trajetories with universal policy
https://arxiv.org/abs/1702.02453 (Preparing for the Unknown: Learning a Universal Policy with Online System Identification)
"""
env, environment_params, environment_wrappers, environment_wrapper_arguments = choose_env(self.env_name)
action_space = env.action_space
ini_state_space = env.observation_space
state_space = spaces.Box(-np.inf, np.inf, shape=(ini_state_space.shape[0]+self.params_dim, )) # add the dynamics param dim
# a random policy
data_collection_policy=DPG_PolicyNetwork(state_space, action_space, hidden_dim=512).cuda()
params_list=[]
history=[]
for eps in range(itr): # K
state = env.reset()
params = query_params(env, randomised_only=True)
epi_traj = []
params_list.append(params)
# N is 1 in this implementation, as each env.reset() will have different parameter set
for step in range(max_steps): # T
if CAT_INTERNAL:
internal_state = env.get_internal_state()
full_state = np.concatenate([state, internal_state])
else:
full_state = state
if len(epi_traj)>=self.length and PRED_PARAM:
osi_input = stack_data(epi_traj, self.length) # stack (s,a) to have same length as in the model input
pre_params = self.osi_model(osi_input).detach().numpy()
else:
pre_params = params
if self.proj_net is not None: # projected to low dimensions
pre_params = self.proj_net.get_context(pre_params)
else:
pass
# print('No projection network!')
params_state = np.concatenate((pre_params, state)) # use predicted parameters instead of true values for training, according to the paper
action = data_collection_policy.get_action(params_state)
epi_traj.append(np.concatenate((full_state, action)))
next_state, _, _, _ = env.step(action)
state = next_state
history.append(np.array(epi_traj))
print("Finished collecting data of {} trajectories.".format(itr))
return params_list, history
def osi_update(self, input, label, epoch=1, lr=1e-1):
""" Update the system identification (SI) with online data collection """
criterion = nn.MSELoss()
optimizer = optim.Adam(self.osi_model.parameters(), lr)
scheduler = optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.99) # gamma: decay for each step
input = torch.Tensor(input)
label = torch.Tensor(label)
for i in range(epoch):
predict = self.osi_model(input)
loss = criterion(predict, label)
optimizer.zero_grad()
loss.backward()
print('Train the SI model, Epoch: {} | Loss: {}'.format(i, loss))
optimizer.step()
scheduler.step()
torch.save(self.osi_model.state_dict(), self.save_path)
return loss.detach().cpu().numpy()
class OSINetork(nn.Module):
def __init__(self, input_dim, output_dim, hidden_dim=512, dropout=0.1):
""" same OSI network structure as: https://arxiv.org/abs/1702.02453 """
super(OSINetork, self).__init__()
self.linear1 = nn.Linear(input_dim, hidden_dim)
self.dropout1 = nn.Dropout(dropout)
self.linear2 = nn.Linear(hidden_dim, int(hidden_dim/2))
self.dropout2 = nn.Dropout(dropout)
self.linear3 = nn.Linear(int(hidden_dim/2), int(hidden_dim/4))
self.dropout3 = nn.Dropout(dropout)
self.linear4 = nn.Linear(int(hidden_dim/4), output_dim)
def forward(self, input):
if len(input.shape) < 2:
input = torch.FloatTensor(np.expand_dims(input, 0))
x = F.tanh(self.linear1(input))
x = self.dropout1(x)
x = F.tanh(self.linear2(x))
x = self.dropout2(x)
x = F.tanh(self.linear3(x))
x = self.dropout3(x)
x = self.linear4(x)
return x.squeeze(0)
def stack_data(traj, length):
traj = np.array(traj)
return traj[-length:, :].reshape(-1)
if __name__ == '__main__':
ENV_NAME =['SawyerReach', 'SawyerPush', 'SawyerSlide'][0]
osi = OSI(env_name = ENV_NAME, length=3, context_dim=3, Projection=False, CAT_INTERNAL=True)
osi.osi_train()
| 45.567961
| 155
| 0.629914
| 8,684
| 0.925109
| 0
| 0
| 0
| 0
| 0
| 0
| 2,399
| 0.255566
|
4d4c6f6195152d60976c1000937ec76667e66f99
| 2,099
|
py
|
Python
|
rasp_camera.py
|
BrianDau/doorbell_dash
|
940877c5019b39639e7de0081a616d20c8b5a0fc
|
[
"MIT"
] | 11
|
2017-04-12T13:27:39.000Z
|
2021-05-16T16:27:15.000Z
|
rasp_camera.py
|
BrianDau/doorbell_dash
|
940877c5019b39639e7de0081a616d20c8b5a0fc
|
[
"MIT"
] | null | null | null |
rasp_camera.py
|
BrianDau/doorbell_dash
|
940877c5019b39639e7de0081a616d20c8b5a0fc
|
[
"MIT"
] | 1
|
2019-01-10T18:33:42.000Z
|
2019-01-10T18:33:42.000Z
|
import picamera
from time import sleep
IMG_WIDTH = 800
IMG_HEIGHT = 600
IMAGE_DIR = "/home/pi/Desktop/"
IMG = "snap.jpg"
def vid():
camera = picamera.PiCamera()
camera.vflip = True
camera.hflip = True
camera.brightness = 60
#camera.resolution = (IMG_WIDTH, IMG_HEIGHT)
camera.start_preview()
camera.annotate_text = "Doorbell pressed!"
camera.annotate_text_size = 50
#display video for 5 seconds
sleep(5)
camera.stop_preview()
camera.close()
# https://www.raspberrypi.org/learning/tweeting-babbage/worksheet/
######################################################
# picamera default values:
######################################################
# camera.sharpness = 0
# camera.contrast = 0
# camera.brightness = 50
# camera.saturation = 0
# camera.ISO = 0
# camera.video_stabilization = False
# camera.exposure_compensation = 0
# camera.exposure_mode = 'auto'
# camera.meter_mode = 'average'
# camera.awb_mode = 'auto'
# camera.image_effect = 'none'
# camera.color_effects = None
# camera.rotation = 180
# camera.hflip = False
# camera.vflip = False
# camera.crop = (0.0, 0.0, 1.0, 1.0)
######################################################
# video will record 5 seconds
######################################################
# camera.start_recording('video.h264')
# sleep(5)
# camera.stop_recording()
######################################################
# add text to video:
######################################################
#camera.start_preview()
#camera.annotate_text = "Doorbell pressed!"
#camera.annotate_text_size = 50
#sleep(5)
#camera.capture('/home/pi/Desktop/text.jpg')
#camera.stop_preview()
######################################################
# loop over camera effects:
######################################################
#camera = picamera.PiCamera()
#camera.vflip = True
#camera.hflip = True
#camera.start_preview()
#for effect in camera.IMAGE_EFFECTS:
# camera.image_effect = effect
# camera.annotate_text = "Effect: %s" % effect
# sleep(1)
#camera.stop_preview()
| 26.910256
| 66
| 0.549786
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1,665
| 0.793235
|
4d4d0ea614818e4dfdde9e585c36b4fdaeb09ea4
| 4,720
|
py
|
Python
|
catalogue_flask/model.py
|
ScottWales/catalogue-flask
|
4a9e659875fee6e831e6c31018c9f9d7285dc845
|
[
"Apache-2.0"
] | null | null | null |
catalogue_flask/model.py
|
ScottWales/catalogue-flask
|
4a9e659875fee6e831e6c31018c9f9d7285dc845
|
[
"Apache-2.0"
] | null | null | null |
catalogue_flask/model.py
|
ScottWales/catalogue-flask
|
4a9e659875fee6e831e6c31018c9f9d7285dc845
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# Copyright 2017 ARC Centre of Excellence for Climate Systems Science
# author: Scott Wales <scott.wales@unimelb.edu.au>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
from flask_sqlalchemy import SQLAlchemy
import os
from datetime import datetime
db = SQLAlchemy()
class Path(db.Model):
"""
A path in the filesystem
"""
id = db.Column(db.Integer, primary_key=True)
path = db.Column(db.Text, unique=True, index=True)
basename = db.Column(db.Text, index=True)
extension = db.Column(db.Text, index=True)
uid = db.Column(db.Integer)
gid = db.Column(db.Integer, index=True)
size_bytes = db.Column(db.Integer)
modified = db.Column(db.Integer)
last_seen = db.Column(db.DateTime, index=True)
content_id = db.Column(db.Integer, db.ForeignKey('content.id'))
content = db.relationship("Content")
def add_from_filename(filename, session):
"""
Given a filename, add it to the database
"""
if not os.path.isfile(filename):
raise IOError("Not a file: %s"%filename)
abspath = os.path.abspath(filename)
path = Path.query.filter_by(path = abspath).one_or_none()
stat = os.stat(filename)
if path is not None:
path.last_seen = datetime.now()
if path.modified < stat.st_mtime:
path.update(stat)
session.add(path)
return path
path = Path()
path.path = abspath
path.update(stat)
path.last_seen = datetime.now()
session.add(path)
return path
def update(self, stat):
"""
Updates the file with new info
"""
self.basename = os.path.basename(self.path)
self.extension = os.path.splitext(self.path)[1]
self.uid = stat.st_uid
self.gid = stat.st_gid
self.size_bytes = stat.st_size
self.modified = stat.st_mtime
# Wipe the content link
self.content = None
class Content(db.Model):
"""
The contents of a file, identified via checksum
May be at multiple paths on the filesystem
sha256 is used for identification, md5 also provided for legacy
:var sha256: sha256 checksum
:var md5: md5 checksum
"""
id = db.Column(db.Integer, primary_key=True)
sha256 = db.Column(db.String, unique=True, index=True, nullable=False)
md5 = db.Column(db.String, index=True, nullable=False)
type = db.Column(db.String)
last_scanned = db.Column(db.DateTime)
paths = db.relationship("Path")
__mapper_args__ = {
'polymorphic_identity':'content',
'polymorphic_on':type
}
netcdf_variable_association = db.Table('netcdf_variable_association', db.Model.metadata,
db.Column('netcdf_id', db.Integer, db.ForeignKey('netcdf_content.id')),
db.Column('concretevar_id', db.Integer, db.ForeignKey('concrete_variable.id'))
)
class NetcdfContent(Content):
"""
Content of a NetCDF file
:var sha256: sha256 checksum
:var md5: md5 checksum
:var variables: list of :class:`~catalogue_flask.model.ConcreteVariable`
"""
id = db.Column(db.Integer, db.ForeignKey('content.id'), primary_key=True)
variables = db.relationship("ConcreteVariable",
secondary=netcdf_variable_association)
__mapper_args__ = {
'polymorphic_identity':'netcdfcontent',
}
class ConcreteVariable(db.Model):
"""
An abstract variable, may have many aliased names
:var cf_name: NetCDF-CF name
:var aliases: List of :class:`~catalogue_flask.model.Variable`
"""
id = db.Column(db.Integer, primary_key=True)
cf_name = db.Column(db.String)
aliases = db.relationship("Variable")
class Variable(db.Model):
"""
An alternate name for a variable
:var name: The name of this alias
:var concrete: The concrete variable this aliases
"""
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String)
concretevariable_id = db.Column(db.Integer, db.ForeignKey('concrete_variable.id'), index=True)
concrete = db.relationship("ConcreteVariable")
| 31.052632
| 98
| 0.661441
| 3,613
| 0.765466
| 0
| 0
| 0
| 0
| 0
| 0
| 1,887
| 0.399788
|
4d4daf56e54bd88f232ffc4ff205ca0bd68de320
| 1,823
|
py
|
Python
|
userbot/core/vcbot/controls.py
|
Rewtio/Mikoo-Userbot
|
418f0017241fa65bdf7f99c84381317cb4dbeb55
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 4
|
2022-03-03T01:31:48.000Z
|
2022-03-26T00:15:41.000Z
|
userbot/core/vcbot/controls.py
|
Rewtio/Mikoo-Userbot
|
418f0017241fa65bdf7f99c84381317cb4dbeb55
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-03-16T02:54:27.000Z
|
2022-03-17T09:17:12.000Z
|
userbot/core/vcbot/controls.py
|
Rewtio/Mikoo-Userbot
|
418f0017241fa65bdf7f99c84381317cb4dbeb55
|
[
"Naumen",
"Condor-1.1",
"MS-PL"
] | 1
|
2022-03-16T02:41:38.000Z
|
2022-03-16T02:41:38.000Z
|
# Mikoo - UserBot
# Copyright (c) 2022 Mikoo-Userbot
# Credits: @divarvian || https://github.com/divarvian
#
# This file is a part of < https://github.com/divarvian/Mikoo-Userbot/ >
# t.me/MikooUserbot & t.me/MikooUserbot
from pytgcalls.types.input_stream import AudioPiped, AudioVideoPiped
from pytgcalls.types.input_stream.quality import (
HighQualityAudio,
HighQualityVideo,
LowQualityVideo,
MediumQualityVideo,
)
from userbot import LOGS, call_py
from userbot.core.vcbot.queues import QUEUE, clear_queue, get_queue, pop_an_item
async def skip_item(chat_id: int, x: int):
if chat_id not in QUEUE:
return 0
chat_queue = get_queue(chat_id)
try:
songname = chat_queue[x][0]
chat_queue.pop(x)
return songname
except Exception as e:
LOGS.info(str(e))
return 0
async def skip_current_song(chat_id: int):
if chat_id not in QUEUE:
return 0
chat_queue = get_queue(chat_id)
if len(chat_queue) == 1:
await call_py.leave_group_call(chat_id)
clear_queue(chat_id)
return 1
songname = chat_queue[1][0]
url = chat_queue[1][1]
link = chat_queue[1][2]
type = chat_queue[1][3]
RESOLUSI = chat_queue[1][4]
if type == "Audio":
await call_py.change_stream(
chat_id,
AudioPiped(
url,
HighQualityAudio(),
),
)
elif type == "Video":
if RESOLUSI == 720:
hm = HighQualityVideo()
elif RESOLUSI == 480:
hm = MediumQualityVideo()
elif RESOLUSI == 360:
hm = LowQualityVideo()
await call_py.change_stream(
chat_id, AudioVideoPiped(url, HighQualityAudio(), hm)
)
pop_an_item(chat_id)
return [songname, link, type]
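# Usage sketch (editor's addition, not part of the original module). These helpers are
# coroutines and must be awaited; -1001234567890 is a hypothetical chat id. With no active
# queue for that chat, skip_item simply returns 0.
import asyncio
print(asyncio.run(skip_item(-1001234567890, 1)))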
| 27.208955
| 80
| 0.623149
| 0
| 0
| 0
| 0
| 0
| 0
| 1,266
| 0.69446
| 230
| 0.126166
|
4d4ffc6370941aaa5377e63ac70bab0e9216c2d5
| 5,614
|
py
|
Python
|
src/jaeger/jaeger.py
|
Novartis/JAEGER
|
9f9441d97bb956d88b73e2d24edb65322420c251
|
[
"Apache-2.0"
] | 9
|
2021-12-27T15:49:43.000Z
|
2022-03-29T10:17:55.000Z
|
src/jaeger/jaeger.py
|
Novartis/JAEGER
|
9f9441d97bb956d88b73e2d24edb65322420c251
|
[
"Apache-2.0"
] | null | null | null |
src/jaeger/jaeger.py
|
Novartis/JAEGER
|
9f9441d97bb956d88b73e2d24edb65322420c251
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2021 Novartis Institutes for BioMedical Research Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
NAME="JAEGER"
TITLE="**JAEGER**: JT-VAE Generative Modeling"
JAEGER_HOME="/path/to/models"
BASE_DIR=JAEGER_HOME+"/assays"
TRAINING_DIR=JAEGER_HOME+"/training_data"
AVAIL_MODELS=JAEGER_HOME+"/jaeger_avail_models.csv"
### JAEGER
import pandas as pd
import numpy as np
from sklearn.decomposition import PCA
import os
# --- RDKIT imports
import rdkit.Chem as Chem
import rdkit
# --- TORCH imports
import torch
# --- JTVAE imports
from jtnn import *
from jtnn.jtprop_vae import JTPropVAE
# --- TOXSQUAD imports
from toxsquad.data import modelling_data_from_csv
import sys
import os
PACKAGE_PARENT = '..'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
sys.path.append(os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT)))
# --- JAEGER imports
from jaeger.utils.jtvae_utils import compute_properties
from jaeger.utils.jtvae_utils import get_vocab
from jaeger.utils.jtvae_utils import get_neighbors_along_directions_tree_then_graph
from jaeger.utils.jtvae_utils import check_for_similarity
from jaeger.utils.jtvae_utils import check_for_similarity_to_collection_fp
# --- utils
import argparse
def str2bool(v):
if isinstance(v, bool):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("Boolean value expected.")
### Streamlit-cached helper functions
#try:
import streamlit as st
@st.cache(
hash_funcs={
pd.DataFrame: lambda _: None,
},
suppress_st_warning=True,
persist=True,
)
def load_csv(csv_file, index_col):
df = pd.read_csv(csv_file, index_col = index_col)
return df
@st.cache(
hash_funcs={
rdkit.DataStructs.cDataStructs.UIntSparseIntVect: lambda _: None,
rdkit.Chem.rdchem.Mol: lambda _: None,
pd.DataFrame: lambda _: None,
},
suppress_st_warning=True,
persist=True,
)
def load_data(csv_file, filter_mols=True,
drop_qualified=False,
binary_fp=True, # because these morgans are not built for modelling, but for similarity checking
pac50=True,
convert_fps_to_numpy = False):
# ok, from now on (2020 April 23)
# we'll be using pAC50s
# st.write("Cache miss ... " + csv_file)
print("Cache miss ... " + csv_file)
print("FILTER MOLS " + str(filter_mols))
morgans_df, targets, toxdata = modelling_data_from_csv(csv_file,
binary_fp=binary_fp,
filter_mols=filter_mols,
drop_qualified =drop_qualified,
convert_to_pac50 = True,
convert_fps_to_numpy = convert_fps_to_numpy)
print("TOXDATA LEN " + str(len(toxdata)))
return morgans_df, targets, toxdata
@st.cache(
allow_output_mutation=True, hash_funcs={pd.DataFrame: lambda _: None},
)
def get_vocabulary(assay_dir, assay_id, toxdata):
print("getting vocab")
return get_vocab(assay_dir, assay_id, toxdata)
@st.cache(
hash_funcs={
torch.nn.parameter.Parameter: lambda _: None,
torch.Tensor: lambda _: None,
},
allow_output_mutation=True,
)
def get_model(vocab, model_params, device, infer_dir):
torch.manual_seed(777)
model = JTPropVAE(vocab, **model_params).to(device)
model.load_state_dict(torch.load(infer_dir + "/model-ref.iter-35"))
model = model.eval()
return model
@st.cache(allow_output_mutation=True, persist=True,)
def get_embeddings(embeddings_csv_file):
print("getting embeddings")
latent = pd.read_csv(embeddings_csv_file, index_col=0, engine="c")
return latent
@st.cache(allow_output_mutation=True, persist=True,)
def get_predictions(predictions_csv_file, convert_to_pac50):
print("getting predictions")
predictions = pd.read_csv(predictions_csv_file, index_col=0, engine="c")
if convert_to_pac50:
predictions = (predictions - 6) * -1 # also convert the ground truth?
return predictions
#@st.cache
def load_avail_models():
avail_models_file = AVAIL_MODELS
available_models = pd.read_csv(avail_models_file, index_col='assay_id')
return available_models
@st.cache
def compute_pca(embeddings):
latent_size = embeddings.shape[1]
reducer = PCA(n_components=latent_size)
crds_pca = reducer.fit_transform(embeddings)
var_explained = reducer.explained_variance_
var_explained_ratios = reducer.explained_variance_ratio_
var_ticks = np.arange(0, latent_size)
var_coords = np.array(list(zip(var_ticks, np.cumsum(var_explained_ratios))))
return reducer, crds_pca, var_coords, var_explained
#except:
# e = sys.exc_info()[0]
# print("Unexpected error")
# print(e)
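# Usage sketch (editor's addition, not part of the original module). Assumes a latent
# embeddings CSV written by a trained JT-VAE model exists at the hypothetical path below.
example_embeddings = get_embeddings(BASE_DIR + "/example_assay/latent_embeddings.csv")
reducer, crds_pca, var_coords, var_explained = compute_pca(example_embeddings)
print(crds_pca.shape, var_coords[:3])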
| 31.539326
| 110
| 0.685073
| 0
| 0
| 0
| 0
| 3,208
| 0.571429
| 0
| 0
| 1,388
| 0.247239
|
4d50bed8c76e8e60cc01b8081cea63dca711f207
| 805
|
py
|
Python
|
test/test_vlan_group.py
|
nrfta/python-netbox-client
|
68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
|
[
"MIT"
] | null | null | null |
test/test_vlan_group.py
|
nrfta/python-netbox-client
|
68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
|
[
"MIT"
] | null | null | null |
test/test_vlan_group.py
|
nrfta/python-netbox-client
|
68ba6dd4d7306513dc1ad38f3ac59122ba4f70a8
|
[
"MIT"
] | null | null | null |
# coding: utf-8
"""
NetBox API
API to access NetBox # noqa: E501
OpenAPI spec version: 2.8
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import unittest
import netbox_client
from netbox_client.models.vlan_group import VLANGroup # noqa: E501
from netbox_client.rest import ApiException
class TestVLANGroup(unittest.TestCase):
"""VLANGroup unit test stubs"""
def setUp(self):
pass
def tearDown(self):
pass
def testVLANGroup(self):
"""Test VLANGroup"""
# FIXME: construct object with mandatory attributes with example values
# model = netbox_client.models.vlan_group.VLANGroup() # noqa: E501
pass
if __name__ == '__main__':
unittest.main()
| 19.634146
| 79
| 0.680745
| 376
| 0.467081
| 0
| 0
| 0
| 0
| 0
| 0
| 393
| 0.488199
|