# === ratings/forms/widgets.py (Nuevosmedios/django-generic-ratings, MIT) ===

from decimal import Decimal
from django import forms
from django.template.loader import render_to_string
from django.template.defaultfilters import slugify
class BaseWidget(forms.TextInput):
"""
Base widget. Do not use this directly.
"""
template = None
instance = None
def get_parent_id(self, name, attrs):
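        """Return the DOM id of the hidden parent input rendered by the base TextInput."""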
final_attrs = self.build_attrs(attrs, type=self.input_type, name=name)
return final_attrs['id']
def get_widget_id(self, prefix, name, key=''):
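        """Build a unique widget DOM id, namespaced by the bound model instance and optional key when available."""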
if self.instance:
opts = self.instance._meta
widget_id = '%s-%s-%s_%s-%s' % (prefix, name, opts.app_label, opts.module_name, self.instance.pk)
else:
widget_id = '%s-%s' % (prefix, name)
if key:
widget_id = '%s_%s' % (widget_id, slugify(key))
return widget_id
def get_values(self, min_value, max_value, step=1):
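        """Yield Decimal values from *min_value* to *max_value* inclusive, stepping by *step*."""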
decimal_step = Decimal(str(step))
value = Decimal(str(min_value))
while value <= max_value:
yield value
value += decimal_step
class SliderWidget(BaseWidget):
"""
Slider widget.
In order to use this widget you must load the jQuery.ui slider
javascript.
This widget triggers the following javascript events:
- *slider_change* with the vote value as argument
(fired when the user changes his vote)
- *slider_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('slider_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False, default='',
template='ratings/slider_widget.html', attrs=None):
"""
The argument *default* is used when the initial value is None.
"""
super(SliderWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.default = default
self.template = template
self.key = key
def get_context(self, name, value, attrs=None):
# here we convert *min_value*, *max_value*, *step* and *value*
# to string to avoid odd behaviours of Django localization
# in the template (and, for backward compatibility we do not
# want to use the *unlocalize* filter)
attrs['type'] = 'hidden'
return {
'min_value': str(self.min_value),
'max_value': str(self.max_value),
'step': str(self.step),
'can_delete_vote': self.can_delete_vote,
'read_only': self.read_only,
'default': self.default,
'parent': super(SliderWidget, self).render(name, value, attrs),
'parent_id': self.get_parent_id(name, attrs),
'value': str(value),
'has_value': bool(value),
'slider_id': self.get_widget_id('slider', name, self.key),
'label_id': 'slider-label-%s' % name,
'remove_id': 'slider-remove-%s' % name,
}
def render(self, name, value, attrs=None):
context = self.get_context(name, value, attrs or {})
return render_to_string(self.template, context)
class StarWidget(BaseWidget):
"""
    Star rating widget.
In order to use this widget you must download the
jQuery Star Rating Plugin available at
http://www.fyneworks.com/jquery/star-rating/#tab-Download
and then load the required javascripts and css, e.g.::
<link href="/path/to/jquery.rating.css" rel="stylesheet" type="text/css" />
<script type="text/javascript" src="/path/to/jquery.MetaData.js"></script>
<script type="text/javascript" src="/path/to/jquery.rating.js"></script>
This widget triggers the following javascript events:
- *star_change* with the vote value as argument
(fired when the user changes his vote)
- *star_delete* without arguments
(fired when the user deletes his vote)
It's easy to bind these events using jQuery, e.g.::
$(document).bind('star_change', function(event, value) {
alert('New vote: ' + value);
});
"""
def __init__(self, min_value, max_value, step, instance=None,
can_delete_vote=True, key='', read_only=False,
template='ratings/star_widget.html', attrs=None):
super(StarWidget, self).__init__(attrs)
self.min_value = min_value
self.max_value = max_value
self.step = step
self.instance = instance
self.can_delete_vote = can_delete_vote
self.read_only = read_only
self.template = template
self.key = key
def get_context(self, name, value, attrs=None):
# here we convert *min_value*, *max_value* and *step*
# to string to avoid odd behaviours of Django localization
# in the template (and, for backward compatibility we do not
# want to use the *unlocalize* filter)
attrs['type'] = 'hidden'
split_value = int(1 / self.step)
if split_value == 1:
values = range(1, self.max_value+1)
split = u''
else:
values = self.get_values(self.min_value, self.max_value, self.step)
split = u' {split:%d}' % split_value
return {
'min_value': str(self.min_value),
'max_value': str(self.max_value),
'step': str(self.step),
'can_delete_vote': self.can_delete_vote,
'read_only': self.read_only,
'values': values,
'split': split,
'parent': super(StarWidget, self).render(name, value, attrs),
'parent_id': self.get_parent_id(name, attrs),
'value': self._get_value(value, split_value),
'star_id': self.get_widget_id('star', name, self.key),
}
def _get_value(self, original, split):
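        # Snap the stored vote to the nearest fraction allowed by *split*;
        # implicitly returns None when there is no vote yet.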
if original:
value = round(original * split) / split
return Decimal(str(value))
def render(self, name, value, attrs=None):
context = self.get_context(name, value, attrs or {})
return render_to_string(self.template, context)
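
# --- Usage sketch ---
# A minimal example of wiring SliderWidget into a Django form. The form class,
# field name and the 1-10 range are illustrative assumptions, not part of this
# package's API.
class ExampleRatingForm(forms.Form):
    """Hypothetical form rating an object on a 1-10 slider."""
    score = forms.DecimalField(
        required=False,
        widget=SliderWidget(min_value=1, max_value=10, step=1))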

# === a1-supervised-learning/decision_tree.py (changrybirds/ml-2019fall, MIT) ===

import numpy as np
import pandas as pd
from sklearn.tree import DecisionTreeClassifier
from sklearn.model_selection import cross_val_score
import matplotlib.pyplot as plt
import dataset_processing as data_proc
def model_complexity_curve(X_train, y_train, hp, hp_vals, cv=None):
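    """Sweep one DecisionTreeClassifier hyperparameter *hp* over *hp_vals* and
    return a DataFrame of training scores and mean cross-validation scores,
    indexed by hyperparameter value."""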
df = pd.DataFrame(index=hp_vals, columns=['train', 'cv'])
for hp_val in hp_vals:
kwargs = {
hp: hp_val,
'random_state': data_proc.SEED_VAL}
dtclf = DecisionTreeClassifier(**kwargs)
# train data
dtclf.fit(X_train, y_train)
train_score = dtclf.score(X_train, y_train)
# get cv scores
cross_vals = cross_val_score(dtclf, X_train, y_train, cv=cv)
cv_mean = np.mean(cross_vals)
df.loc[hp_val, 'train'] = train_score
df.loc[hp_val, 'cv'] = cv_mean
return pd.DataFrame(df, dtype='float')
def run_experiment(dataset_name, X_train, X_test, y_train, y_test, verbose=False, show_plots=False):
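    """Tune max_depth and max_features via model complexity curves, plot
    learning curves for the tuned tree, and report train/holdout scores."""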
# calculate model complexity scores for max_depth
hp = 'max_depth'
hp_vals = np.arange(3, 20) # this should vary for each hyperparameter
max_depth_mc = model_complexity_curve(
X_train, y_train, hp, hp_vals, cv=data_proc.CV_VAL)
max_depth_hp = max_depth_mc['cv'].idxmax()
    if verbose:
        print(max_depth_mc.head(10))
        print(max_depth_mc.idxmax())
data_proc.plot_model_complexity_charts(
max_depth_mc['train'], max_depth_mc['cv'],
dataset_name + ': MCC for ' + hp, hp)
if show_plots:
plt.show()
plt.savefig('graphs/dt_mcc_' + hp + '_' + dataset_name + '.png')
plt.clf()
plt.close()
# calculate model complexity scores for max_features
hp = 'max_features'
hp_vals = np.arange(1, X_train.shape[1]) # this should vary for each hyperparameter
max_features_mc = model_complexity_curve(
X_train, y_train, hp, hp_vals, cv=data_proc.CV_VAL)
max_features_hp = max_features_mc['cv'].idxmax()
    if verbose:
        print(max_features_mc.head(10))
        print(max_features_mc.idxmax())
data_proc.plot_model_complexity_charts(
max_features_mc['train'], max_features_mc['cv'],
dataset_name + ': MCC for ' + hp, hp)
if show_plots:
plt.show()
plt.savefig('graphs/dt_mcc_' + hp + '_' + dataset_name + '.png')
plt.clf()
plt.close()
# instantiate decision tree
dtclf = DecisionTreeClassifier(
max_depth=max_depth_hp, max_features=max_features_hp,
random_state=data_proc.SEED_VAL)
# calculate and print learning curves
train_sizes = np.linspace(0.1, 0.9, 9)
data_proc.plot_learning_curve(
dtclf, dataset_name + ': learning curves',
X_train, y_train, cv=data_proc.CV_VAL, train_sizes=train_sizes)
if show_plots:
plt.show()
plt.savefig('graphs/dt_lc_' + dataset_name + '.png')
plt.clf()
plt.close()
train_score = data_proc.model_train_score(dtclf, X_train, y_train)
test_score = data_proc.model_test_score(dtclf, X_test, y_test)
print("DTClassifier training set score for " + dataset_name + ": ", train_score)
print("DTClassifier holdout set score for " + dataset_name + ": ", test_score)
def abalone(verbose=False, show_plots=False):
X_train, X_test, y_train, y_test = data_proc.process_abalone()
run_experiment(
'abalone', X_train, X_test, y_train, y_test,
verbose=verbose, show_plots=show_plots)
def online_shopping(verbose=False, show_plots=False):
X_train, X_test, y_train, y_test = data_proc.process_online_shopping()
run_experiment(
'online_shopping', X_train, X_test, y_train, y_test,
verbose=verbose, show_plots=show_plots)
if __name__ == "__main__":
abalone(verbose=False, show_plots=False)
online_shopping(verbose=False, show_plots=False)

# === pysnmp/MY-PROCESS-MIB.py (agustinhenze/mibs.snmplabs.com, Apache-2.0) ===

#
# PySNMP MIB module MY-PROCESS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/MY-PROCESS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:06:38 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, ObjectIdentifier, OctetString = mibBuilder.importSymbols("ASN1", "Integer", "ObjectIdentifier", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, SingleValueConstraint, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "SingleValueConstraint", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint")
myMgmt, = mibBuilder.importSymbols("MY-SMI", "myMgmt")
ObjectGroup, ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ObjectGroup", "ModuleCompliance", "NotificationGroup")
Counter32, ObjectIdentity, MibIdentifier, TimeTicks, Bits, ModuleIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Gauge32, Integer32, Counter64, NotificationType, Unsigned32, IpAddress, iso = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "ObjectIdentity", "MibIdentifier", "TimeTicks", "Bits", "ModuleIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Gauge32", "Integer32", "Counter64", "NotificationType", "Unsigned32", "IpAddress", "iso")
TruthValue, TextualConvention, RowStatus, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TruthValue", "TextualConvention", "RowStatus", "DisplayString")
myProcessMIB = ModuleIdentity((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36))
myProcessMIB.setRevisions(('2003-10-14 00:00',))
if mibBuilder.loadTexts: myProcessMIB.setLastUpdated('200310140000Z')
if mibBuilder.loadTexts: myProcessMIB.setOrganization('D-Link Corp.')
class Percent(TextualConvention, Integer32):
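    """Integer32 textual convention restricted to the range 0..100."""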
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 100)
myCPUMIBObjects = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1))
myCpuGeneralMibsGroup = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1))
myCPUUtilization5Sec = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 1), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myCPUUtilization5Sec.setStatus('current')
myCPUUtilization1Min = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 2), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myCPUUtilization1Min.setStatus('current')
myCPUUtilization5Min = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 3), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myCPUUtilization5Min.setStatus('current')
myCPUUtilizationWarning = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 4), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myCPUUtilizationWarning.setStatus('current')
myCPUUtilizationCritical = MibScalar((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 1, 5), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myCPUUtilizationCritical.setStatus('current')
myNodeCPUTotalTable = MibTable((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2), )
if mibBuilder.loadTexts: myNodeCPUTotalTable.setStatus('current')
myNodeCPUTotalEntry = MibTableRow((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1), ).setIndexNames((0, "MY-PROCESS-MIB", "myNodeCPUTotalIndex"))
if mibBuilder.loadTexts: myNodeCPUTotalEntry.setStatus('current')
myNodeCPUTotalIndex = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 1), Integer32()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotalIndex.setStatus('current')
myNodeCPUTotalName = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 2), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotalName.setStatus('current')
myNodeCPUTotal5sec = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 3), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotal5sec.setStatus('current')
myNodeCPUTotal1min = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 4), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotal1min.setStatus('current')
myNodeCPUTotal5min = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 5), Percent()).setMaxAccess("readonly")
if mibBuilder.loadTexts: myNodeCPUTotal5min.setStatus('current')
myNodeCPUTotalWarning = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 6), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myNodeCPUTotalWarning.setStatus('current')
myNodeCPUTotalCritical = MibTableColumn((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 1, 2, 1, 7), Percent()).setMaxAccess("readwrite")
if mibBuilder.loadTexts: myNodeCPUTotalCritical.setStatus('current')
myProcessMIBConformance = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2))
myProcessMIBCompliances = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 1))
myProcessMIBGroups = MibIdentifier((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 2))
myProcessMIBCompliance = ModuleCompliance((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 1, 1)).setObjects(("MY-PROCESS-MIB", "myCPUUtilizationMIBGroup"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
myProcessMIBCompliance = myProcessMIBCompliance.setStatus('current')
myCPUUtilizationMIBGroup = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 2, 1)).setObjects(("MY-PROCESS-MIB", "myCPUUtilization5Sec"), ("MY-PROCESS-MIB", "myCPUUtilization1Min"), ("MY-PROCESS-MIB", "myCPUUtilization5Min"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
myCPUUtilizationMIBGroup = myCPUUtilizationMIBGroup.setStatus('current')
myNodeCPUTotalGroups = ObjectGroup((1, 3, 6, 1, 4, 1, 171, 10, 97, 2, 36, 2, 2, 2)).setObjects(("MY-PROCESS-MIB", "myNodeCPUTotalIndex"), ("MY-PROCESS-MIB", "myNodeCPUTotalName"), ("MY-PROCESS-MIB", "myNodeCPUTotal5sec"), ("MY-PROCESS-MIB", "myNodeCPUTotal1min"), ("MY-PROCESS-MIB", "myNodeCPUTotal5min"), ("MY-PROCESS-MIB", "myNodeCPUTotalWarning"), ("MY-PROCESS-MIB", "myNodeCPUTotalCritical"))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
myNodeCPUTotalGroups = myNodeCPUTotalGroups.setStatus('current')
mibBuilder.exportSymbols("MY-PROCESS-MIB", myCPUMIBObjects=myCPUMIBObjects, myCPUUtilizationWarning=myCPUUtilizationWarning, myProcessMIBCompliances=myProcessMIBCompliances, myCPUUtilization5Sec=myCPUUtilization5Sec, Percent=Percent, myNodeCPUTotalEntry=myNodeCPUTotalEntry, myNodeCPUTotal5min=myNodeCPUTotal5min, myNodeCPUTotal5sec=myNodeCPUTotal5sec, myCpuGeneralMibsGroup=myCpuGeneralMibsGroup, myNodeCPUTotalCritical=myNodeCPUTotalCritical, myCPUUtilizationCritical=myCPUUtilizationCritical, myNodeCPUTotalWarning=myNodeCPUTotalWarning, myProcessMIBConformance=myProcessMIBConformance, myCPUUtilization1Min=myCPUUtilization1Min, myCPUUtilization5Min=myCPUUtilization5Min, PYSNMP_MODULE_ID=myProcessMIB, myNodeCPUTotalTable=myNodeCPUTotalTable, myProcessMIBCompliance=myProcessMIBCompliance, myNodeCPUTotalGroups=myNodeCPUTotalGroups, myNodeCPUTotalIndex=myNodeCPUTotalIndex, myProcessMIB=myProcessMIB, myProcessMIBGroups=myProcessMIBGroups, myCPUUtilizationMIBGroup=myCPUUtilizationMIBGroup, myNodeCPUTotalName=myNodeCPUTotalName, myNodeCPUTotal1min=myNodeCPUTotal1min)
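
# --- Usage sketch ---
# A compiled module like this is executed by pysnmp's MibBuilder, which injects
# `mibBuilder` into the namespace; it is not imported directly. A minimal
# loading sketch (kept as comments so the module itself stays loadable),
# assuming the compiled file sits on the MIB search path:
#
#     from pysnmp.smi import builder, view
#     mib_builder = builder.MibBuilder()
#     mib_builder.loadModules('MY-PROCESS-MIB')
#     mib_view = view.MibViewController(mib_builder)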

# === openforcefield/typing/engines/smirnoff/forcefield_utils.py (pschmidtke/openforcefield, MIT) ===

#!/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
forcefield_utils.py
Utilities relating to OpenMM ForceField replacement using SMIRKS-based matching.
AUTHORS
David L. Mobley <dmobley@mobleylab.org>
Based loosely on code from github.com/choderalab/openmoltools, and especially
parts from John Chodera and Kyle Beauchamp.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
from openforcefield.typing.engines.smirnoff import ForceField
from openforcefield.utils import get_data_filename
import simtk.openmm
from simtk.openmm import app
import simtk.openmm as mm
from simtk.openmm.app import element as elem
from simtk.openmm.app import Topology
import numpy as np
from openmoltools import system_checker
import copy
import openeye.oechem
import openeye.oeomega
import openeye.oequacpac
from openeye import oechem
from simtk import openmm, unit
import parmed
#=============================================================================
# UTILITY FUNCTIONS
#=============================================================================
def create_system_from_amber( prmtop_filename, crd_filename, verbose = False ):
"""Utility function. Create and return an OpenMM System given a prmtop and
crd, AMBER format files.
Parameters
----------
prmtop_filename : str (filename)
Filename of input AMBER format prmtop file
crd_filename : str (filename)
Filename of input AMBER format crd file
Returns
_______
topology : OpenMM Topology
system : OpenMM System
positions : initial atomic positions (OpenMM)
"""
# Create System object
prmtop = app.AmberPrmtopFile(prmtop_filename)
topology = prmtop.topology
system = prmtop.createSystem(nonbondedMethod = app.NoCutoff, constraints = None, implicitSolvent = None )
# Read coordinates
crd = app.AmberInpcrdFile( crd_filename )
positions = crd.getPositions()
return (topology, system, positions)
def create_system_from_molecule(forcefield, mol, verbose=False):
"""
Generate a System from the given OEMol and SMIRNOFF forcefield, return the resulting System.
Parameters
----------
forcefield : ForceField
SMIRNOFF forcefield
mol : oechem.OEMol
Molecule to test (must have coordinates)
Returns
----------
topology : OpenMM Topology
system : OpenMM System
positions : initial atomic positions (OpenMM)
"""
# Create system
from openforcefield.utils import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
system = forcefield.createSystem(topology, [mol], verbose=verbose)
# Get positions
coordinates = mol.GetCoords()
natoms = len(coordinates)
positions = np.zeros([natoms,3], np.float32)
for index in range(natoms):
(x,y,z) = coordinates[index]
positions[index,0] = x
positions[index,1] = y
positions[index,2] = z
positions = unit.Quantity(positions, unit.angstroms)
return topology, system, positions
def compare_system_energies( topology0, topology1, system0, system1, positions0, positions1=None, label0="AMBER system", label1 = "SMIRNOFF system", verbose = True, skip_assert = False, skip_improper = False ):
"""
Given two OpenMM systems, check that their energies and component-wise
energies are consistent, and return these. The same positions will be used
for both systems unless a second set of positions is provided.
Parameters
----------
topology0 : OpenMM Topology
Topology of first system
topology1 : OpenMM Topology
Topology of second system
system0 : OpenMM System
First system for comparison (usually from AMBER)
system1 : OpenMM System
Second system for comparison (usually from SMIRNOFF)
positions0 : simtk.unit.Quantity wrapped
Positions to use for energy evaluation comparison
positions1 (optional) : simtk.unit.Quantity wrapped (optional)
Positions to use for second OpenMM system; original positions are used
if this is not provided
label0 (optional) : str
String labeling system0 for output. Default, "AMBER system"
label1 (optional) : str
String labeling system1 for output. Default, "SMIRNOFF system"
verbose (optional) : bool
Print out info on energies, True/False (default True)
skip_assert (optional) : bool
Skip assertion that energies must be equal within specified tolerance. Default False.
skip_improper (optional) : bool
Skip detailed checking of force terms on impropers (helpful here if comparing with AMBER force fields using different definitions of impropers.) Default False.
Returns
----------
groups0 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the first simulation object
groups1 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the second simulation object
energy0 : simtk.unit.Quantity
Energy of first system
energy1 : simtk.unit.Quantity
Energy of second system
TO DO:
Allow energy extraction/comparison of terms specified by particular
SMARTS queries i.e. for specific bond, angle, or torsional terms.
"""
# Create integrator
timestep = 1.0 * unit.femtoseconds
integrator0 = simtk.openmm.VerletIntegrator( timestep )
integrator1 = simtk.openmm.VerletIntegrator( timestep )
# Grab second positions
    if positions1 is None:
positions1 = copy.deepcopy( positions0 )
# Create simulations
platform = simtk.openmm.Platform.getPlatformByName("Reference")
simulation0 = app.Simulation( topology0, system0, integrator0, platform = platform )
simulation0.context.setPositions(positions0)
simulation1 = app.Simulation( topology1, system1, integrator1, platform = platform )
simulation1.context.setPositions(positions1)
# Print what torsions were found if verbose
if verbose:
# Build list of atoms for debugging info
atoms0 = [ atom for atom in simulation0.topology.atoms() ]
atoms1 = [ atom for atom in simulation1.topology.atoms() ]
# Loop over first system and print torsion info
for force in simulation0.system.getForces():
if type(force) == mm.PeriodicTorsionForce:
print("Num (type) \t Num (type) \t Num (type) \t Num (type) \t per \t phase \t k0")
for k in range(force.getNumTorsions()):
i0, i1, i2, i3, per, phase, k0 = force.getTorsionParameters(k)
print("%3s (%3s)- %3s (%3s)- \t %s (%3s)- \t %3s (%3s)- \t %f \t %f \t %f " % (i0, atoms0[i0].name, i1, atoms0[i1].name, i2, atoms0[i2].name, i3, atoms0[i3].name, per, phase/unit.degree, k0/unit.kilojoule_per_mole) )
for force in simulation1.system.getForces():
if type(force) == mm.PeriodicTorsionForce:
print("Num (type) \t Num (type) \t Num (type) \t Num (type) \t per \t phase \t k0")
for k in range(force.getNumTorsions()):
i0, i1, i2, i3, per, phase, k0 = force.getTorsionParameters(k)
print("%3s (%3s)- %3s (%3s)- %3s (%3s)- %3s (%3s) - %f \t %f \t %f " % (i0, atoms1[i0].name, i1, atoms1[i1].name, i2, atoms1[i2].name, i3, atoms1[i3].name, per, phase/unit.degree, k0/unit.kilojoule_per_mole) )
# Do energy comparison, print info if desired
syscheck = system_checker.SystemChecker( simulation0, simulation1 )
if not skip_assert:
# Only check force terms if we want to make sure energies are identical
syscheck.check_force_parameters(skipImpropers = skip_improper)
groups0, groups1 = syscheck.check_energy_groups(skip_assert = skip_assert)
energy0, energy1 = syscheck.check_energies(skip_assert = skip_assert)
if verbose:
print("Energy of %s: " % label0, energy0 )
print("Energy of %s: " % label1, energy1 )
print("\nComponents of %s:" % label0 )
for key in groups0.keys():
print("%s: " % key, groups0[key] )
print("\nComponents of %s:" % label1 )
for key in groups1.keys():
print("%s: " % key, groups1[key] )
# Return
return groups0, groups1, energy0, energy1
def compare_molecule_energies( prmtop, crd, forcefield, mol, verbose = True, skip_assert=False, skip_improper = False):
"""
Compare energies for OpenMM Systems/topologies created from an AMBER prmtop
and crd versus from a SMIRNOFF forcefield file and OEMol which should
parameterize the same system with same parameters.
Parameters
----------
    prmtop : str (filename)
        Filename of input AMBER format prmtop file
    crd : str (filename)
        Filename of input AMBER format crd file
forcefield : ForceField
SMIRNOFF forcefield
mol : oechem.OEMol
Molecule to test
verbose (optional): Bool
Print out info. Default: True
skip_assert : bool
Skip assertion that energies must be equal within tolerance. Default, False.
skip_improper (optional) : bool
Skip detailed checking of force terms on impropers (helpful here if comparing with AMBER force fields using different definitions of impropers.) Default False.
Returns
--------
groups0 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the first simulation object
groups1 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the second simulation object
energy0 : simtk.unit.Quantity
Energy of first system
energy1 : simtk.unit.Quantity
Energy of second system
"""
ambertop, ambersys, amberpos = create_system_from_amber( prmtop, crd )
smirfftop, smirffsys, smirffpos = create_system_from_molecule(forcefield, mol, verbose = verbose)
groups0, groups1, energy0, energy1 = compare_system_energies( ambertop,
smirfftop, ambersys, smirffsys, amberpos, verbose = verbose, skip_assert = skip_assert, skip_improper = skip_improper )
return groups0, groups1, energy0, energy1
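
# --- Usage sketch ---
# A hedged sketch of the comparison helpers above; the file names, ffxml path
# and SMILES string are illustrative assumptions, not fixtures of this module.
def _example_compare_energies(prmtop='mol.prmtop', crd='mol.crd',
                              ffxml='forcefield/smirnoff99Frosst.ffxml',
                              smiles='CCO'):
    """Build an OEMol with 3D coordinates and compare AMBER vs SMIRNOFF energies."""
    mol = oechem.OEMol()
    oechem.OESmilesToMol(mol, smiles)
    openeye.oeomega.OEOmega()(mol)  # generate a 3D conformer
    ff = ForceField(ffxml)
    return compare_molecule_energies(prmtop, crd, ff, mol)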
def get_molecule_parameterIDs( oemols, ffxml):
"""Process a list of oemols with a specified SMIRNOFF ffxml file and determine which parameters are used by which molecules, returning collated results.
Parameters
----------
oemols : list
List of OpenEye OEChem molecules to parse; must have explicit hydrogens.
Returns
-------
parameters_by_molecule : dict
Parameter IDs used in each molecule, keyed by isomeric SMILES
generated from provided OEMols. Each entry in the dict is a list
which does not necessarily have unique entries; i.e. parameter IDs
which are used more than once will occur multiple times.
parameters_by_ID : dict
        Molecules in which each parameter ID occurs, keyed by parameter ID.
Each entry in the dict is a set of isomeric SMILES for molecules
in which that parameter occurs. No frequency information is stored.
"""
# Create storage
parameters_by_molecule = {}
parameters_by_ID = {}
# Generate isomeric SMILES
isosmiles = list()
for mol in oemols:
smi = oechem.OECreateIsoSmiString(mol)
        if smi not in isosmiles:
isosmiles.append(smi)
# If the molecule is already here, raise exception
else:
raise ValueError("Error: get_molecule_parameterIDs has been provided a list of oemols which contains the same molecule, having isomeric smiles %s, more than once." % smi )
# Label molecules
ff = ForceField( ffxml )
labels = ff.labelMolecules( oemols )
# Organize labels into output dictionary by looping over all molecules/smiles
for idx in range(len(isosmiles)):
# Pull smiles, initialize storage
smi = isosmiles[idx]
parameters_by_molecule[smi] = []
# Organize data for this molecule
data = labels[idx]
for force_type in data.keys():
for (atom_indices, pid, smirks) in data[force_type]:
# Store pid to molecule
parameters_by_molecule[smi].append(pid)
# Store which molecule this pid occurred in
if pid not in parameters_by_ID:
parameters_by_ID[pid] = set()
parameters_by_ID[pid].add(smi)
else:
parameters_by_ID[pid].add(smi)
return parameters_by_molecule, parameters_by_ID
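
# Example (hedged): given a list of OEMols `mols` and an ffxml path, a call like
# `by_mol, by_id = get_molecule_parameterIDs(mols, 'smirnoff99Frosst.ffxml')`
# maps each isomeric SMILES to the parameter IDs it exercises, and vice versa.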
def getMolParamIDToAtomIndex( oemol, ff):
"""Take an OEMol and a SMIRNOFF forcefield object and return a dictionary, keyed by parameter ID, where each entry is a tuple of ( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
Parameters
----------
oemol : OEMol
OpenEye OEMol with the molecule to investigate.
ff : ForceField
SMIRNOFF ForceField object (obtained from an ffxml via ForceField(ffxml)) containing FF of interest.
Returns
-------
param_usage : dictionary
        Dictionary, keyed by parameter ID, where each entry is a tuple of (smirks, [[atom1, ..., atomN], [atom1, ..., atomN]]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
"""
labels = ff.labelMolecules([oemol])
param_usage = {}
for mol_entry in range(len(labels)):
for force in labels[mol_entry].keys():
for (atom_indices, pid, smirks) in labels[mol_entry][force]:
                if pid not in param_usage:
param_usage[pid] = (smirks, [atom_indices])
else:
param_usage[pid][1].append( atom_indices )
return param_usage
def merge_system( topology0, topology1, system0, system1, positions0, positions1, label0="AMBER system", label1 = "SMIRNOFF system", verbose = True):
"""Merge two given OpenMM systems. Returns the merged OpenMM System.
Parameters
----------
topology0 : OpenMM Topology
Topology of first system (i.e. a protein)
topology1 : OpenMM Topology
Topology of second system (i.e. a ligand)
system0 : OpenMM System
First system for merging (usually from AMBER)
system1 : OpenMM System
Second system for merging (usually from SMIRNOFF)
positions0 : simtk.unit.Quantity wrapped
Positions to use for energy evaluation comparison
positions1 (optional) : simtk.unit.Quantity wrapped (optional)
Positions to use for second OpenMM system
label0 (optional) : str
String labeling system0 for output. Default, "AMBER system"
label1 (optional) : str
String labeling system1 for output. Default, "SMIRNOFF system"
verbose (optional) : bool
Print out info on topologies, True/False (default True)
Returns
----------
topology : OpenMM Topology
system : OpenMM System
positions: unit.Quantity position array
"""
#Load OpenMM Systems to ParmEd Structures
structure0 = parmed.openmm.load_topology( topology0, system0 )
structure1 = parmed.openmm.load_topology( topology1, system1 )
#Merge parameterized Structure
structure = structure0 + structure1
topology = structure.topology
#Concatenate positions arrays
positions_unit = unit.angstroms
positions0_dimensionless = np.array( positions0 / positions_unit )
positions1_dimensionless = np.array( positions1 / positions_unit )
coordinates = np.vstack((positions0_dimensionless,positions1_dimensionless))
natoms = len(coordinates)
positions = np.zeros([natoms,3], np.float32)
for index in range(natoms):
(x,y,z) = coordinates[index]
positions[index,0] = x
positions[index,1] = y
positions[index,2] = z
positions = unit.Quantity(positions, positions_unit)
#Generate merged OpenMM system
system = structure.createSystem()
if verbose:
print("Generating ParmEd Structures...\n \t{}: {}\n \t{}: {}\n".format(label0, structure0, label1, structure1))
print("Merged ParmEd Structure: {}".format( structure ))
return topology, system, positions
def save_system_to_amber( topology, system, positions, prmtop, crd ):
"""Save an OpenMM System, with provided topology and positions, to AMBER prmtop and coordinate files.
Parameters
----------
topology : OpenMM Topology
Topology of the system to be saved, perhaps as loaded from a PDB file or similar.
system : OpenMM System
Parameterized System to be saved, containing components represented by Topology
positions : unit.Quantity position array
Position array containing positions of atoms in topology/system
prmtop : filename
AMBER parameter file name to write
crd : filename
AMBER coordinate file name (ASCII crd format) to write
"""
structure = parmed.openmm.topsystem.load_topology( topology, system, positions )
structure.save( prmtop, overwrite = True, format="amber" )
structure.save( crd, format='rst7', overwrite = True)
def save_system_to_gromacs( topology, system, positions, top, gro ):
"""Save an OpenMM System, with provided topology and positions, to AMBER prmtop and coordinate files.
Parameters
----------
topology : OpenMM Topology
Topology of the system to be saved, perhaps as loaded from a PDB file or similar.
system : OpenMM System
Parameterized System to be saved, containing components represented by Topology
positions : unit.Quantity position array
Position array containing positions of atoms in topology/system
top : filename
GROMACS topology file name to write
gro : filename
GROMACS coordinate file name (.gro format) to write
"""
structure = parmed.openmm.topsystem.load_topology( topology, system, positions )
structure.save( top, overwrite = True, format="gromacs")
structure.save( gro, overwrite = True, format="gro")
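
# --- Usage sketch ---
# An end-to-end flow under stated assumptions: the ffxml path and output file
# names are hypothetical, and *mol* must be an OEMol with 3D coordinates.
def _example_parameterize_and_export(mol, ffxml='forcefield/smirnoff99Frosst.ffxml'):
    """Parameterize *mol* with a SMIRNOFF ffxml and export AMBER and GROMACS files."""
    ff = ForceField(ffxml)
    topology, system, positions = create_system_from_molecule(ff, mol)
    save_system_to_amber(topology, system, positions, 'example.prmtop', 'example.crd')
    save_system_to_gromacs(topology, system, positions, 'example.top', 'example.gro')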
| 40.407173 | 313 | 0.661567 | #!/bin/env python
#=============================================================================================
# MODULE DOCSTRING
#=============================================================================================
"""
forcefield_utils.py
Utilities relating to OpenMM ForceField replacement using SMIRKS-based matching.
AUTHORS
David L. Mobley <dmobley@mobleylab.org>
Based loosely on code from github.com/choderalab/openmoltools, and especially
parts from John Chodera and Kyle Beauchamp.
"""
#=============================================================================================
# GLOBAL IMPORTS
#=============================================================================================
import os
from openforcefield.typing.engines.smirnoff import ForceField
from openforcefield.utils import get_data_filename
import simtk.openmm
from simtk.openmm import app
import simtk.openmm as mm
from simtk.openmm.app import element as elem
from simtk.openmm.app import Topology
import numpy as np
from openmoltools import system_checker
import copy
import openeye.oechem
import openeye.oeomega
import openeye.oequacpac
from openeye import oechem
from simtk import openmm, unit
import parmed
#=============================================================================
# UTILITY FUNCTIONS
#=============================================================================
def create_system_from_amber( prmtop_filename, crd_filename, verbose = False ):
"""Utility function. Create and return an OpenMM System given a prmtop and
crd, AMBER format files.
Parameters
----------
prmtop_filename : str (filename)
Filename of input AMBER format prmtop file
crd_filename : str (filename)
Filename of input AMBER format crd file
Returns
_______
topology : OpenMM Topology
system : OpenMM System
positions : initial atomic positions (OpenMM)
"""
# Create System object
prmtop = app.AmberPrmtopFile(prmtop_filename)
topology = prmtop.topology
system = prmtop.createSystem(nonbondedMethod = app.NoCutoff, constraints = None, implicitSolvent = None )
# Read coordinates
crd = app.AmberInpcrdFile( crd_filename )
positions = crd.getPositions()
return (topology, system, positions)
def create_system_from_molecule(forcefield, mol, verbose=False):
"""
Generate a System from the given OEMol and SMIRNOFF forcefield, return the resulting System.
Parameters
----------
forcefield : ForceField
SMIRNOFF forcefield
mol : oechem.OEMol
Molecule to test (must have coordinates)
Returns
----------
topology : OpenMM Topology
system : OpenMM System
positions : initial atomic positions (OpenMM)
"""
# Create system
from openforcefield.utils import generateTopologyFromOEMol
topology = generateTopologyFromOEMol(mol)
system = forcefield.createSystem(topology, [mol], verbose=verbose)
# Get positions
coordinates = mol.GetCoords()
natoms = len(coordinates)
positions = np.zeros([natoms,3], np.float32)
for index in range(natoms):
(x,y,z) = coordinates[index]
positions[index,0] = x
positions[index,1] = y
positions[index,2] = z
positions = unit.Quantity(positions, unit.angstroms)
return topology, system, positions
def compare_system_energies( topology0, topology1, system0, system1, positions0, positions1=None, label0="AMBER system", label1 = "SMIRNOFF system", verbose = True, skip_assert = False, skip_improper = False ):
"""
Given two OpenMM systems, check that their energies and component-wise
energies are consistent, and return these. The same positions will be used
for both systems unless a second set of positions is provided.
Parameters
----------
topology0 : OpenMM Topology
Topology of first system
topology1 : OpenMM Topology
Topology of second system
system0 : OpenMM System
First system for comparison (usually from AMBER)
system1 : OpenMM System
Second system for comparison (usually from SMIRNOFF)
positions0 : simtk.unit.Quantity wrapped
Positions to use for energy evaluation comparison
positions1 (optional) : simtk.unit.Quantity wrapped (optional)
Positions to use for second OpenMM system; original positions are used
if this is not provided
label0 (optional) : str
String labeling system0 for output. Default, "AMBER system"
label1 (optional) : str
String labeling system1 for output. Default, "SMIRNOFF system"
verbose (optional) : bool
Print out info on energies, True/False (default True)
skip_assert (optional) : bool
Skip assertion that energies must be equal within specified tolerance. Default False.
skip_improper (optional) : bool
Skip detailed checking of force terms on impropers (helpful here if comparing with AMBER force fields using different definitions of impropers.) Default False.
Returns
----------
groups0 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the first simulation object
groups1 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the second simulation object
energy0 : simtk.unit.Quantity
Energy of first system
energy1 : simtk.unit.Quantity
Energy of second system
TO DO:
Allow energy extraction/comparison of terms specified by particular
SMARTS queries i.e. for specific bond, angle, or torsional terms.
"""
# Create integrator
timestep = 1.0 * unit.femtoseconds
integrator0 = simtk.openmm.VerletIntegrator( timestep )
integrator1 = simtk.openmm.VerletIntegrator( timestep )
# Grab second positions
if positions1 == None:
positions1 = copy.deepcopy( positions0 )
# Create simulations
platform = simtk.openmm.Platform.getPlatformByName("Reference")
simulation0 = app.Simulation( topology0, system0, integrator0, platform = platform )
simulation0.context.setPositions(positions0)
simulation1 = app.Simulation( topology1, system1, integrator1, platform = platform )
simulation1.context.setPositions(positions1)
# Print what torsions were found if verbose
if verbose:
# Build list of atoms for debugging info
atoms0 = [ atom for atom in simulation0.topology.atoms() ]
atoms1 = [ atom for atom in simulation1.topology.atoms() ]
# Loop over first system and print torsion info
for force in simulation0.system.getForces():
if type(force) == mm.PeriodicTorsionForce:
print("Num (type) \t Num (type) \t Num (type) \t Num (type) \t per \t phase \t k0")
for k in range(force.getNumTorsions()):
i0, i1, i2, i3, per, phase, k0 = force.getTorsionParameters(k)
print("%3s (%3s)- %3s (%3s)- \t %s (%3s)- \t %3s (%3s)- \t %f \t %f \t %f " % (i0, atoms0[i0].name, i1, atoms0[i1].name, i2, atoms0[i2].name, i3, atoms0[i3].name, per, phase/unit.degree, k0/unit.kilojoule_per_mole) )
for force in simulation1.system.getForces():
if type(force) == mm.PeriodicTorsionForce:
print("Num (type) \t Num (type) \t Num (type) \t Num (type) \t per \t phase \t k0")
for k in range(force.getNumTorsions()):
i0, i1, i2, i3, per, phase, k0 = force.getTorsionParameters(k)
print("%3s (%3s)- %3s (%3s)- %3s (%3s)- %3s (%3s) - %f \t %f \t %f " % (i0, atoms1[i0].name, i1, atoms1[i1].name, i2, atoms1[i2].name, i3, atoms1[i3].name, per, phase/unit.degree, k0/unit.kilojoule_per_mole) )
# Do energy comparison, print info if desired
syscheck = system_checker.SystemChecker( simulation0, simulation1 )
if not skip_assert:
# Only check force terms if we want to make sure energies are identical
syscheck.check_force_parameters(skipImpropers = skip_improper)
groups0, groups1 = syscheck.check_energy_groups(skip_assert = skip_assert)
energy0, energy1 = syscheck.check_energies(skip_assert = skip_assert)
if verbose:
print("Energy of %s: " % label0, energy0 )
print("Energy of %s: " % label1, energy1 )
print("\nComponents of %s:" % label0 )
for key in groups0.keys():
print("%s: " % key, groups0[key] )
print("\nComponents of %s:" % label1 )
for key in groups1.keys():
print("%s: " % key, groups1[key] )
# Return
return groups0, groups1, energy0, energy1
def compare_molecule_energies( prmtop, crd, forcefield, mol, verbose = True, skip_assert=False, skip_improper = False):
"""
Compare energies for OpenMM Systems/topologies created from an AMBER prmtop
and crd versus from a SMIRNOFF forcefield file and OEMol which should
parameterize the same system with same parameters.
Parameters
----------
prmtop_filename : str (filename)
Filename of input AMBER format prmtop file
crd_filename : str (filename)
Filename of input AMBER format crd file
forcefield : ForceField
SMIRNOFF forcefield
mol : oechem.OEMol
Molecule to test
verbose (optional): Bool
Print out info. Default: True
skip_assert : bool
Skip assertion that energies must be equal within tolerance. Default, False.
skip_improper (optional) : bool
Skip detailed checking of force terms on impropers (helpful here if comparing with AMBER force fields using different definitions of impropers.) Default False.
Returns
--------
groups0 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the first simulation object
groups1 : dict
As returned by openmoltools.system_checker.check_energy_groups,
a dictionary with keys "bond", "angle", "nb", "torsion" and values
corresponding to the energies of these components for the second simulation object
energy0 : simtk.unit.Quantity
Energy of first system
energy1 : simtk.unit.Quantity
Energy of second system
"""
ambertop, ambersys, amberpos = create_system_from_amber( prmtop, crd )
smirfftop, smirffsys, smirffpos = create_system_from_molecule(forcefield, mol, verbose = verbose)
groups0, groups1, energy0, energy1 = compare_system_energies( ambertop,
smirfftop, ambersys, smirffsys, amberpos, verbose = verbose, skip_assert = skip_assert, skip_improper = skip_improper )
return groups0, groups1, energy0, energy1
def get_molecule_parameterIDs( oemols, ffxml):
"""Process a list of oemols with a specified SMIRNOFF ffxml file and determine which parameters are used by which molecules, returning collated results.
Parameters
----------
oemols : list
List of OpenEye OEChem molecules to parse; must have explicit hydrogens.
Returns
-------
parameters_by_molecule : dict
Parameter IDs used in each molecule, keyed by isomeric SMILES
generated from provided OEMols. Each entry in the dict is a list
which does not necessarily have unique entries; i.e. parameter IDs
which are used more than once will occur multiple times.
parameters_by_ID : dict
Molecules in which each parameter ID occur, keyed by parameter ID.
Each entry in the dict is a set of isomeric SMILES for molecules
in which that parameter occurs. No frequency information is stored.
"""
# Create storage
parameters_by_molecule = {}
parameters_by_ID = {}
# Generate isomeric SMILES
isosmiles = list()
for mol in oemols:
smi = oechem.OECreateIsoSmiString(mol)
        # If the molecule is already present, raise an exception
        if smi in isosmiles:
            raise ValueError("Error: get_molecule_parameterIDs has been provided a list of oemols which contains the same molecule, having isomeric smiles %s, more than once." % smi )
        isosmiles.append(smi)
# Label molecules
ff = ForceField( ffxml )
labels = ff.labelMolecules( oemols )
# Organize labels into output dictionary by looping over all molecules/smiles
for idx in range(len(isosmiles)):
# Pull smiles, initialize storage
smi = isosmiles[idx]
parameters_by_molecule[smi] = []
# Organize data for this molecule
data = labels[idx]
for force_type in data.keys():
for (atom_indices, pid, smirks) in data[force_type]:
# Store pid to molecule
parameters_by_molecule[smi].append(pid)
# Store which molecule this pid occurred in
                if pid not in parameters_by_ID:
                    parameters_by_ID[pid] = set()
                parameters_by_ID[pid].add(smi)
return parameters_by_molecule, parameters_by_ID
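# Illustrative use (assumes `mols` is a list of unique OEMols with explicit
# hydrogens and 'forcefield.ffxml' is a hypothetical SMIRNOFF file):
#
#   by_mol, by_id = get_molecule_parameterIDs(mols, 'forcefield.ffxml')
#   for pid, smiles_set in by_id.items():
#       print(pid, len(smiles_set))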
def getMolParamIDToAtomIndex( oemol, ff):
"""Take an OEMol and a SMIRNOFF forcefield object and return a dictionary, keyed by parameter ID, where each entry is a tuple of ( smirks, [[atom1, ... atomN], [atom1, ... atomN]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
Parameters
----------
oemol : OEMol
OpenEye OEMol with the molecule to investigate.
ff : ForceField
SMIRNOFF ForceField object (obtained from an ffxml via ForceField(ffxml)) containing FF of interest.
Returns
-------
param_usage : dictionary
        Dictionary, keyed by parameter ID, where each entry is a tuple of (smirks, [[atom1, ..., atomN], [atom1, ..., atomN]]) giving the SMIRKS corresponding to that parameter ID and a list of the atom groups in that molecule that parameter is applied to.
"""
labels = ff.labelMolecules([oemol])
param_usage = {}
for mol_entry in range(len(labels)):
for force in labels[mol_entry].keys():
for (atom_indices, pid, smirks) in labels[mol_entry][force]:
                if pid not in param_usage:
param_usage[pid] = (smirks, [atom_indices])
else:
param_usage[pid][1].append( atom_indices )
return param_usage
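# Illustrative use (assumes `oemol` and `ff` as described in the docstring):
#
#   usage = getMolParamIDToAtomIndex(oemol, ff)
#   for pid, (smirks, atom_groups) in usage.items():
#       print(pid, smirks, len(atom_groups))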
def merge_system( topology0, topology1, system0, system1, positions0, positions1, label0="AMBER system", label1 = "SMIRNOFF system", verbose = True):
"""Merge two given OpenMM systems. Returns the merged OpenMM System.
Parameters
----------
topology0 : OpenMM Topology
Topology of first system (i.e. a protein)
topology1 : OpenMM Topology
Topology of second system (i.e. a ligand)
system0 : OpenMM System
First system for merging (usually from AMBER)
system1 : OpenMM System
Second system for merging (usually from SMIRNOFF)
    positions0 : simtk.unit.Quantity wrapped
        Positions of the first system
    positions1 : simtk.unit.Quantity wrapped
        Positions of the second system
label0 (optional) : str
String labeling system0 for output. Default, "AMBER system"
label1 (optional) : str
String labeling system1 for output. Default, "SMIRNOFF system"
verbose (optional) : bool
Print out info on topologies, True/False (default True)
Returns
----------
topology : OpenMM Topology
system : OpenMM System
positions: unit.Quantity position array
"""
#Load OpenMM Systems to ParmEd Structures
structure0 = parmed.openmm.load_topology( topology0, system0 )
structure1 = parmed.openmm.load_topology( topology1, system1 )
#Merge parameterized Structure
structure = structure0 + structure1
topology = structure.topology
#Concatenate positions arrays
positions_unit = unit.angstroms
positions0_dimensionless = np.array( positions0 / positions_unit )
positions1_dimensionless = np.array( positions1 / positions_unit )
coordinates = np.vstack((positions0_dimensionless,positions1_dimensionless))
natoms = len(coordinates)
positions = np.zeros([natoms,3], np.float32)
for index in range(natoms):
(x,y,z) = coordinates[index]
positions[index,0] = x
positions[index,1] = y
positions[index,2] = z
positions = unit.Quantity(positions, positions_unit)
#Generate merged OpenMM system
system = structure.createSystem()
if verbose:
print("Generating ParmEd Structures...\n \t{}: {}\n \t{}: {}\n".format(label0, structure0, label1, structure1))
print("Merged ParmEd Structure: {}".format( structure ))
return topology, system, positions
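# Minimal sketch of combining a protein and a ligand system (all variables
# hypothetical):
#
#   top, merged_system, pos = merge_system(prot_top, lig_top, prot_sys,
#       lig_sys, prot_pos, lig_pos, verbose=False)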
def save_system_to_amber( topology, system, positions, prmtop, crd ):
"""Save an OpenMM System, with provided topology and positions, to AMBER prmtop and coordinate files.
Parameters
----------
topology : OpenMM Topology
Topology of the system to be saved, perhaps as loaded from a PDB file or similar.
system : OpenMM System
Parameterized System to be saved, containing components represented by Topology
positions : unit.Quantity position array
Position array containing positions of atoms in topology/system
prmtop : filename
AMBER parameter file name to write
crd : filename
AMBER coordinate file name (ASCII crd format) to write
"""
structure = parmed.openmm.topsystem.load_topology( topology, system, positions )
structure.save( prmtop, overwrite = True, format="amber" )
structure.save( crd, format='rst7', overwrite = True)
def save_system_to_gromacs( topology, system, positions, top, gro ):
"""Save an OpenMM System, with provided topology and positions, to AMBER prmtop and coordinate files.
Parameters
----------
topology : OpenMM Topology
Topology of the system to be saved, perhaps as loaded from a PDB file or similar.
system : OpenMM System
Parameterized System to be saved, containing components represented by Topology
positions : unit.Quantity position array
Position array containing positions of atoms in topology/system
top : filename
GROMACS topology file name to write
gro : filename
GROMACS coordinate file name (.gro format) to write
"""
structure = parmed.openmm.topsystem.load_topology( topology, system, positions )
structure.save( top, overwrite = True, format="gromacs")
structure.save( gro, overwrite = True, format="gro")
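# Hypothetical round trip of a merged system to AMBER and GROMACS files
# (output file names are made up for illustration):
#
#   save_system_to_amber(top, merged_system, pos, 'merged.prmtop', 'merged.crd')
#   save_system_to_gromacs(top, merged_system, pos, 'merged.top', 'merged.gro')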
| 0 | 0 | 0 |
45dcabbf64ec5140e782fbc10fcddaccf3c13f9e | 30,301 | py | Python | util.py | ourresearch/jump-api | 5252da53656389f9bd53811929d5156ff8f7620c | ["MIT"] | 15 | 2019-11-07T09:57:56.000Z | 2022-01-03T22:45:28.000Z | util.py | ourresearch/jump-api | 5252da53656389f9bd53811929d5156ff8f7620c | ["MIT"] | 6 | 2021-03-19T23:15:14.000Z | 2021-09-23T16:46:19.000Z | util.py | ourresearch/jump-api | 5252da53656389f9bd53811929d5156ff8f7620c | ["MIT"] | 1 | 2021-12-17T05:50:47.000Z | 2021-12-17T05:50:47.000Z |
# coding: utf-8
import bisect
import codecs
import collections
import datetime
import locale
import logging
import math
import os
import re
import tempfile
import time
import traceback
import unicodedata
import urllib.parse
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
import chardet
import numpy as np
import heroku3
import requests
import simplejson as json
import sqlalchemy
import unicodecsv as csv
from flask import current_app
from flask_jwt_extended import get_jwt_identity
from requests.adapters import HTTPAdapter
from simplejson import dumps
from sqlalchemy import exc
from sqlalchemy import sql
from unidecode import unidecode
from werkzeug.wsgi import ClosingIterator
try:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') #use locale.format for commafication
except locale.Error:
locale.setlocale(locale.LC_ALL, '') #set to default locale (works on windows)
# from http://stackoverflow.com/a/3233356/596939
# returns dict with values that are proportion of all values
# good for deduping strings. warning: output removes spaces so isn't readable.
# from http://stackoverflow.com/a/11066579/596939
# from http://stackoverflow.com/a/22238613/596939
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, datetime.datetime):
serial = obj.isoformat()
return serial
raise TypeError ("Type not serializable")
def median(my_list):
"""
Find the median of a list of ints
from https://stackoverflow.com/questions/24101524/finding-median-of-list-in-python/24101655#comment37177662_24101655
"""
my_list = sorted(my_list)
if len(my_list) < 1:
return None
    if len(my_list) % 2 == 1:
        return my_list[((len(my_list) + 1) // 2) - 1]
    if len(my_list) % 2 == 0:
        return float(sum(my_list[(len(my_list) // 2) - 1:(len(my_list) // 2) + 1])) / 2.0
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
from http://stackoverflow.com/a/312464
"""
for i in range(0, len(l), n):
yield l[i:i+n]
# from http://stackoverflow.com/a/20007730/226013
ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
#from http://farmdev.com/talks/unicode/
# getting a "decoding Unicode is not supported" error in this function?
# might need to reinstall libaries as per
# http://stackoverflow.com/questions/17092849/flask-login-typeerror-decoding-unicode-is-not-supported
# could also make the random request have other filters
# see docs here: https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#sample
# usage:
# dois = get_random_dois(50000, from_date="2002-01-01", only_journal_articles=True)
# dois = get_random_dois(100000, only_journal_articles=True)
# fh = open("data/random_dois_articles_100k.txt", "w")
# fh.writelines(u"\n".join(dois))
# fh.close()
# from https://github.com/elastic/elasticsearch-py/issues/374
# to work around unicode problem
# class JSONSerializerPython2(elasticsearch.serializer.JSONSerializer):
# """Override elasticsearch library serializer to ensure it encodes utf characters during json dump.
# See original at: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L42
# A description of how ensure_ascii encodes unicode characters to ensure they can be sent across the wire
# as ascii can be found here: https://docs.python.org/2/library/json.html#basic-usage
# """
# def dumps(self, data):
# # don't serialize strings
# if isinstance(data, elasticsearch.compat.string_types):
# return data
# try:
# return json.dumps(data, default=self.default, ensure_ascii=True)
# except (ValueError, TypeError) as e:
# raise elasticsearch.exceptions.SerializationError(data, e)
# https://github.com/psycopg/psycopg2/issues/897
# from https://gist.github.com/douglasmiranda/5127251
# deletes a key from nested dict
# from https://stackoverflow.com/a/50762571/596939
# from https://stackoverflow.com/a/50762571/596939
# this is to support fully after-flask response sent efforts
# from # https://stackoverflow.com/a/51013358/596939
# use like
# @app.after_response
# def say_hi():
# print("hi")
# f5 from https://www.peterbe.com/plog/uniqifiers-benchmark
| 30.030723 | 145 | 0.624171 | # coding: utf-8
import bisect
import codecs
import collections
import datetime
import locale
import logging
import math
import os
import re
import tempfile
import time
import traceback
import unicodedata
import urllib.parse
from codecs import BOM_UTF8, BOM_UTF16_BE, BOM_UTF16_LE, BOM_UTF32_BE, BOM_UTF32_LE
import chardet
import numpy as np
import heroku3
import requests
import simplejson as json
import sqlalchemy
import unicodecsv as csv
from flask import current_app
from flask_jwt_extended import get_jwt_identity
from requests.adapters import HTTPAdapter
from simplejson import dumps
from sqlalchemy import exc
from sqlalchemy import sql
from unidecode import unidecode
from werkzeug.wsgi import ClosingIterator
try:
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') #use locale.format for commafication
except locale.Error:
locale.setlocale(locale.LC_ALL, '') #set to default locale (works on windows)
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
class NoDoiException(Exception):
pass
class TimingMessages(object):
def __init__(self):
self.start_time = time.time()
self.section_time = time.time()
self.messages = []
def format_timing_message(self, message, use_start_time=False):
my_elapsed = elapsed(self.section_time, 2)
if use_start_time:
my_elapsed = elapsed(self.start_time, 2)
# now reset section time
self.section_time = time.time()
return "{: <30} {: >6}s".format(message, my_elapsed)
def log_timing(self, message):
self.messages.append(self.format_timing_message(message))
def to_dict(self):
self.messages.append(self.format_timing_message("TOTAL", use_start_time=True))
return self.messages
class DelayedAdapter(HTTPAdapter):
def send(self, request, stream=False, timeout=None, verify=True, cert=None, proxies=None):
# logger.info(u"in DelayedAdapter getting {}, sleeping for 2 seconds".format(request.url))
# sleep(2)
start_time = time.time()
response = super(DelayedAdapter, self).send(request, stream, timeout, verify, cert, proxies)
# logger.info(u" HTTPAdapter.send for {} took {} seconds".format(request.url, elapsed(start_time, 2)))
return response
def read_csv_file(filename, sep=","):
with open(filename, "rU") as csv_file:
my_reader = csv.DictReader(csv_file, delimiter=sep, encoding='utf-8-sig')
rows = [row for row in my_reader]
return rows
# from http://stackoverflow.com/a/3233356/596939
def update_recursive_sum(d, u):
for k, v in u.items():
        if isinstance(v, collections.abc.Mapping):
r = update_recursive_sum(d.get(k, {}), v)
d[k] = r
else:
if k in d:
d[k] += u[k]
else:
d[k] = u[k]
return d
# returns dict with values that are proportion of all values
def as_proportion(my_dict):
if not my_dict:
return {}
total = sum(my_dict.values())
resp = {}
for k, v in my_dict.items():
resp[k] = round(float(v)/total, 2)
return resp
def calculate_percentile(refset, value):
if value is None: # distinguish between that and zero
return None
matching_index = bisect.bisect_left(refset, value)
percentile = float(matching_index) / len(refset)
# print u"percentile for {} is {}".format(value, percentile)
return percentile
def clean_html(raw_html):
cleanr = re.compile('<.*?>')
cleantext = re.sub(cleanr, '', raw_html)
return cleantext
# good for deduping strings. warning: output removes spaces so isn't readable.
def normalize(text):
response = text.lower()
response = unidecode(str(response))
response = clean_html(response) # has to be before remove_punctuation
response = remove_punctuation(response)
response = re.sub(r"\b(a|an|the)\b", "", response)
response = re.sub(r"\b(and)\b", "", response)
response = re.sub(r"\s+", "", response)
return response
def normalize_simple(text):
response = text.lower()
response = remove_punctuation(response)
response = re.sub(r"\b(a|an|the)\b", "", response)
response = re.sub(r"\s+", "", response)
return response
def remove_everything_but_alphas(input_string):
# from http://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python
only_alphas = input_string
if input_string:
only_alphas = "".join(e for e in input_string if (e.isalpha()))
return only_alphas
def remove_punctuation(input_string):
# from http://stackoverflow.com/questions/265960/best-way-to-strip-punctuation-from-a-string-in-python
no_punc = input_string
if input_string:
no_punc = "".join(e for e in input_string if (e.isalnum() or e.isspace()))
return no_punc
# from http://stackoverflow.com/a/11066579/596939
def replace_punctuation(text, sub):
punctutation_cats = set(['Pc', 'Pd', 'Ps', 'Pe', 'Pi', 'Pf', 'Po'])
chars = []
for my_char in text:
if unicodedata.category(my_char) in punctutation_cats:
chars.append(sub)
else:
chars.append(my_char)
return "".join(chars)
# from http://stackoverflow.com/a/22238613/596939
def json_serial(obj):
"""JSON serializer for objects not serializable by default json code"""
    if isinstance(obj, datetime.datetime):
serial = obj.isoformat()
return serial
raise TypeError ("Type not serializable")
def conversational_number(number):
words = {
"1.0": "one",
"2.0": "two",
"3.0": "three",
"4.0": "four",
"5.0": "five",
"6.0": "six",
"7.0": "seven",
"8.0": "eight",
"9.0": "nine",
}
if number < 1:
return round(number, 2)
elif number < 1000:
return round(math.floor(number))
elif number < 1000000:
divided = number / 1000.0
unit = "thousand"
else:
divided = number / 1000000.0
unit = "million"
    # round(divided, 2) gives e.g. '2.0' or '1.25'; check the words table
    # before trimming trailing zeros, so whole values still read as words
    short_number = '{}'.format(round(divided, 2))
    if short_number in words:
        short_number = words[short_number]
    else:
        short_number = short_number.rstrip('0').rstrip('.')
    return short_number + " " + unit
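# Examples (illustrative): conversational_number(0.42) -> 0.42,
# conversational_number(999) -> 999, conversational_number(1500) -> '1.5 thousand',
# conversational_number(2000000) -> 'two million'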
def safe_commit(db):
try:
db.session.commit()
return True
except (KeyboardInterrupt, SystemExit):
# let these ones through, don't save anything to db
raise
except sqlalchemy.exc.DataError:
try:
print("sqlalchemy.exc.DataError on commit. rolling back.")
db.session.rollback()
except:
pass
except Exception:
try:
print("generic exception in commit. rolling back.")
db.session.rollback()
except:
pass
logging.exception("commit error")
return False
def is_pmc(url):
return "ncbi.nlm.nih.gov/pmc" in url or "europepmc.org/articles/" in url
def is_doi(text):
if not text:
return False
try_to_clean_doi = clean_doi(text, return_none_if_error=True)
if try_to_clean_doi:
return True
return False
def is_issn(text):
if not text:
return False
# include X and F
p = re.compile(r"[\dxf]{4}-[\dxf]{4}")
matches = re.findall(p, text.lower())
if len(matches) > 0:
return True
return False
def is_doi_url(url):
if not url:
return False
# test urls at https://regex101.com/r/yX5cK0/2
p = re.compile(r"https?:\/\/(?:dx.)?doi.org\/(.*)")
matches = re.findall(p, url.lower())
if len(matches) > 0:
return True
return False
def is_ip(ip):
if re.match(r"^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}$", ip):
return True
return False
def clean_doi(dirty_doi, return_none_if_error=False):
if not dirty_doi:
if return_none_if_error:
return None
else:
raise NoDoiException("There's no DOI at all.")
dirty_doi = dirty_doi.strip()
dirty_doi = dirty_doi.lower()
# test cases for this regex are at https://regex101.com/r/zS4hA0/1
p = re.compile(r'(10\.\d+\/[^\s]+)')
matches = re.findall(p, dirty_doi)
if len(matches) == 0:
if return_none_if_error:
return None
else:
raise NoDoiException("There's no valid DOI.")
match = matches[0]
    match = remove_nonprinting_characters(match)
    if isinstance(match, bytes):
        match = match.decode("utf-8")  # unicode is valid in dois
    resp = match
# remove any url fragments
if "#" in resp:
resp = resp.split("#")[0]
# remove double quotes, they shouldn't be there as per http://www.doi.org/syntax.html
resp = resp.replace('"', '')
# remove trailing period, comma -- it is likely from a sentence or citation
if resp.endswith(",") or resp.endswith("."):
resp = resp[:-1]
return resp
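# Examples of the normalization above (inputs are illustrative):
#
#   clean_doi('https://doi.org/10.1234/ABC.5')            # -> '10.1234/abc.5'
#   clean_doi('not a doi', return_none_if_error=True)     # -> None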
def pick_best_url(urls):
if not urls:
return None
#get a backup
response = urls[0]
# now go through and pick the best one
for url in urls:
# doi if available
if "doi.org" in url:
response = url
# anything else if what we currently have is bogus
if response == "http://www.ncbi.nlm.nih.gov/pmc/articles/PMC":
response = url
return response
def date_as_iso_utc(datetime_object):
if datetime_object is None:
return None
date_string = "{}{}".format(datetime_object, "+00:00")
return date_string
def dict_from_dir(obj, keys_to_ignore=None, keys_to_show="all"):
if keys_to_ignore is None:
keys_to_ignore = []
elif isinstance(keys_to_ignore, str):
keys_to_ignore = [keys_to_ignore]
ret = {}
if keys_to_show != "all":
for key in keys_to_show:
ret[key] = getattr(obj, key)
return ret
for k in dir(obj):
value = getattr(obj, k)
if k.startswith("_"):
pass
elif k in keys_to_ignore:
pass
# hide sqlalchemy stuff
elif k in ["query", "query_class", "metadata"]:
pass
elif callable(value):
pass
else:
try:
# convert datetime objects...generally this will fail becase
# most things aren't datetime object.
ret[k] = time.mktime(value.timetuple())
except AttributeError:
ret[k] = value
return ret
def median(my_list):
"""
Find the median of a list of ints
from https://stackoverflow.com/questions/24101524/finding-median-of-list-in-python/24101655#comment37177662_24101655
"""
my_list = sorted(my_list)
if len(my_list) < 1:
return None
    if len(my_list) % 2 == 1:
        return my_list[((len(my_list) + 1) // 2) - 1]
    if len(my_list) % 2 == 0:
        return float(sum(my_list[(len(my_list) // 2) - 1:(len(my_list) // 2) + 1])) / 2.0
def underscore_to_camelcase(value):
words = value.split("_")
capitalized_words = []
for word in words:
capitalized_words.append(word.capitalize())
return "".join(capitalized_words)
def chunks(l, n):
"""
Yield successive n-sized chunks from l.
from http://stackoverflow.com/a/312464
"""
for i in range(0, len(l), n):
yield l[i:i+n]
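# Example: list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]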
def page_query(q, page_size=1000):
    offset = 0
    while True:
        r = False
        print("util.page_query() retrieved {} things".format(offset))
        for elem in q.limit(page_size).offset(offset):
            r = True
            yield elem
        offset += page_size
        if not r:
            break
def elapsed(since, round_places=2):
return round(time.time() - since, round_places)
def truncate(str, max=100):
if len(str) > max:
return str[0:max] + "..."
else:
return str
def str_to_bool(x):
if x.lower() in ["true", "1", "yes"]:
return True
elif x.lower() in ["false", "0", "no"]:
return False
else:
raise ValueError("This string can't be cast to a boolean.")
# from http://stackoverflow.com/a/20007730/226013
ordinal = lambda n: "%d%s" % (n, "tsnrhtdd"[(n // 10 % 10 != 1) * (n % 10 < 4) * n % 10::4])
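# Examples: ordinal(1) -> '1st', ordinal(2) -> '2nd', ordinal(3) -> '3rd',
# ordinal(11) -> '11th'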
#from http://farmdev.com/talks/unicode/
def to_unicode_or_bust(obj, encoding='utf-8'):
    # decode bytes to str; str passes through unchanged
    if isinstance(obj, bytes):
        obj = str(obj, encoding)
    return obj
def remove_nonprinting_characters(input, encoding='utf-8'):
    input_was_unicode = isinstance(input, str)
    unicode_input = to_unicode_or_bust(input)
    # see http://www.fileformat.info/info/unicode/category/index.htm
    char_classes_to_remove = ["C", "M", "Z"]
    response = ''.join(c for c in unicode_input if unicodedata.category(c)[0] not in char_classes_to_remove)
    if not input_was_unicode:
        response = response.encode(encoding)
    return response
# getting a "decoding Unicode is not supported" error in this function?
# might need to reinstall libaries as per
# http://stackoverflow.com/questions/17092849/flask-login-typeerror-decoding-unicode-is-not-supported
class HTTPMethodOverrideMiddleware(object):
allowed_methods = frozenset([
'GET',
'HEAD',
'POST',
'DELETE',
'PUT',
'PATCH',
'OPTIONS'
])
bodyless_methods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE'])
def __init__(self, app):
self.app = app
def __call__(self, environ, start_response):
method = environ.get('HTTP_X_HTTP_METHOD_OVERRIDE', '').upper()
if method in self.allowed_methods:
method = method.encode('ascii', 'replace')
environ['REQUEST_METHOD'] = method
if method in self.bodyless_methods:
environ['CONTENT_LENGTH'] = '0'
return self.app(environ, start_response)
# could also make the random request have other filters
# see docs here: https://github.com/CrossRef/rest-api-doc/blob/master/rest_api.md#sample
# usage:
# dois = get_random_dois(50000, from_date="2002-01-01", only_journal_articles=True)
# dois = get_random_dois(100000, only_journal_articles=True)
# fh = open("data/random_dois_articles_100k.txt", "w")
# fh.writelines(u"\n".join(dois))
# fh.close()
def get_random_dois(n, from_date=None, only_journal_articles=True):
dois = []
while len(dois) < n:
# api takes a max of 100
number_this_round = min(n, 100)
url = "https://api.crossref.org/works?sample={}".format(number_this_round)
if only_journal_articles:
url += "&filter=type:journal-article"
if from_date:
url += ",from-pub-date:{}".format(from_date)
print(url)
print("calling crossref, asking for {} dois, so far have {} of {} dois".format(
number_this_round, len(dois), n))
r = requests.get(url)
items = r.json()["message"]["items"]
dois += [item["DOI"].lower() for item in items]
return dois
# from https://github.com/elastic/elasticsearch-py/issues/374
# to work around unicode problem
# class JSONSerializerPython2(elasticsearch.serializer.JSONSerializer):
# """Override elasticsearch library serializer to ensure it encodes utf characters during json dump.
# See original at: https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/serializer.py#L42
# A description of how ensure_ascii encodes unicode characters to ensure they can be sent across the wire
# as ascii can be found here: https://docs.python.org/2/library/json.html#basic-usage
# """
# def dumps(self, data):
# # don't serialize strings
# if isinstance(data, elasticsearch.compat.string_types):
# return data
# try:
# return json.dumps(data, default=self.default, ensure_ascii=True)
# except (ValueError, TypeError) as e:
# raise elasticsearch.exceptions.SerializationError(data, e)
def is_the_same_url(url1, url2):
norm_url1 = strip_jsessionid_from_url(url1.replace("https", "http"))
norm_url2 = strip_jsessionid_from_url(url2.replace("https", "http"))
if norm_url1 == norm_url2:
return True
return False
def strip_jsessionid_from_url(url):
url = re.sub(r";jsessionid=\w+", "", url)
return url
def get_link_target(url, base_url, strip_jsessionid=True):
if strip_jsessionid:
url = strip_jsessionid_from_url(url)
if base_url:
url = urllib.parse.urljoin(base_url, url)
return url
def sql_escape_string(value):
if value == None:
return "null"
value = value.replace("'", "''")
return value
def sql_bool(is_value):
if is_value==True:
return "true"
if is_value==False:
return "false"
return "null"
def run_sql(db, q):
q = q.strip()
if not q:
return
start = time.time()
try:
con = db.engine.connect()
trans = con.begin()
con.execute(q)
trans.commit()
except exc.ProgrammingError as e:
pass
finally:
con.close()
def get_sql_answer(db, q):
row = db.engine.execute(sql.text(q)).first()
if row:
return row[0]
return None
def get_sql_answers(db, q):
rows = db.engine.execute(sql.text(q)).fetchall()
if not rows:
return []
return [row[0] for row in rows if row]
def get_sql_rows(db, q):
rows = db.engine.execute(sql.text(q)).fetchall()
if not rows:
return []
return rows
def get_sql_dict_rows(q):
from app import get_db_cursor
with get_db_cursor() as cursor:
cursor.execute(q)
rows = cursor.fetchall()
return rows
# https://github.com/psycopg/psycopg2/issues/897
def build_row_dict(columns, row):
index = 0
dict = {}
for key in columns:
value = row[index]
dict[key] = value
index += 1
return dict
def cursor_rows_to_dicts(column_string, cursor_rows):
column_list = column_string.replace(" ", "").split(",")
response = []
for row in cursor_rows:
row_dict = build_row_dict(column_list, row)
response.append(row_dict)
return response
def normalize_title(title):
if not title:
return ""
# just first n characters
response = title[0:500]
# lowercase
response = response.lower()
# deal with unicode
response = unidecode(str(response))
# has to be before remove_punctuation
# the kind in titles are simple <i> etc, so this is simple
response = clean_html(response)
# remove articles and common prepositions
response = re.sub(r"\b(the|a|an|of|to|in|for|on|by|with|at|from)\b", "", response)
# remove everything except alphas
response = remove_everything_but_alphas(response)
return response
# from https://gist.github.com/douglasmiranda/5127251
# deletes a key from nested dict
def delete_key_from_dict(dictionary, key):
    for k, v in dictionary.items():
        if k == key:
            yield v
        elif isinstance(v, dict):
            for result in delete_key_from_dict(v, key):
                yield result
        elif isinstance(v, list):
            for d in v:
                for result in delete_key_from_dict(d, key):
                    yield result
def restart_dynos(app_name, dyno_prefix):
heroku_conn = heroku3.from_key(os.getenv('HEROKU_API_KEY'))
app = heroku_conn.apps()[app_name]
dynos = app.dynos()
for dyno in dynos:
if dyno.name.startswith(dyno_prefix):
dyno.restart()
print("restarted {} on {}!".format(dyno.name, app_name))
def is_same_publisher(publisher1, publisher2):
if publisher1 and publisher2:
return normalize(publisher1) == normalize(publisher2)
return False
def myconverter(o):
if isinstance(o, datetime.datetime):
return o.isoformat()
if isinstance(o, np.int64):
return int(o)
raise TypeError(repr(o) + " is not JSON serializable")
# from https://stackoverflow.com/a/50762571/596939
def jsonify_fast_no_sort(*args, **kwargs):
if args and kwargs:
raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
# turn this to False to be even faster, but warning then responses may not cache
sort_keys = False
return current_app.response_class(
dumps(data,
skipkeys=True,
ensure_ascii=True,
check_circular=False,
allow_nan=True,
cls=None,
default=myconverter,
indent=None,
# separators=None,
sort_keys=sort_keys) + '\n', mimetype=current_app.config['JSONIFY_MIMETYPE']
)
# from https://stackoverflow.com/a/50762571/596939
def jsonify_fast(*args, **kwargs):
if args and kwargs:
raise TypeError('jsonify() behavior undefined when passed both args and kwargs')
elif len(args) == 1: # single args are passed directly to dumps()
data = args[0]
else:
data = args or kwargs
# turn this to False to be even faster, but warning then responses may not cache
sort_keys = True
return current_app.response_class(
dumps(data,
skipkeys=True,
ensure_ascii=True,
check_circular=False,
allow_nan=True,
cls=None,
default=myconverter,
indent=None,
# separators=None,
sort_keys=sort_keys) + '\n', mimetype=current_app.config['JSONIFY_MIMETYPE']
)
def find_normalized_license(text):
if not text:
return None
normalized_text = text.replace(" ", "").replace("-", "").lower()
# the lookup order matters
# assumes no spaces, no dashes, and all lowercase
# inspired by https://github.com/CottageLabs/blackbox/blob/fc13e5855bd13137cf1ef8f5e93883234fdab464/service/licences.py
# thanks CottageLabs! :)
license_lookups = [
("koreanjpathol.org/authors/access.php", "cc-by-nc"), # their access page says it is all cc-by-nc now
("elsevier.com/openaccess/userlicense", "elsevier-specific: oa user license"), #remove the - because is removed in normalized_text above
("pubs.acs.org/page/policy/authorchoice_termsofuse.html", "acs-specific: authorchoice/editors choice usage agreement"),
("creativecommons.org/licenses/byncnd", "cc-by-nc-nd"),
("creativecommonsattributionnoncommercialnoderiv", "cc-by-nc-nd"),
("ccbyncnd", "cc-by-nc-nd"),
("creativecommons.org/licenses/byncsa", "cc-by-nc-sa"),
("creativecommonsattributionnoncommercialsharealike", "cc-by-nc-sa"),
("ccbyncsa", "cc-by-nc-sa"),
("creativecommons.org/licenses/bynd", "cc-by-nd"),
("creativecommonsattributionnoderiv", "cc-by-nd"),
("ccbynd", "cc-by-nd"),
("creativecommons.org/licenses/bysa", "cc-by-sa"),
("creativecommonsattributionsharealike", "cc-by-sa"),
("ccbysa", "cc-by-sa"),
("creativecommons.org/licenses/bync", "cc-by-nc"),
("creativecommonsattributionnoncommercial", "cc-by-nc"),
("ccbync", "cc-by-nc"),
("creativecommons.org/licenses/by", "cc-by"),
("creativecommonsattribution", "cc-by"),
("ccby", "cc-by"),
("creativecommons.org/publicdomain/zero", "cc0"),
("creativecommonszero", "cc0"),
("creativecommons.org/publicdomain/mark", "pd"),
("publicdomain", "pd"),
# ("openaccess", "oa")
]
    for (lookup, license) in license_lookups:
        if lookup in normalized_text:
            if license == "pd":
                # normalized_text is already a str here, so no decode is needed
                if "worksnotinthepublicdomain" in normalized_text:
                    return None
            return license
    return None
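# Examples of the lookup above (inputs are illustrative):
#
#   find_normalized_license('Creative Commons Attribution 4.0')          # -> 'cc-by'
#   find_normalized_license('Creative Commons Attribution-NonCommercial') # -> 'cc-by-nc'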
def for_sorting(x):
if x is None:
return float('inf')
return x
def response_json(r):
from flask import make_response
response = make_response(r.json(), r.status_code)
response.mimetype = "application/json"
return response
def abort_json(status_code, msg):
from flask import make_response
from flask import abort
body_dict = {"HTTP_status_code": status_code, "message": msg, "error": True}
response_json = json.dumps(body_dict, sort_keys=True, indent=4)
response = make_response(response_json, status_code)
response.mimetype = "application/json"
abort(response)
def format_currency(amount, cents=False):
if amount == None:
return None
if not cents:
        amount = round(amount)
my_string = locale.currency(amount, grouping=True)
my_string = my_string.replace(".00", "")
else:
my_string = locale.currency(amount, grouping=True)
return my_string
def format_percent(amount, num_decimals=0):
if amount == None:
return None
my_string = "{:0." + str(num_decimals) + "f}%"
my_string = my_string.format(amount)
return my_string
def format_with_commas(amount, num_decimals=0):
if amount == None:
return None
try:
my_string = "{:0,." + str(num_decimals) + "f}"
my_string = my_string.format(amount)
return my_string
except:
return locale.format('%d', amount, True)
def get_ip(request):
# from http://stackoverflow.com/a/12771438/596939
if request.headers.getlist("X-Forwarded-For"):
ip = request.headers.getlist("X-Forwarded-For")[0]
else:
ip = request.remote_addr
return ip
# this is to support fully after-flask response sent efforts
# from # https://stackoverflow.com/a/51013358/596939
# use like
# @app.after_response
# def say_hi():
# print("hi")
class AfterResponse:
def __init__(self, app=None):
self.callbacks = []
if app:
self.init_app(app)
def __call__(self, callback):
self.callbacks.append(callback)
return callback
def init_app(self, app):
# install extension
app.after_response = self
# install middleware
app.wsgi_app = AfterResponseMiddleware(app.wsgi_app, self)
def flush(self):
for fn in self.callbacks:
try:
fn()
except Exception:
traceback.print_exc()
class AfterResponseMiddleware:
def __init__(self, application, after_response_ext):
self.application = application
self.after_response_ext = after_response_ext
def __call__(self, environ, after_response):
iterator = self.application(environ, after_response)
try:
return ClosingIterator(iterator, [self.after_response_ext.flush])
except Exception:
traceback.print_exc()
return iterator
def authenticated_user_id():
jwt_identity = get_jwt_identity()
return jwt_identity.get('user_id', None) if jwt_identity else None
def convert_to_utf_8(file_name):
with open(file_name, 'rb') as input_file:
sample = input_file.read(1024*1024)
if not sample:
return file_name
# first, look for a unicode BOM
# https://unicodebook.readthedocs.io/guess_encoding.html#check-for-bom-markers
BOMS = (
(BOM_UTF8, 'UTF-8'),
(BOM_UTF32_BE, 'UTF-32-BE'),
(BOM_UTF32_LE, 'UTF-32-LE'),
(BOM_UTF16_BE, 'UTF-16-BE'),
(BOM_UTF16_LE, 'UTF-16-LE'),
)
possible_encodings = [encoding for bom, encoding in BOMS if sample.startswith(bom)]
    # look for UTF-32 or UTF-16 by null byte frequency
    # (iterating over bytes yields ints in Python 3, so compare against 0)
    nulls = len([c for c in sample if c == 0]) / float(len(sample))
    leading_nulls = len([c for i, c in enumerate(sample) if c == 0 and i % 4 == 0]) / float(len(sample)/4)
if nulls > .6 :
if leading_nulls > .9:
possible_encodings.append('UTF-32-BE')
else:
possible_encodings.append('UTF-32-LE')
elif nulls > .1:
if leading_nulls > .9:
possible_encodings.append('UTF-16-BE')
else:
possible_encodings.append('UTF-16-LE')
possible_encodings.append('UTF-8')
possible_encodings.append('windows-1252')
chardet_encoding = chardet.detect(sample)['encoding']
if chardet_encoding:
possible_encodings.append(chardet_encoding)
possible_encodings.append('cp437')
for pe in possible_encodings:
try:
new_file_name = tempfile.mkstemp('_{}_to_utf8_{}'.format(pe, os.path.split(file_name)[-1]))[1]
with codecs.open(file_name, 'r', pe) as input_file:
with codecs.open(new_file_name, 'w', 'utf-8') as output_file:
while True:
contents = input_file.read(1024*1024)
if not contents:
break
output_file.write(contents)
return new_file_name
except UnicodeDecodeError:
continue
raise UnicodeError("Can't determine text encoding (tried {}).".format(possible_encodings))
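# Hypothetical usage: transcode an uploaded CSV to UTF-8 before parsing with
# read_csv_file() defined above:
#
#   utf8_path = convert_to_utf_8('upload.csv')
#   rows = read_csv_file(utf8_path)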
def write_to_tempfile(file_contents, strip=False):
if strip:
lines = file_contents.strip().split('\n')
lines = [line.strip() for line in lines]
file_contents = '\n'.join(lines)
temp_file_name = tempfile.mkstemp()[1]
with codecs.open(temp_file_name, 'w', 'utf-8') as temp_file:
temp_file.write(file_contents)
return temp_file_name
# f5 from https://www.peterbe.com/plog/uniqifiers-benchmark
def uniquify_list(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
# in old Python versions:
# if seen.has_key(marker)
# but in new ones:
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
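# Example: uniquify_list([3, 1, 3, 2, 1]) -> [3, 1, 2]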
| 23,771 | 352 | 1,821 |
d8720d7f22de0deb1bb68bd9101b2c62c779b225 | 3,173 | py | Python | rosys/pathplanning/obstacle_map.py | zauberzeug/rosys | 10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8 | ["MIT"] | 1 | 2022-02-20T08:21:07.000Z | 2022-02-20T08:21:07.000Z | rosys/pathplanning/obstacle_map.py | zauberzeug/rosys | 10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8 | ["MIT"] | 1 | 2022-03-08T12:46:09.000Z | 2022-03-08T12:46:09.000Z | rosys/pathplanning/obstacle_map.py | zauberzeug/rosys | 10271c88ffd5dcc4fb8eec93d46fe4144a9e40d8 | ["MIT"] | null | null | null |
import numpy as np
from scipy import ndimage
import cv2
from ..world.world import World
from .binary_renderer import BinaryRenderer
from .grid import Grid
from .robot_renderer import RobotRenderer
| 41.75 | 103 | 0.622754 | import numpy as np
from scipy import ndimage
import cv2
from ..world.world import World
from .binary_renderer import BinaryRenderer
from .grid import Grid
from .robot_renderer import RobotRenderer
class ObstacleMap:
def __init__(self, grid, map_, robot_renderer):
self.grid = grid
self.map = map_
self.stack = np.zeros(grid.size, dtype=bool)
self.dist_stack = np.zeros(self.stack.shape)
for layer in range(grid.size[2]):
_, _, yaw = grid.from_grid(0, 0, layer)
kernel = robot_renderer.render(grid.pixel_size, yaw).astype(np.uint8)
self.stack[:, :, layer] = cv2.dilate(self.map.astype(np.uint8), kernel)
self.dist_stack[:, :, layer] = \
ndimage.distance_transform_edt(~self.stack[:, :, layer]) * grid.pixel_size
# NOTE: when yaw wraps around, map_coordinates should wrap around on axis 2
self.stack = np.dstack((self.stack, self.stack[:, :, :1]))
self.dist_stack = np.dstack((self.dist_stack, self.dist_stack[:, :, :1]))
@staticmethod
def from_list(grid, obstacles, robot_renderer):
map_ = np.zeros(grid.size[:2], dtype=bool)
for x, y, w, h in obstacles:
r0, c0 = grid.to_grid(x, y)
r1, c1 = grid.to_grid(x + w, y + h)
map_[int(np.round(r0)):int(np.round(r1))+1,
int(np.round(c0)):int(np.round(c1))+1] = True
return ObstacleMap(grid, map_, robot_renderer)
@staticmethod
def from_world(world: World, grid: Grid):
robot_renderer = RobotRenderer(world.robot.shape.outline)
binary_renderer = BinaryRenderer(grid.size[:2])
for obstacle in world.obstacles.values():
binary_renderer.polygon(np.array([grid.to_grid(p.x, p.y)[::-1] for p in obstacle.outline]))
return ObstacleMap(grid, binary_renderer.map, robot_renderer)
def test(self, x, y, yaw):
row, col, layer = self.grid.to_grid(x, y, yaw)
return ndimage.map_coordinates(self.stack, [[row], [col], [layer]], order=0)
t_lookup = [np.linspace(0, 1, i) for i in range(360)]
def _create_poses(self, spline, backward):
def pose(t): return (
spline.x(t),
spline.y(t),
spline.yaw(t) + [0, np.pi][backward],
)
row0, col0, layer0 = self.grid.to_grid(*pose(0.0))
row1, col1, layer1 = self.grid.to_grid(*pose(1.0))
num_rows = int(abs(row1 - row0))
num_cols = int(abs(col1 - col0))
num_layers = int(abs(layer1 - layer0))
n = max(num_rows, num_cols, num_layers)
t = ObstacleMap.t_lookup[n] if n < len(ObstacleMap.t_lookup) else np.linspace(0, 1, n)
return pose(t)
def test_spline(self, spline, backward=False):
return self.test(*self._create_poses(spline, backward)).any()
def get_distance(self, x, y, yaw):
row, col, layer = self.grid.to_grid(x, y, yaw)
return ndimage.map_coordinates(self.dist_stack, [[row], [col], [layer]], order=0)
def get_minimum_spline_distance(self, spline, backward=False):
return self.get_distance(*self._create_poses(spline, backward)).min()
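# A hypothetical usage sketch (the Grid constructor arguments are assumed, not
# defined in this file):
#
#   grid = Grid(...)                      # grid exposing .size, .pixel_size, to/from_grid
#   robot = RobotRenderer(robot_outline)  # robot_outline: list of outline points
#   obstacle_map = ObstacleMap.from_list(grid, [(1.0, 1.0, 0.5, 0.5)], robot)
#   blocked = obstacle_map.test(x=1.2, y=1.2, yaw=0.0)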
| 2,644 | 308 | 23 |
d13dc9b857e96a7e9d8ab8282640f8d59ecf3e70 | 415 | py | Python | trik/trik.py | Zeyu-Li/kattis_solutions | 1599725facc32945f62acc9a5c3982d5834ba9ba | ["MIT"] | null | null | null | trik/trik.py | Zeyu-Li/kattis_solutions | 1599725facc32945f62acc9a5c3982d5834ba9ba | ["MIT"] | null | null | null | trik/trik.py | Zeyu-Li/kattis_solutions | 1599725facc32945f62acc9a5c3982d5834ba9ba | ["MIT"] | null | null | null |
if __name__ == "__main__":
main()
| 18.043478 | 35 | 0.375904 | def main():
counts = input()
a = [1,0,0]
for char in counts:
if char == 'A':
a[0], a[1] = a[1], a[0]
elif char == 'B':
a[1], a[2] = a[2], a[1]
else:
a[0], a[2] = a[2], a[0]
# print(a)
for index, i in enumerate(a):
if i == 1:
print(index+1)
break
return
if __name__ == "__main__":
main()
| 354 | 0 | 22 |
b17485bf75669c3ce0838b5ddff2c0e6f5259d55 | 1,140 | py | Python | server/app/views/index.py | TrafficStuck/ts-server | 6d4f197d6f8542b85133dbb3cbfb3738a1caaeb5 | ["MIT"] | null | null | null | server/app/views/index.py | TrafficStuck/ts-server | 6d4f197d6f8542b85133dbb3cbfb3738a1caaeb5 | ["MIT"] | 21 | 2021-03-02T10:50:50.000Z | 2021-09-29T12:55:29.000Z | server/app/views/index.py | TrafficStuck/ts-server | 6d4f197d6f8542b85133dbb3cbfb3738a1caaeb5 | ["MIT"] | null | null | null |
"""This module provides basic server endpoints."""
from http import HTTPStatus
from flask import Blueprint, request
from app.utils.misc import make_response
internal_blueprint = Blueprint('traffic-stuck-internal', __name__)
@internal_blueprint.route("/health", methods=['GET'])
def get_health():
"""Return health OK http status."""
return make_response(True, "OK", HTTPStatus.OK)
def handle_404(error):
"""Return custom response for 404 http status code."""
return make_response(
False,
f"The endpoint ({request.path}) you are trying to access could not be found on the server.",
error.code
)
def handle_405(error):
"""Return custom response for 405 http status code."""
return make_response(
False,
f"The method ({request.method}) you are trying to use for this URL could not be handled on the server.",
error.code
)
def handle_500(error):
"""Return custom response for 500 http status code."""
return make_response(
False,
"Something has gone wrong on the server side. Please, try again later.",
error.code
)
| 25.909091 | 112 | 0.676316 | """This module provides basic server endpoints."""
from http import HTTPStatus
from flask import Blueprint, request
from app.utils.misc import make_response
internal_blueprint = Blueprint('traffic-stuck-internal', __name__)
@internal_blueprint.route("/health", methods=['GET'])
def get_health():
"""Return health OK http status."""
return make_response(True, "OK", HTTPStatus.OK)
def handle_404(error):
"""Return custom response for 404 http status code."""
return make_response(
False,
f"The endpoint ({request.path}) you are trying to access could not be found on the server.",
error.code
)
def handle_405(error):
"""Return custom response for 405 http status code."""
return make_response(
False,
f"The method ({request.method}) you are trying to use for this URL could not be handled on the server.",
error.code
)
def handle_500(error):
"""Return custom response for 500 http status code."""
return make_response(
False,
"Something has gone wrong on the server side. Please, try again later.",
error.code
)
| 0 | 0 | 0 |
a2198a5c61777794140e87db46e40df1259fb8da | 7,397 | py | Python | client/starwhale/utils/config.py | star-whale/starwhale | 11cfe86d3a0c2972b508812d101f1b32e4166706 | ["Apache-2.0"] | 13 | 2022-03-09T15:27:29.000Z | 2022-03-29T06:12:47.000Z | client/starwhale/utils/config.py | star-whale/starwhale | 11cfe86d3a0c2972b508812d101f1b32e4166706 | ["Apache-2.0"] | 7 | 2022-03-14T08:59:39.000Z | 2022-03-30T00:50:40.000Z | client/starwhale/utils/config.py | star-whale/starwhale | 11cfe86d3a0c2972b508812d101f1b32e4166706 | ["Apache-2.0"] | 9 | 2022-03-10T08:12:44.000Z | 2022-03-26T15:00:13.000Z |
import os
import typing as t
import getpass
from pathlib import Path
import yaml
from starwhale.consts import (
UserRoleType,
SW_CLI_CONFIG,
DEFAULT_PROJECT,
DEFAULT_INSTANCE,
SW_LOCAL_STORAGE,
ENV_SW_CLI_CONFIG,
STANDALONE_INSTANCE,
LOCAL_CONFIG_VERSION,
)
from starwhale.utils.error import NotFoundError
from . import console, now_str, fmt_http_server
from .fs import ensure_dir, ensure_file
_config: t.Dict[str, t.Any] = {}
_CURRENT_SHELL_USERNAME = getpass.getuser()
# TODO: abstract better common base or mixed class
| 30.566116 | 128 | 0.615925 | import os
import typing as t
import getpass
from pathlib import Path
import yaml
from starwhale.consts import (
UserRoleType,
SW_CLI_CONFIG,
DEFAULT_PROJECT,
DEFAULT_INSTANCE,
SW_LOCAL_STORAGE,
ENV_SW_CLI_CONFIG,
STANDALONE_INSTANCE,
LOCAL_CONFIG_VERSION,
)
from starwhale.utils.error import NotFoundError
from . import console, now_str, fmt_http_server
from .fs import ensure_dir, ensure_file
_config: t.Dict[str, t.Any] = {}
_CURRENT_SHELL_USERNAME = getpass.getuser()
def load_swcli_config() -> t.Dict[str, t.Any]:
global _config
if _config:
ensure_dir(Path(_config["storage"]["root"]) / DEFAULT_PROJECT, recursion=True)
return _config
# TODO: add set_global_env func in cli startup
fpath = get_swcli_config_path()
if not os.path.exists(fpath):
_config = render_default_swcli_config(fpath)
else:
with open(fpath) as f:
_config = yaml.safe_load(f)
_version = _config.get("version")
if _version != LOCAL_CONFIG_VERSION:
console.print(
f":cherries: {fpath} use unexpected version({_version}), swcli only support {LOCAL_CONFIG_VERSION} version."
)
console.print(
f":carrot: {fpath} will be upgraded to {LOCAL_CONFIG_VERSION} automatically."
)
_config = render_default_swcli_config(fpath)
ensure_dir(Path(_config["storage"]["root"]) / DEFAULT_PROJECT, recursion=True)
return _config
def render_default_swcli_config(fpath: str) -> t.Dict[str, t.Any]:
from starwhale.base.type import InstanceType
c = dict(
version=LOCAL_CONFIG_VERSION,
instances={
STANDALONE_INSTANCE: dict(
uri=DEFAULT_INSTANCE,
user_name=_CURRENT_SHELL_USERNAME,
current_project=DEFAULT_PROJECT,
type=InstanceType.STANDALONE,
updated_at=now_str(), # type: ignore
)
},
current_instance=DEFAULT_INSTANCE,
storage=dict(root=str(SW_LOCAL_STORAGE.resolve())),
)
render_swcli_config(c, fpath)
return c
def update_swcli_config(**kw: t.Any) -> None:
c = load_swcli_config()
# TODO: tune update config
# TODO: add deepcopy for dict?
c.update(kw)
render_swcli_config(c)
def get_swcli_config_path() -> str:
fpath = os.environ.get(ENV_SW_CLI_CONFIG, "")
if not fpath or not os.path.exists(fpath):
fpath = str(SW_CLI_CONFIG)
return fpath
def render_swcli_config(c: t.Dict[str, t.Any], path: str = "") -> None:
fpath = path or get_swcli_config_path()
ensure_dir(os.path.dirname(fpath), recursion=True)
ensure_file(fpath, yaml.safe_dump(c, default_flow_style=False), mode=0o600)
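# Illustrative round trip with the helpers above (no new configuration keys
# are introduced):
#
#   cfg = load_swcli_config()
#   alias = cfg["current_instance"]
#   update_swcli_config(current_instance=alias)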
# TODO: abstract better common base or mixed class
class SWCliConfigMixed(object):
def __init__(self, swcli_config: t.Union[t.Dict[str, t.Any], None] = None) -> None:
self._config = swcli_config or load_swcli_config()
@property
def rootdir(self) -> Path:
return Path(self._config["storage"]["root"])
@property
def workdir(self) -> Path:
return self.rootdir / "workdir"
@property
def pkgdir(self) -> Path:
return self.rootdir / "pkg"
@property
def dataset_dir(self) -> Path:
return self.rootdir / "dataset"
@property
def eval_run_dir(self) -> Path:
return self.rootdir / "run" / "eval"
@property
def sw_remote_addr(self) -> str:
addr = self._current_instance_obj.get("uri", "")
return fmt_http_server(addr)
@property
def user_name(self) -> str:
return self._current_instance_obj.get("user_name", "")
@property
def _sw_token(self) -> str:
return self._current_instance_obj.get("sw_token", "")
@property
def _current_instance_obj(self) -> t.Dict[str, t.Any]:
return self._config.get("instances", {}).get(self.current_instance, {})
@property
def user_role(self) -> str:
return self._current_instance_obj.get("user_role", "")
@property
def current_instance(self) -> str:
return self._config["current_instance"] # type: ignore
def get_sw_instance_config(self, instance: str) -> t.Dict[str, t.Any]:
instance = self._get_instance_alias(instance)
return self._config["instances"].get(instance, {})
def get_sw_token(self, instance: str) -> str:
return self.get_sw_instance_config(instance).get("sw_token", "")
def _get_instance_alias(self, instance: str) -> str:
if not instance:
return self.current_instance
if instance not in self._config["instances"]:
for k, v in self._config["instances"].items():
if v["uri"] == instance:
return k
return instance
@property
def current_project(self) -> str:
return self._current_instance_obj.get("current_project", DEFAULT_PROJECT)
def select_current_default(self, instance: str, project: str = "") -> None:
instance = self._get_instance_alias(instance)
if instance not in self._config["instances"]:
raise NotFoundError(f"need to login instance {instance}")
self._config["current_instance"] = instance
if project:
if (
instance == STANDALONE_INSTANCE
and not (self.rootdir / project).exists()
):
raise NotFoundError(f"need to create project {project}")
# TODO: check cloud project existence
self._config["instances"][instance]["current_project"] = project
update_swcli_config(**self._config)
def delete_instance(self, uri: str) -> None:
if uri == STANDALONE_INSTANCE:
return
_insts = self._config["instances"]
_alias = uri
if uri in _insts:
_insts.pop(uri)
else:
for k, v in _insts.items():
if v.get("uri") == uri:
_insts.pop(k)
_alias = uri
if _alias == self._config["current_instance"]:
self._config["current_instance"] = DEFAULT_INSTANCE
update_swcli_config(**self._config)
def update_instance(
self,
uri: str,
user_name: str = _CURRENT_SHELL_USERNAME,
user_role: str = UserRoleType.NORMAL,
sw_token: str = "",
alias: str = "",
) -> None:
from starwhale.base.type import InstanceType
# TODO: abstrace instance class
uri = uri.strip()
if not uri.startswith(("http://", "https://")):
uri = f"http://{uri}"
alias = alias or uri
if alias == STANDALONE_INSTANCE:
console.print(f":person_running: skip {STANDALONE_INSTANCE} update")
return
# TODO: add more instance list and search
_instances: t.Dict[str, t.Dict[str, str]] = self._config["instances"]
if alias not in _instances:
_instances[alias] = {}
_instances[alias].update(
uri=uri,
user_name=user_name,
user_role=user_role,
sw_token=sw_token,
type=InstanceType.CLOUD,
updated_at=now_str(), # type: ignore
)
update_swcli_config(**self._config)
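# Hypothetical use of the mixin (URI, alias and token are made-up values):
#
#   sw = SWCliConfigMixed()
#   sw.update_instance("https://cloud.example.com", alias="example",
#                      sw_token="<token>")
#   sw.select_current_default("example")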
| 6,005 | 690 | 137 |
e9c3e27af9aa4a410f686d7b95c805d8ffce192d | 126 | py | Python | Euler1-5/euler3.py | jokkebk/livecoding | b925c5d8d30f5186c6f4c83a44e5704ceb92be48 | ["MIT"] | null | null | null | Euler1-5/euler3.py | jokkebk/livecoding | b925c5d8d30f5186c6f4c83a44e5704ceb92be48 | ["MIT"] | null | null | null | Euler1-5/euler3.py | jokkebk/livecoding | b925c5d8d30f5186c6f4c83a44e5704ceb92be48 | ["MIT"] | null | null | null |
import math
n, i = 600851475143, 2
while n > 1:
if n % i == 0:
print(i)
        n //= i  # integer division keeps n an exact int
else:
i += 1
| 11.454545 | 22 | 0.404762 | import math
n, i = 600851475143, 2
while n > 1:
if n % i == 0:
print(i)
        n //= i  # integer division keeps n an exact int
else:
i += 1
| 0 | 0 | 0 |
5def1f05872b830bf36955bb002ba0e01e02e1a2 | 4,658 | py | Python | 2020/day-15/process.py | MatthieuMichon/advent-of-code | 2749d92de71d544a1b993577e300ee2026cecc94 | ["MIT"] | 1 | 2020-12-08T23:25:23.000Z | 2020-12-08T23:25:23.000Z | 2020/day-15/process.py | MatthieuMichon/advent-of-code | 2749d92de71d544a1b993577e300ee2026cecc94 | ["MIT"] | null | null | null | 2020/day-15/process.py | MatthieuMichon/advent-of-code | 2749d92de71d544a1b993577e300ee2026cecc94 | ["MIT"] | 1 | 2021-02-22T09:50:07.000Z | 2021-02-22T09:50:07.000Z |
#!/usr/bin/env python
"""
Advent of Code 2020: Day 15
"""
import os
import re
import signal
import sys
from types import FrameType
from typing import List, Mapping
from pathlib import Path
DEBUG = False
def spoken_number(starting_numbers: List[int], turns: int) -> int:
"""
Compute spoken number after a given number of turns
:param starting_numbers: list of starting numbers
:param turns: number of rounds
:return: spoken number
"""
spoken_numbers = list()
last_index = lambda li, n: next(i for i in reversed(range(len(li)))
if li[i] == n)
for turn, n in enumerate(starting_numbers):
if DEBUG:
print(f'Turn {1 + turn}: The number spoken is a starting number, {n}.')
spoken_numbers.append(n)
while 1 + turn < turns:
turn += 1
last_number = spoken_numbers[-1]
spoken_before = last_number in spoken_numbers[:-1]
new_spoken_number = 0 if not spoken_before else \
turn - (1 + last_index(spoken_numbers[:-1], last_number))
spoken_numbers.append(new_spoken_number)
if DEBUG:
print(f'Turn {1 + turn}: Last number spoken {last_number}, '
f'was {"" if spoken_before else "not"} spoken before. Number spoken {new_spoken_number}')
return new_spoken_number
def spoken_number_part2(starting_numbers: List[int], turns: int) -> int:
"""
Compute spoken number after a given number of turns (optimized)
:param starting_numbers: list of starting numbers
:param turns: number of rounds
:return: spoken number
"""
spoken_numbers = dict()
last_number: int = 0
last_number_spoken_before: bool = False
turn: int = 0
for i, n in enumerate(starting_numbers):
if turn > 0:
spoken_numbers[last_number] = turn
turn = 1 + i
if DEBUG:
print(f'Turn {turn}: The number spoken is a starting number, {n}.')
last_number = n
last_number_spoken_before = last_number in spoken_numbers
while turn < turns:
turn += 1
new_spoken_number = 0 if not last_number_spoken_before else \
(turn - 1) - spoken_numbers[last_number]
if DEBUG:
print(f'Turn {turn}: Last number spoken {last_number}, '
f'was {"" if last_number_spoken_before else "not"} '
f'spoken before. Number spoken {new_spoken_number}')
spoken_numbers[last_number] = turn - 1
last_number = new_spoken_number
last_number_spoken_before = last_number in spoken_numbers
return last_number
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:param part_two: true for processing part 2
:return: value to submit
"""
debug = False
numbers_list = [list(int(n) for n in l.strip().split(','))
for l in open(file)]
number = 0
for numbers in numbers_list:
number = spoken_number(starting_numbers=numbers, turns=20200)
submission = number
return submission
def process_part2(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
:param part_two: true for processing part 2
:return: value to submit
"""
debug = False
numbers_list = [list(int(n) for n in l.strip().split(','))
for l in open(file)]
number = 0
for numbers in numbers_list:
number = spoken_number_part2(starting_numbers=numbers, turns=30000000)
submission = number
return submission
def main() -> int:
"""
Main function
:return: Shell exit code
"""
file = './input.txt'
submission = process(file=Path(file))
print(f'In file {file}, submission: {submission}')
print(f'Part 2')
file = './input.txt'
submission = process_part2(file=Path(file))
print(f'In file {file}, submission: {submission}')
return 0
def handle_sigint(signal_value: signal.Signals, frame: FrameType) -> None:
"""
Interrupt signal call-back method
:param signal_value: signal (expected SIGINT)
:param frame: current stack frame at the time of signal
:return: nothing
"""
assert signal_value == signal.SIGINT
print(frame.f_locals)
sys.exit(1)
def install_signal_handler() -> None:
"""
Install interrupt signal handler
:return: nothing
"""
signal.signal(signal.SIGINT, handle_sigint)
if __name__ == '__main__':
install_signal_handler()
sys.exit(main())
| 26.316384 | 107 | 0.636539 | #!/usr/bin/env python
"""
Advent of Code 2020: Day 15
"""
import os
import re
import signal
import sys
from types import FrameType
from typing import List, Mapping
from pathlib import Path
DEBUG = False
def spoken_number(starting_numbers: List[int], turns: int) -> int:
"""
Compute spoken number after a given number of turns
:param starting_numbers: list of starting numbers
:param turns: number of rounds
:return: spoken number
"""
spoken_numbers = list()
last_index = lambda li, n: next(i for i in reversed(range(len(li)))
if li[i] == n)
for turn, n in enumerate(starting_numbers):
if DEBUG:
print(f'Turn {1 + turn}: The number spoken is a starting number, {n}.')
spoken_numbers.append(n)
while 1 + turn < turns:
turn += 1
last_number = spoken_numbers[-1]
spoken_before = last_number in spoken_numbers[:-1]
new_spoken_number = 0 if not spoken_before else \
turn - (1 + last_index(spoken_numbers[:-1], last_number))
spoken_numbers.append(new_spoken_number)
if DEBUG:
print(f'Turn {1 + turn}: Last number spoken {last_number}, '
f'was {"" if spoken_before else "not"} spoken before. Number spoken {new_spoken_number}')
return new_spoken_number
def spoken_number_part2(starting_numbers: List[int], turns: int) -> int:
"""
Compute spoken number after a given number of turns (optimized)
:param starting_numbers: list of starting numbers
:param turns: number of rounds
:return: spoken number
"""
spoken_numbers = dict()
last_number: int = 0
last_number_spoken_before: bool = False
turn: int = 0
for i, n in enumerate(starting_numbers):
if turn > 0:
spoken_numbers[last_number] = turn
turn = 1 + i
if DEBUG:
print(f'Turn {turn}: The number spoken is a starting number, {n}.')
last_number = n
last_number_spoken_before = last_number in spoken_numbers
while turn < turns:
turn += 1
new_spoken_number = 0 if not last_number_spoken_before else \
(turn - 1) - spoken_numbers[last_number]
if DEBUG:
print(f'Turn {turn}: Last number spoken {last_number}, '
f'was {"" if last_number_spoken_before else "not"} '
f'spoken before. Number spoken {new_spoken_number}')
spoken_numbers[last_number] = turn - 1
last_number = new_spoken_number
last_number_spoken_before = last_number in spoken_numbers
return last_number
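# Here spoken_numbers is a dict mapping a number to the last turn it was
# spoken, so each of the 30,000,000 turns costs O(1). For the puzzle's stated
# sample, [0, 3, 6] should yield 175594 after 30000000 turns.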
def process(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
    :return: value to submit
    """
numbers_list = [list(int(n) for n in l.strip().split(','))
for l in open(file)]
number = 0
for numbers in numbers_list:
number = spoken_number(starting_numbers=numbers, turns=20200)
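        # NB: part 1 of the 2020 Day 15 puzzle asks for the 2020th spoken
        # number; turns=20200 looks like a typo, but it is kept as written.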
submission = number
return submission
def process_part2(file: Path) -> int:
"""
Process input file yielding the submission value
:param file: file containing the input values
    :return: value to submit
    """
numbers_list = [list(int(n) for n in l.strip().split(','))
for l in open(file)]
number = 0
for numbers in numbers_list:
number = spoken_number_part2(starting_numbers=numbers, turns=30000000)
submission = number
return submission
def main() -> int:
"""
Main function
:return: Shell exit code
"""
file = './input.txt'
submission = process(file=Path(file))
print(f'In file {file}, submission: {submission}')
    print('Part 2')
file = './input.txt'
submission = process_part2(file=Path(file))
print(f'In file {file}, submission: {submission}')
return 0
def handle_sigint(signal_value: signal.Signals, frame: FrameType) -> None:
"""
Interrupt signal call-back method
:param signal_value: signal (expected SIGINT)
:param frame: current stack frame at the time of signal
:return: nothing
"""
assert signal_value == signal.SIGINT
print(frame.f_locals)
sys.exit(1)
def install_signal_handler() -> None:
"""
Install interrupt signal handler
:return: nothing
"""
signal.signal(signal.SIGINT, handle_sigint)
if __name__ == '__main__':
install_signal_handler()
sys.exit(main())
| 0 | 0 | 0 |
fc96884381cb5606db13bce409311eb8c43b7e72 | 823 | py | Python | src/web/modules/smartq/admin.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 5 | 2018-03-08T17:22:27.000Z | 2018-03-11T14:20:53.000Z | src/web/modules/smartq/admin.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 263 | 2018-03-08T18:05:12.000Z | 2022-03-11T23:26:20.000Z | src/web/modules/smartq/admin.py | fossabot/SIStema | 1427dda2082688a9482c117d0e24ad380fdc26a6 | [
"MIT"
] | 6 | 2018-03-12T19:48:19.000Z | 2022-01-14T04:58:52.000Z | from django.contrib import admin
import users.admin
from modules.smartq import models
@admin.register(models.Question)
@admin.register(models.GeneratedQuestion)
@admin.register(models.StaffGeneratedQuestion)
| 21.102564 | 51 | 0.648846 | from django.contrib import admin
import users.admin
from modules.smartq import models
@admin.register(models.Question)
class QuestionAdmin(admin.ModelAdmin):
list_display = (
'short_name',
'created_date',
'modified_date',
)
search_fields = ('=id', 'short_name')
@admin.register(models.GeneratedQuestion)
@admin.register(models.StaffGeneratedQuestion)
class GeneratedQuestionAdmin(admin.ModelAdmin):
list_display = (
'base_question',
'seed',
'user',
)
list_filter = (
'base_question',
)
autocomplete_fields = ('base_question', 'user')
search_fields = (
'base_question__short_name',
'=seed',
'user__profile__first_name',
'user__profile__middle_name',
'user__profile__last_name',
)
| 0 | 566 | 44 |
188af2b09335bf525da1e101ccdddb4de53fc716 | 4,602 | py | Python | yangram/conversations/views.py | LuceteYang/yangram | 922c4924c06043d5d27410611f4a76904452d44e | [
"MIT"
] | 1 | 2019-02-26T21:39:10.000Z | 2019-02-26T21:39:10.000Z | yangram/conversations/views.py | LuceteYang/yangram | 922c4924c06043d5d27410611f4a76904452d44e | [
"MIT"
] | 11 | 2020-06-05T20:02:33.000Z | 2022-02-26T09:55:09.000Z | yangram/conversations/views.py | LuceteYang/yangram | 922c4924c06043d5d27410611f4a76904452d44e | [
"MIT"
] | 1 | 2020-05-05T18:37:11.000Z | 2020-05-05T18:37:11.000Z | from django.shortcuts import render
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from yangram.conversations.models import *
from django.http import JsonResponse
from . import constants, sqls, serializers
from rest_framework import status
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import get_user_model
User = get_user_model()
from django.utils import timezone
from django.contrib.humanize.templatetags.humanize import naturaltime
# Create your views here.
@login_required
@csrf_exempt
@login_required
@login_required
| 46.02 | 147 | 0.779009 | from django.shortcuts import render
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from yangram.conversations.models import *
from django.http import JsonResponse
from . import constants, sqls, serializers
from rest_framework import status
from django.shortcuts import get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.contrib.auth import get_user_model
User = get_user_model()
from django.utils import timezone
from django.contrib.humanize.templatetags.humanize import naturaltime
# Create your views here.
def room(request):
context = {}
return render(request, 'room.html', context)
@login_required
@csrf_exempt
def Conversations(request):
if request.method == 'GET':
try:
page = int(request.GET.get('page'))
user = request.user
row = custom_sql_dictfetch_all(sqls.CONVERSATION_LIST_SQL,[user.id,user.id,page*constants.PAGE_SIZE])
for item in row:
item.update( {'message_created_time':naturaltime(item['message_created_at'])})
except Exception as inst:
print(inst)
return JsonResponse( data={'err': 'bad request'}, status=status.HTTP_400_BAD_REQUEST)
return JsonResponse({'conversationList':row}, safe=False, status=status.HTTP_200_OK)
if request.method == 'POST':
user = request.user
if request.POST.get('user_id') is None:
return JsonResponse({}, safe=False, status=status.HTTP_400_BAD_REQUEST)
# firstquery = Participant.objects.filter(participant_user=user.id).filter(participant_user=request.POST.get('user_id'))
        conversation_with_user_id = request.POST.get('user_id')
        try:
            conversation_with_user = User.objects.get(id=conversation_with_user_id)
            existConversation = custom_sql_dictfetch_all(sqls.CHECK_CONVERSATION_EXISTS_SQL,[user.id,conversation_with_user_id])
except:
return JsonResponse( data={'err': 'bad request'}, status=status.HTTP_400_BAD_REQUEST)
if len(existConversation) > 0:
            # An existing conversation between these two users already exists
return JsonResponse({"conversation_id": existConversation[0].get('conversation_id') }, safe=False, status=status.HTTP_201_CREATED)
        # Create a new conversation
newConversation = Conversation(creator=user)
newConversation.save()
myParticipant = Participant(conversation=newConversation, participant_user=user)
        otherParticipant = Participant(conversation=newConversation, participant_user=conversation_with_user)
myParticipant.save()
otherParticipant.save()
return JsonResponse({"conversation_id": newConversation.id }, safe=False, status=status.HTTP_201_CREATED)
@login_required
def SearchConversations(request):
if request.method == 'GET':
search_msg = request.GET.get('msg')
if search_msg is None or search_msg=="":
return JsonResponse({"conversations":[]}, safe=False, status=status.HTTP_400_BAD_REQUEST)
try:
user = request.user
row = custom_sql_dictfetch_all(sqls.SEARCH_MESSAGE_SQL,[user.id,user.id,'%'+search_msg+'%'])
for item in row:
item.update( {'message_created_time':naturaltime(item['message_created_at'])})
except:
return JsonResponse( data={'err': 'bad request'}, status=status.HTTP_400_BAD_REQUEST)
return JsonResponse({"conversations": list(row) }, safe=False, status=status.HTTP_201_CREATED)
@login_required
def ConversationMessage(request,conversation_id):
if request.method == 'GET':
try:
last_message_id = int(request.GET.get('last_message_id'))
except:
return JsonResponse( data={'err': 'bad request'}, status=status.HTTP_400_BAD_REQUEST)
participation_info = get_object_or_404(Participant, conversation_id=conversation_id, participant_user=request.user)
field_value_pairs = [('conversation_id', conversation_id)]
if last_message_id>0:
field_value_pairs.append(('id__lt', last_message_id))
filter_options = {k:v for k,v in field_value_pairs if v}
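        # Build the ORM filter dynamically: 'id__lt' is only present when the
        # client paginates past the newest page, so the first request simply
        # returns the latest PAGE_SIZE messages of the conversation.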
test_conversation_messages = reversed(Message.objects.filter(
**filter_options
).order_by('-id')[:constants.PAGE_SIZE])
serializer = serializers.FeedUserSerializer(test_conversation_messages, many=True)
other_participations=[]
if last_message_id==0:
participation_info.last_read_date = timezone.now()
participation_info.save()
other_participations_data = Participant.objects.filter(
conversation_id=conversation_id
).exclude(
participant_user=request.user
)
userInfo_serializer = serializers.ParticipantSerializer(other_participations_data, many=True)
other_participations = userInfo_serializer.data
return JsonResponse({'conversation_messages':serializer.data,'other_participations':other_participations}, safe=False, status=status.HTTP_200_OK)
| 3,888 | 0 | 88 |
24791f571ad3e592f184b1d470e2aeeb137bb64a | 948 | py | Python | BuildingCyberLexicon/spiders/twitter/TweetScraper/scrapeTwitter.py | eneyi/CyberLexicon | b5d3e0245675fd692eae4a98df5d0c85604cfaba | [
"MIT"
] | 1 | 2020-10-13T11:55:48.000Z | 2020-10-13T11:55:48.000Z | BuildingCyberLexicon/spiders/twitter/TweetScraper/scrapeTwitter.py | eneyi/CyberLexicon | b5d3e0245675fd692eae4a98df5d0c85604cfaba | [
"MIT"
] | null | null | null | BuildingCyberLexicon/spiders/twitter/TweetScraper/scrapeTwitter.py | eneyi/CyberLexicon | b5d3e0245675fd692eae4a98df5d0c85604cfaba | [
"MIT"
] | 1 | 2020-10-13T11:56:04.000Z | 2020-10-13T11:56:04.000Z | import os
from time import sleep
import json
# use hashtags and mentions from cyber-war news articles as filters on Twitter
if __name__ == "__main__":
hashtags = getMentionsAndHashtags()
for hashtag in hashtags:
outfile = "../../../outputfiles/twitter/"+str(hashtag).replace("#","").replace("@","")+".json"
os.system("scrapy crawl TweetScraper -a query="+hashtag + " -a top_tweet=True" + " -a crawl_user=True"+ " -o "+outfile)
print(" Scraped tweets from "+hashtag)
sleep(30)
| 36.461538 | 127 | 0.594937 | import os
from time import sleep
import json
# use hashtags and mentions from cyber-war news articles as filters on Twitter
def getMentionsAndHashtags():
with open("../../../outputfiles/cwn.json", "r+") as ff:
data = ff.read()
hashtagsMentions, data =[], json.loads(data)
for dd in data:
text = dd.get('text')
for i in text.split():
if (i.startswith('#') or i.startswith('@')) and (len(i)>3 and len(i)<15):
hashtagsMentions.append(i)
return hashtagsMentions
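# Example (hypothetical input, not from the repo): if cwn.json contains a
# tweet whose text is "cyber attack on #infosec by @apt_group", this returns
# ['#infosec', '@apt_group'] (tokens of length 4-14 starting with # or @).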
if __name__ == "__main__":
hashtags = getMentionsAndHashtags()
for hashtag in hashtags:
outfile = "../../../outputfiles/twitter/"+str(hashtag).replace("#","").replace("@","")+".json"
os.system("scrapy crawl TweetScraper -a query="+hashtag + " -a top_tweet=True" + " -a crawl_user=True"+ " -o "+outfile)
print(" Scraped tweets from "+hashtag)
sleep(30)
| 408 | 0 | 22 |
3cb2f495379fa87b1682362e4c498ac0e66b256e | 590 | py | Python | jira.py | 317brian/rando-scripts | ae50080563f8a23783a2806735f508fe0a3bcea2 | [
"Apache-2.0"
] | null | null | null | jira.py | 317brian/rando-scripts | ae50080563f8a23783a2806735f508fe0a3bcea2 | [
"Apache-2.0"
] | null | null | null | jira.py | 317brian/rando-scripts | ae50080563f8a23783a2806735f508fe0a3bcea2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# You can create the script manually, or clone the repo and use the script via a symlink:
# For example `ln -s /Users/brian/rando-scripts/jira.py /usr/local/bin/jira`
# Make sure you create an environment variable named JIRA_URL in your shell config (e.g. .zshenv), set equal to https://your-jira.net/browse/
import webbrowser, sys, pyperclip, os
jira_url = os.environ['JIRA_URL']
if len(sys.argv) > 1:
issue_number = ' '.join(sys.argv[1:])
else:
issue_number = pyperclip.paste()
webbrowser.open_new_tab(jira_url + issue_number)
| 32.777778 | 157 | 0.740678 | #!/usr/bin/env python3
# You can create the script manually, or clone the repo and use the script via a symlink:
# For example `ln -s /Users/brian/rando-scripts/jira.py /usr/local/bin/jira`
# Make sure you create an environment variable named JIRA_URL in your shell config (e.g. .zshenv), set equal to https://your-jira.net/browse/
import webbrowser, sys, pyperclip, os
jira_url = os.environ['JIRA_URL']
if len(sys.argv) > 1:
issue_number = ' '.join(sys.argv[1:])
else:
issue_number = pyperclip.paste()
webbrowser.open_new_tab(jira_url + issue_number)
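# Usage sketch (assuming JIRA_URL=https://your-jira.net/browse/ is exported):
#     $ jira PROJ-123   -> opens https://your-jira.net/browse/PROJ-123
#     $ jira            -> uses whatever issue key is on the clipboard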
| 0 | 0 | 0 |
53a43e6290b908719c9b324da356c61fb8a8fb1c | 1,159 | py | Python | ps/samsung/treetech.py | underflow101/code-example | 1010a7ec6f8b5bf3ef5dbfad2ba075fd785b0cb0 | [
"MIT"
] | null | null | null | ps/samsung/treetech.py | underflow101/code-example | 1010a7ec6f8b5bf3ef5dbfad2ba075fd785b0cb0 | [
"MIT"
] | null | null | null | ps/samsung/treetech.py | underflow101/code-example | 1010a7ec6f8b5bf3ef5dbfad2ba075fd785b0cb0 | [
"MIT"
] | null | null | null | # treetech.py
# boj 16235
from sys import stdin
# input = stdin.readline
dx = (-1, -1, -1, 0, 0, 1, 1, 1)
dy = (-1, 0, 1, -1, 1, -1, 0, 1)
n, m, k = map(int, input().split())
mp = [[5] * n for _ in range(n)]
fert = [list(map(int, input().split())) for _ in range(n)]
tree = list()
for _ in range(m):
_x, _y, _z = map(int, input().split())
    # x, y, age (the third input value is the tree's age in years)
tree.append([_x-1, _y-1, _z])
for i in range(k):
tree = sorted(tree, key=lambda x:(x[0], x[1], x[2]))
# spring & summer
tmp = list()
dead = list()
for item in tree:
if mp[item[0]][item[1]] < item[2]:
mp[item[0]][item[1]] += (item[2] // 2)
else:
mp[item[0]][item[1]] -= item[2]
tmp.append([item[0], item[1], item[2] + 1])
tree = list(tmp)
# fall
for items in tree:
if items[2] % 5 == 0:
for i in range(8):
if 0 <= items[0]+dx[i] < n and 0 <= items[1]+dy[i] < n:
tree.append([items[0]+dx[i], items[1]+dy[i], 1])
# winter
for i in range(n):
for j in range(n):
mp[i][j] += fert[i][j]
print(len(tree)) | 25.195652 | 71 | 0.459879 | # treetech.py
# boj 16235
from sys import stdin
# input = stdin.readline
dx = (-1, -1, -1, 0, 0, 1, 1, 1)
dy = (-1, 0, 1, -1, 1, -1, 0, 1)
n, m, k = map(int, input().split())
mp = [[5] * n for _ in range(n)]
fert = [list(map(int, input().split())) for _ in range(n)]
tree = list()
for _ in range(m):
_x, _y, _z = map(int, input().split())
    # x, y, age (the third input value is the tree's age in years)
tree.append([_x-1, _y-1, _z])
for i in range(k):
tree = sorted(tree, key=lambda x:(x[0], x[1], x[2]))
# spring & summer
tmp = list()
dead = list()
for item in tree:
if mp[item[0]][item[1]] < item[2]:
mp[item[0]][item[1]] += (item[2] // 2)
else:
mp[item[0]][item[1]] -= item[2]
tmp.append([item[0], item[1], item[2] + 1])
tree = list(tmp)
# fall
for items in tree:
if items[2] % 5 == 0:
for i in range(8):
if 0 <= items[0]+dx[i] < n and 0 <= items[1]+dy[i] < n:
tree.append([items[0]+dx[i], items[1]+dy[i], 1])
# winter
for i in range(n):
for j in range(n):
mp[i][j] += fert[i][j]
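# The loop above plays out BOJ 16235's four seasons per year: spring/summer
# (a tree eats nutrients equal to its age and ages by one, or dies and feeds
# the soil age // 2), fall (trees whose age is a multiple of 5 spawn age-1
# trees in the 8 neighbouring cells), winter (fertilizer refills every cell).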
print(len(tree)) | 0 | 0 | 0 |
033588472b7ed9ca2ebefa85133c4bef6d8f9b05 | 99 | py | Python | secondProject/driveTest/driveManager/apps.py | loic9654/Djangodev | 2babb235d68f508c64171a146be8483009dea7f7 | [
"Apache-2.0"
] | null | null | null | secondProject/driveTest/driveManager/apps.py | loic9654/Djangodev | 2babb235d68f508c64171a146be8483009dea7f7 | [
"Apache-2.0"
] | null | null | null | secondProject/driveTest/driveManager/apps.py | loic9654/Djangodev | 2babb235d68f508c64171a146be8483009dea7f7 | [
"Apache-2.0"
] | null | null | null | from django.apps import AppConfig
| 16.5 | 36 | 0.777778 | from django.apps import AppConfig
class DrivemanagerConfig(AppConfig):
name = 'driveManager'
| 0 | 41 | 23 |
fbf7afe96ab301b5ab9ef4b0a41511d602b18303 | 1,502 | py | Python | checker/constants.py | OrphicHymns/Monolith | cacc453945f8d6209cc40d54f7212e7555898c86 | [
"MIT"
] | 2 | 2021-10-17T04:39:50.000Z | 2022-01-10T12:35:25.000Z | checker/constants.py | OrphicHymns/Monolith | cacc453945f8d6209cc40d54f7212e7555898c86 | [
"MIT"
] | null | null | null | checker/constants.py | OrphicHymns/Monolith | cacc453945f8d6209cc40d54f7212e7555898c86 | [
"MIT"
] | null | null | null | MOJANG_AUTH = "https://authserver.mojang.com/authenticate"
JSON_POST_HEADERS = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36 "
}
HTTP_HEADERS = {
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36"
}
HTTPS_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36"
}
CONFIG = """[Monolith]
debug = true
"""
ASCII_TITLE = """ ███▄ ▄███▓ ▒█████ ███▄ █ ▒█████ ██▓ ██▓▄▄▄█████▓ ██░ ██
▓██▒▀█▀ ██▒▒██▒ ██▒ ██ ▀█ █ ▒██▒ ██▒▓██▒ ▓██▒▓ ██▒ ▓▒▓██░ ██▒
▓██ ▓██░▒██░ ██▒▓██ ▀█ ██▒▒██░ ██▒▒██░ ▒██▒▒ ▓██░ ▒░▒██▀▀██░
▒██ ▒██ ▒██ ██░▓██▒ ▐▌██▒▒██ ██░▒██░ ░██░░ ▓██▓ ░ ░▓█ ░██
▒██▒ ░██▒░ ████▓▒░▒██░ ▓██░░ ████▓▒░░██████▒░██░ ▒██▒ ░ ░▓█▒░██▓
░ ▒░ ░ ░░ ▒░▒░▒░ ░ ▒░ ▒ ▒ ░ ▒░▒░▒░ ░ ▒░▓ ░░▓ ▒ ░░ ▒ ░░▒░▒
░ ░ ░ ░ ▒ ▒░ ░ ░░ ░ ▒░ ░ ▒ ▒░ ░ ░ ▒ ░ ▒ ░ ░ ▒ ░▒░ ░
░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ▒ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
"""
COMBO_REG = ".+?@.+?\..+?:.+?"
| 42.914286 | 102 | 0.310919 | MOJANG_AUTH = "https://authserver.mojang.com/authenticate"
JSON_POST_HEADERS = {
"Content-Type": "application/json",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36 "
}
HTTP_HEADERS = {
"Upgrade-Insecure-Requests": "1",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36"
}
HTTPS_HEADERS = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.0.4472.164 Safari/537.36"
}
CONFIG = """[Monolith]
debug = true
"""
ASCII_TITLE = """ ███▄ ▄███▓ ▒█████ ███▄ █ ▒█████ ██▓ ██▓▄▄▄█████▓ ██░ ██
▓██▒▀█▀ ██▒▒██▒ ██▒ ██ ▀█ █ ▒██▒ ██▒▓██▒ ▓██▒▓ ██▒ ▓▒▓██░ ██▒
▓██ ▓██░▒██░ ██▒▓██ ▀█ ██▒▒██░ ██▒▒██░ ▒██▒▒ ▓██░ ▒░▒██▀▀██░
▒██ ▒██ ▒██ ██░▓██▒ ▐▌██▒▒██ ██░▒██░ ░██░░ ▓██▓ ░ ░▓█ ░██
▒██▒ ░██▒░ ████▓▒░▒██░ ▓██░░ ████▓▒░░██████▒░██░ ▒██▒ ░ ░▓█▒░██▓
░ ▒░ ░ ░░ ▒░▒░▒░ ░ ▒░ ▒ ▒ ░ ▒░▒░▒░ ░ ▒░▓ ░░▓ ▒ ░░ ▒ ░░▒░▒
░ ░ ░ ░ ▒ ▒░ ░ ░░ ░ ▒░ ░ ▒ ▒░ ░ ░ ▒ ░ ▒ ░ ░ ▒ ░▒░ ░
░ ░ ░ ░ ░ ▒ ░ ░ ░ ░ ░ ░ ▒ ░ ░ ▒ ░ ░ ░ ░░ ░
░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░ ░
"""
COMBO_REG = ".+?@.+?\..+?:.+?"
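# COMBO_REG matches one "email:password" combo-list line non-greedily,
# e.g. "user@example.com:hunter2" (example credentials, not real data).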
| 0 | 0 | 0 |
4ff1fa100f6d7beea43b3e70fc84a6e50a168c19 | 3,811 | py | Python | django_usda/management/commands/import_r27.py | alexBLR/django-usda | e9b6a63da5e1dddb347e0c8f965b3d1d8f6ae55e | [
"MIT"
] | 3 | 2015-01-27T12:52:34.000Z | 2020-04-24T16:24:07.000Z | django_usda/management/commands/import_r27.py | alexBLR/django-usda | e9b6a63da5e1dddb347e0c8f965b3d1d8f6ae55e | [
"MIT"
] | null | null | null | django_usda/management/commands/import_r27.py | alexBLR/django-usda | e9b6a63da5e1dddb347e0c8f965b3d1d8f6ae55e | [
"MIT"
] | 3 | 2015-07-03T14:24:30.000Z | 2020-04-25T00:43:40.000Z | from django.core.management.base import BaseCommand, CommandError
from django.db.models.loading import get_model
from django_usda.models import Food, FoodGroup, FoodLanguaLFactor, LanguaLFactor, NutrientData, Nutrient, Source, Derivation, Weight, Footnote, DataLink, DataSource, DeletedFood, DeletedNutrient, DeletedFootnote
import zipfile
import csv
import json
import time
from django.db import IntegrityError
from django import db
appLabel = "django_usda"
modelMap = [
{"fileName": "DATA_SRC.txt", "model": DataSource},
{"fileName": "FD_GROUP.txt", "model": FoodGroup},
{"fileName": "FOOD_DES.txt", "model": Food},
{"fileName": "LANGDESC.txt", "model": LanguaLFactor},
{"fileName": "LANGUAL.txt", "model": FoodLanguaLFactor},
{"fileName": "NUTR_DEF.txt", "model": Nutrient},
{"fileName": "DERIV_CD.txt", "model": Derivation},
{"fileName": "SRC_CD.txt", "model": Source},
{"fileName": "NUT_DATA.txt", "model": NutrientData},
{"fileName": "WEIGHT.txt", "model": Weight},
{"fileName": "FOOTNOTE.txt", "model": Footnote},
{"fileName": "DATSRCLN.txt", "model": DataLink}
]
| 33.725664 | 211 | 0.609027 | from django.core.management.base import BaseCommand, CommandError
from django.db.models.loading import get_model
from django_usda.models import Food, FoodGroup, FoodLanguaLFactor, LanguaLFactor, NutrientData, Nutrient, Source, Derivation, Weight, Footnote, DataLink, DataSource, DeletedFood, DeletedNutrient, DeletedFootnote
import zipfile
import csv
import json
import time
from django.db import IntegrityError
from django import db
appLabel = "django_usda"
modelMap = [
{"fileName": "DATA_SRC.txt", "model": DataSource},
{"fileName": "FD_GROUP.txt", "model": FoodGroup},
{"fileName": "FOOD_DES.txt", "model": Food},
{"fileName": "LANGDESC.txt", "model": LanguaLFactor},
{"fileName": "LANGUAL.txt", "model": FoodLanguaLFactor},
{"fileName": "NUTR_DEF.txt", "model": Nutrient},
{"fileName": "DERIV_CD.txt", "model": Derivation},
{"fileName": "SRC_CD.txt", "model": Source},
{"fileName": "NUT_DATA.txt", "model": NutrientData},
{"fileName": "WEIGHT.txt", "model": Weight},
{"fileName": "FOOTNOTE.txt", "model": Footnote},
{"fileName": "DATSRCLN.txt", "model": DataLink}
]
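# The file order above appears deliberate: parent tables (data sources, food
# groups, LanguaL descriptions, nutrient definitions, ...) are imported before
# the rows that reference them, so foreign keys resolve during bulk_create.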
def filter(value):
newValue = value.replace("\r\n", "")
if newValue == "":
return None
return newValue
def importFile(file, model):
contents = file.readlines()
bulk = []
print "Creating objects."
for counter, line in enumerate(contents):
values = line.replace("~", "").decode(
'iso-8859-1').encode('utf8').split("^")
fields = list(model._meta.fields)
if fields[0].get_internal_type() == "AutoField":
del fields[0]
newModel = createObject(model, fields, values)
if newModel:
bulk.append(newModel)
importObjects(model, bulk)
def importObjects(model, bulk):
length = len(bulk)
chunkSize = 50000
if length > chunkSize:
for counter, chunk in enumerate(chunks(bulk, chunkSize)):
print "Importing %s/%s objects into the database." % (counter * chunkSize + len(chunk), length)
importChunk(model, chunk)
else:
print "Importing %s objects into the database." % len(bulk)
importChunk(model, bulk)
def importChunk(model, chunk):
try:
model.objects.bulk_create(chunk)
except IntegrityError as e:
if "Duplicate entry" not in str(e):
print "Database Error: %s" % e
print chunk
def createObject(model, fields, values):
linkedFields = {}
try:
for counter, value in enumerate(values):
value = filter(value)
field = fields[counter]
key = field.name
if not field.null and value == "":
raise Exception(
"%s: Field required but null given." % field.name)
fieldType = field.get_internal_type()
if fieldType == "ForeignKey":
key = key + "_id"
elif fieldType == "BooleanField":
value = False
if value == "Y":
value = True
linkedFields[key] = value
return model(**linkedFields)
except Exception as e:
print "Model creation error for pk '%s': %s" % (values[0], e)
return False
def chunks(l, n):
for i in xrange(0, len(l), n):
yield l[i:i + n]
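# Example (a sketch): list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]],
# which is how importObjects feeds bulk_create in 50000-row batches.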
class Command(BaseCommand):
args = "<zipFile>"
help = 'Import the nutrition database (Only R27 Supported)'
def handle(self, *args, **options):
openedZipFile = zipfile.ZipFile(args[0])
order = 0
for info in modelMap:
print "Importing file '%s' as %s" % (info["fileName"], info["model"]._meta.verbose_name_plural.title())
importFile(openedZipFile.open(info["fileName"]), info["model"])
openedZipFile.close()
| 2,397 | 120 | 161 |
d199bc3cf4c227e15be15f7274aef821de06b5f4 | 2,518 | py | Python | vermouth/tests/helper_functions.py | biomolsim/vermouth-martinize | 332295078bfea680da7f488d2a9d61a97b8c9ae9 | [
"Apache-2.0"
] | 35 | 2018-02-16T12:39:33.000Z | 2022-03-24T12:18:36.000Z | vermouth/tests/helper_functions.py | biomolsim/vermouth-martinize | 332295078bfea680da7f488d2a9d61a97b8c9ae9 | [
"Apache-2.0"
] | 300 | 2018-02-16T12:24:32.000Z | 2022-03-31T13:41:36.000Z | vermouth/tests/helper_functions.py | biomolsim/vermouth-martinize | 332295078bfea680da7f488d2a9d61a97b8c9ae9 | [
"Apache-2.0"
] | 25 | 2018-11-07T18:52:07.000Z | 2022-03-06T08:34:38.000Z | # Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains helper functions for tests.
"""
import operator
import os
import networkx.algorithms.isomorphism as iso
def make_into_set(iter_of_dict):
"""
Convenience function that turns an iterator of dicts into a set of
frozenset of the dict items.
"""
return set(frozenset(dict_.items()) for dict_ in iter_of_dict)
def equal_graphs(g1, g2,
node_attrs=('resid', 'resname', 'atomname', 'chain', 'charge_group', 'atype'),
edge_attrs=()):
"""
Parameters
----------
g1: networkx.Graph
g2: networkx.Graph
node_attrs: collections.abc.Iterable or None
Node attributes to consider. If `None`, the node attribute dicts must
be equal.
edge_attrs: collections.abc.Iterable or None
Edge attributes to consider. If `None`, the edge attribute dicts must
be equal.
Returns
-------
bool
True if `g1` and `g2` are isomorphic, False otherwise.
"""
if node_attrs is None:
node_equal = operator.eq
else:
node_equal = iso.categorical_node_match(node_attrs, [''] * len(node_attrs))
if edge_attrs is None:
edge_equal = operator.eq
else:
edge_equal = iso.categorical_node_match(edge_attrs, [''] * len(edge_attrs))
matcher = iso.GraphMatcher(g1, g2, node_match=node_equal, edge_match=edge_equal)
return matcher.is_isomorphic()
def find_in_path(names=('martinize2', 'martinize2.py')):
"""
Finds and returns the location of one of `names` in PATH, and returns the
first match.
Parameters
----------
names: collections.abc.Sequence
Names to look for in PATH.
Returns
-------
os.PathLike or None
"""
for folder in os.getenv("PATH", '').split(os.pathsep):
for name in names:
fullpath = os.path.join(folder, name)
if os.path.isfile(fullpath):
return fullpath | 31.08642 | 95 | 0.662033 | # Copyright 2018 University of Groningen
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains helper functions for tests.
"""
import operator
import os
import networkx.algorithms.isomorphism as iso
def make_into_set(iter_of_dict):
"""
Convenience function that turns an iterator of dicts into a set of
frozenset of the dict items.
"""
return set(frozenset(dict_.items()) for dict_ in iter_of_dict)
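# Example (a sketch): make_into_set([{'a': 1}, {'b': 2}]) returns
# {frozenset({('a', 1)}), frozenset({('b', 2)})}, i.e. an order-insensitive,
# hashable view of the dicts that can be compared with ==.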
def equal_graphs(g1, g2,
node_attrs=('resid', 'resname', 'atomname', 'chain', 'charge_group', 'atype'),
edge_attrs=()):
"""
Parameters
----------
g1: networkx.Graph
g2: networkx.Graph
node_attrs: collections.abc.Iterable or None
Node attributes to consider. If `None`, the node attribute dicts must
be equal.
edge_attrs: collections.abc.Iterable or None
Edge attributes to consider. If `None`, the edge attribute dicts must
be equal.
Returns
-------
bool
True if `g1` and `g2` are isomorphic, False otherwise.
"""
if node_attrs is None:
node_equal = operator.eq
else:
node_equal = iso.categorical_node_match(node_attrs, [''] * len(node_attrs))
if edge_attrs is None:
edge_equal = operator.eq
else:
edge_equal = iso.categorical_node_match(edge_attrs, [''] * len(edge_attrs))
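    # NB: categorical_node_match is reused for edges above; networkx's
    # categorical_edge_match behaves identically for dict attributes, so this
    # works, but the edge-specific helper would be the idiomatic choice.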
matcher = iso.GraphMatcher(g1, g2, node_match=node_equal, edge_match=edge_equal)
return matcher.is_isomorphic()
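# Usage sketch (graphs invented here, not taken from the test suite):
#     import networkx as nx
#     g1 = nx.Graph(); g1.add_node(0, resname='ALA')
#     g2 = nx.Graph(); g2.add_node(0, resname='ALA')
#     equal_graphs(g1, g2, node_attrs=('resname',))  # True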
def find_in_path(names=('martinize2', 'martinize2.py')):
"""
Finds and returns the location of one of `names` in PATH, and returns the
first match.
Parameters
----------
names: collections.abc.Sequence
Names to look for in PATH.
Returns
-------
os.PathLike or None
"""
for folder in os.getenv("PATH", '').split(os.pathsep):
for name in names:
fullpath = os.path.join(folder, name)
if os.path.isfile(fullpath):
return fullpath | 0 | 0 | 0 |
22d81486520b473e33b27fff2cfa30651a6f94c5 | 4,123 | py | Python | mne/selection.py | ARudiuk/mne-python | 63feb683cd1f8ddd598a78d12c8ef522f9ca2d78 | [
"BSD-3-Clause"
] | 1 | 2016-05-26T19:37:19.000Z | 2016-05-26T19:37:19.000Z | mne/selection.py | ARudiuk/mne-python | 63feb683cd1f8ddd598a78d12c8ef522f9ca2d78 | [
"BSD-3-Clause"
] | null | null | null | mne/selection.py | ARudiuk/mne-python | 63feb683cd1f8ddd598a78d12c8ef522f9ca2d78 | [
"BSD-3-Clause"
] | null | null | null | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from os import path
from .io.meas_info import Info
from . import pick_types
from .utils import logger, verbose
@verbose
def read_selection(name, fname=None, info=None, verbose=None):
"""Read channel selection from file
By default, the selections used in ``mne_browse_raw`` are supported.
Additional selections can be added by specifying a selection file (e.g.
produced using ``mne_browse_raw``) using the ``fname`` parameter.
The ``name`` parameter can be a string or a list of string. The returned
selection will be the combination of all selections in the file where
(at least) one element in name is a substring of the selection name in
the file. For example, ``name=['temporal', 'Right-frontal']`` will produce
a combination of ``'Left-temporal'``, ``'Right-temporal'``, and
``'Right-frontal'``.
The included selections are:
* ``'Vertex'``
* ``'Left-temporal'``
* ``'Right-temporal'``
* ``'Left-parietal'``
* ``'Right-parietal'``
* ``'Left-occipital'``
* ``'Right-occipital'``
* ``'Left-frontal'``
* ``'Right-frontal'``
Parameters
----------
name : str or list of str
Name of the selection. If is a list, the selections are combined.
fname : str
Filename of the selection file (if None, built-in selections are used).
info : instance of Info
Measurement info file, which will be used to determine the spacing
of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag
systems and ``'MEG0111'`` for new ones.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
sel : list of string
List with channel names in the selection.
"""
# convert name to list of string
if not isinstance(name, (list, tuple)):
name = [name]
if isinstance(info, Info):
picks = pick_types(info, meg=True, exclude=())
if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
spacing = 'new'
else:
spacing = 'old'
elif info is not None:
raise TypeError('info must be an instance of Info or None, not %s'
% (type(info),))
else: # info is None
spacing = 'old'
# use built-in selections by default
if fname is None:
fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel')
if not path.isfile(fname):
raise ValueError('The file %s does not exist.' % fname)
# use this to make sure we find at least one match for each name
name_found = dict((n, False) for n in name)
with open(fname, 'r') as fid:
sel = []
for line in fid:
line = line.strip()
# skip blank lines and comments
if len(line) == 0 or line[0] == '#':
continue
# get the name of the selection in the file
pos = line.find(':')
if pos < 0:
logger.info('":" delimiter not found in selections file, '
'skipping line')
continue
sel_name_file = line[:pos]
# search for substring match with name provided
for n in name:
if sel_name_file.find(n) >= 0:
sel.extend(line[pos + 1:].split('|'))
name_found[n] = True
break
# make sure we found at least one match for each name
for n, found in name_found.items():
if not found:
raise ValueError('No match for selection name "%s" found' % n)
# make the selection a sorted list with unique elements
sel = list(set(sel))
sel.sort()
if spacing == 'new': # "new" or "old" by now, "old" is default
sel = [s.replace('MEG ', 'MEG') for s in sel]
return sel
| 35.239316 | 79 | 0.583071 | # Authors: Alexandre Gramfort <alexandre.gramfort@telecom-paristech.fr>
# Matti Hamalainen <msh@nmr.mgh.harvard.edu>
# Martin Luessi <mluessi@nmr.mgh.harvard.edu>
#
# License: BSD (3-clause)
from os import path
from .io.meas_info import Info
from . import pick_types
from .utils import logger, verbose
@verbose
def read_selection(name, fname=None, info=None, verbose=None):
"""Read channel selection from file
By default, the selections used in ``mne_browse_raw`` are supported.
Additional selections can be added by specifying a selection file (e.g.
produced using ``mne_browse_raw``) using the ``fname`` parameter.
The ``name`` parameter can be a string or a list of string. The returned
selection will be the combination of all selections in the file where
(at least) one element in name is a substring of the selection name in
the file. For example, ``name=['temporal', 'Right-frontal']`` will produce
a combination of ``'Left-temporal'``, ``'Right-temporal'``, and
``'Right-frontal'``.
The included selections are:
* ``'Vertex'``
* ``'Left-temporal'``
* ``'Right-temporal'``
* ``'Left-parietal'``
* ``'Right-parietal'``
* ``'Left-occipital'``
* ``'Right-occipital'``
* ``'Left-frontal'``
* ``'Right-frontal'``
Parameters
----------
name : str or list of str
Name of the selection. If is a list, the selections are combined.
fname : str
Filename of the selection file (if None, built-in selections are used).
info : instance of Info
Measurement info file, which will be used to determine the spacing
of channel names to return, e.g. ``'MEG 0111'`` for old Neuromag
systems and ``'MEG0111'`` for new ones.
verbose : bool, str, int, or None
If not None, override default verbose level (see mne.verbose).
Returns
-------
sel : list of string
List with channel names in the selection.
"""
# convert name to list of string
if not isinstance(name, (list, tuple)):
name = [name]
if isinstance(info, Info):
picks = pick_types(info, meg=True, exclude=())
if len(picks) > 0 and ' ' not in info['ch_names'][picks[0]]:
spacing = 'new'
else:
spacing = 'old'
elif info is not None:
raise TypeError('info must be an instance of Info or None, not %s'
% (type(info),))
else: # info is None
spacing = 'old'
# use built-in selections by default
if fname is None:
fname = path.join(path.dirname(__file__), 'data', 'mne_analyze.sel')
if not path.isfile(fname):
raise ValueError('The file %s does not exist.' % fname)
# use this to make sure we find at least one match for each name
name_found = dict((n, False) for n in name)
with open(fname, 'r') as fid:
sel = []
for line in fid:
line = line.strip()
# skip blank lines and comments
if len(line) == 0 or line[0] == '#':
continue
# get the name of the selection in the file
pos = line.find(':')
if pos < 0:
logger.info('":" delimiter not found in selections file, '
'skipping line')
continue
sel_name_file = line[:pos]
# search for substring match with name provided
for n in name:
if sel_name_file.find(n) >= 0:
sel.extend(line[pos + 1:].split('|'))
name_found[n] = True
break
# make sure we found at least one match for each name
for n, found in name_found.items():
if not found:
raise ValueError('No match for selection name "%s" found' % n)
# make the selection a sorted list with unique elements
sel = list(set(sel))
sel.sort()
if spacing == 'new': # "new" or "old" by now, "old" is default
sel = [s.replace('MEG ', 'MEG') for s in sel]
return sel
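# Usage sketch (channel spacing depends on the measurement info, see above):
#     read_selection('temporal')                     # Left- and Right-temporal
#     read_selection(['temporal', 'Right-frontal'])  # union of the three sets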
| 0 | 0 | 0 |
6c3bad3d96508cbe3f4195973059baf235ed402d | 1,364 | py | Python | apply_fortran_template.py | elmanuelito/simple-fortran-parser | 42138901870fa4359bc650049f232ca3720fb01a | [
"MIT"
] | 4 | 2015-11-27T11:13:57.000Z | 2018-05-08T09:01:15.000Z | apply_fortran_template.py | ebranlard/simple-fortran-parser | 42138901870fa4359bc650049f232ca3720fb01a | [
"MIT"
] | null | null | null | apply_fortran_template.py | ebranlard/simple-fortran-parser | 42138901870fa4359bc650049f232ca3720fb01a | [
"MIT"
] | 1 | 2018-08-03T22:02:34.000Z | 2018-08-03T22:02:34.000Z | #!/usr/bin/env python
# --------------------------------------------------------------------------------
# --- PARAMS
# --------------------------------------------------------------------------------
Types=['integer','double precision','real','logical']
Dims =[1,0]
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
import os
import sys
import glob
if len(sys.argv)>1:
Files=sys.argv[1:]
else:
Files=glob.glob('*.Template')
# print('Template files:')
# print(Files)
if len(Files)>0:
filebase=Files[0].replace('.Template','')
#
for typ in Types:
for dim in Dims:
#
TD=typ[0]+'%d'%dim
TD=TD.upper()
td=TD.lower()
filename=filebase+TD+'.f90'
if dim==0:
TYPE_AND_DIM=typ
else:
TYPE_AND_DIM=typ+', dimension(n1)'
#
fr=open(Files[0],'r')
fw=open(filename,'w')
for l in fr.readlines():
l=l.replace('<TD>',TD)
l=l.replace('<N1>','n1')
l=l.replace('<td>',td)
l=l.replace('<TYPE_AND_DIM>',TYPE_AND_DIM)
fw.write(l)
fw.close()
fr.close()
| 23.118644 | 82 | 0.34824 | #!/usr/bin/env python
# --------------------------------------------------------------------------------
# --- PARAMS
# --------------------------------------------------------------------------------
Types=['integer','double precision','real','logical']
Dims =[1,0]
# --------------------------------------------------------------------------------
# ---
# --------------------------------------------------------------------------------
import os
import sys
import glob
if len(sys.argv)>1:
Files=sys.argv[1:]
else:
Files=glob.glob('*.Template')
# print('Template files:')
# print(Files)
if len(Files)>0:
filebase=Files[0].replace('.Template','')
#
for typ in Types:
for dim in Dims:
#
TD=typ[0]+'%d'%dim
TD=TD.upper()
td=TD.lower()
filename=filebase+TD+'.f90'
if dim==0:
TYPE_AND_DIM=typ
else:
TYPE_AND_DIM=typ+', dimension(n1)'
#
fr=open(Files[0],'r')
fw=open(filename,'w')
for l in fr.readlines():
l=l.replace('<TD>',TD)
l=l.replace('<N1>','n1')
l=l.replace('<td>',td)
l=l.replace('<TYPE_AND_DIM>',TYPE_AND_DIM)
fw.write(l)
fw.close()
fr.close()
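# Example of what this expands (template names are ours): with Types as above,
# a file List.Template becomes ListI1.f90 with <TD> -> I1, <td> -> i1,
# <N1> -> n1 and <TYPE_AND_DIM> -> "integer, dimension(n1)"; dim 0 emits the
# bare type with no dimension clause.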
| 0 | 0 | 0 |
77af3cb16b13c36bc1b49ee6c1ee34789921a428 | 3,376 | py | Python | misc_functions/__init__.py | MrJeremyHobbs/Book-Be-Gone | 017926c1ef0956baec8ffcfa85bbfd33430c9bb7 | [
"MIT"
] | 8 | 2019-04-08T16:24:11.000Z | 2021-04-08T18:05:27.000Z | misc_functions/__init__.py | MrJeremyHobbs/Book-Be-Gone | 017926c1ef0956baec8ffcfa85bbfd33430c9bb7 | [
"MIT"
] | 2 | 2019-04-11T19:26:05.000Z | 2019-10-24T15:24:37.000Z | misc_functions/__init__.py | MrJeremyHobbs/Book-Be-Gone | 017926c1ef0956baec8ffcfa85bbfd33430c9bb7 | [
"MIT"
] | 1 | 2019-04-08T16:24:17.000Z | 2019-04-08T16:24:17.000Z | import grequests
import requests | 38.363636 | 102 | 0.650474 | import grequests
import requests
def get_xmls(urls):
rs = (grequests.get(u) for u in urls)
r = grequests.map(rs)
r_array = []
for x in r:
print(x.status_code)
r_array.append(x)
return r_array
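# Usage sketch: get_xmls(['https://example.com/a', 'https://example.com/b'])
# fires both GETs concurrently via grequests and returns the responses in
# input order (beware: a failed request maps to None and would break the
# .status_code access above).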
def check_bibs_api_GET(apikey):
url = f'https://api-na.hosted.exlibrisgroup.com/almaws/v1/bibs/test?apikey={apikey}'
r = requests.get(url)
test_results = r.text
test_results = test_results.replace('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>', '')
test_results = test_results.replace('<test>', '')
test_results = test_results.replace('</test>', '')
return f"BIBS API READ: \t{test_results}"
def check_bibs_api_POST(apikey):
url = f'https://api-na.hosted.exlibrisgroup.com/almaws/v1/bibs/test?apikey={apikey}'
xml = ""
headers = {'Content-Type': 'application/xml', 'charset':'UTF-8'}
r = requests.post(url, data=xml.encode('utf-8'), headers=headers)
test_results = r.text
test_results = test_results.replace('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>', '')
test_results = test_results.replace('<test>', '')
test_results = test_results.replace('</test>', '')
return f"BIBS API WRITE:\t{test_results}"
def check_acquisitions_api_GET(apikey):
url = f'https://api-na.hosted.exlibrisgroup.com/almaws/v1/acq/test?apikey={apikey}'
r = requests.get(url)
test_results = r.text
test_results = test_results.replace('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>', '')
test_results = test_results.replace('<test>', '')
test_results = test_results.replace('</test>', '')
return f"ACQ API READ:\t{test_results}"
def check_acquisitions_api_POST(apikey):
url = f'https://api-na.hosted.exlibrisgroup.com/almaws/v1/acq/test?apikey={apikey}'
xml = ""
headers = {'Content-Type': 'application/xml', 'charset':'UTF-8'}
r = requests.post(url, data=xml.encode('utf-8'), headers=headers)
test_results = r.text
test_results = test_results.replace('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>', '')
test_results = test_results.replace('<test>', '')
test_results = test_results.replace('</test>', '')
return f"ACQ API WRITE:\t{test_results}"
def check_analytics_api(apikey):
pass
def check_configuration_api_GET(apikey):
url = f'https://api-na.hosted.exlibrisgroup.com/almaws/v1/conf/test?apikey={apikey}'
r = requests.get(url)
test_results = r.text
test_results = test_results.replace('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>', '')
test_results = test_results.replace('<test>', '')
test_results = test_results.replace('</test>', '')
return f"CONF API READ:\t{test_results}"
def check_configuration_api_POST(apikey):
url = f'https://api-na.hosted.exlibrisgroup.com/almaws/v1/conf/test?apikey={apikey}'
xml = ""
headers = {'Content-Type': 'application/xml', 'charset':'UTF-8'}
r = requests.post(url, data=xml.encode('utf-8'), headers=headers)
test_results = r.text
test_results = test_results.replace('<?xml version="1.0" encoding="UTF-8" standalone="yes"?>', '')
test_results = test_results.replace('<test>', '')
test_results = test_results.replace('</test>', '')
return f"CONF API WRITE:\t{test_results}" | 3,140 | 0 | 204 |
d48cafa96ed56253b58bb41919d2655791e02dea | 281 | py | Python | dummy_device/register_classes.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | null | null | null | dummy_device/register_classes.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | 4 | 2020-04-06T14:20:58.000Z | 2020-04-17T10:47:11.000Z | dummy_device/register_classes.py | fretchen/synqs_devices | ec4bb24639e9d0d4cb707e1c2a6296a47dfe9f02 | [
"MIT"
] | 2 | 2020-04-10T08:56:28.000Z | 2020-09-06T20:08:29.000Z | """Register the YunTemp to blacs.
This is boilerplate and should be only minimally changed.
"""
from labscript_devices import register_classes
register_classes(
"DummyDevice",
BLACS_tab="user_devices.dummy_device.blacs_tabs.DummyDeviceTab",
runviewer_parser=None,
)
| 21.615385 | 68 | 0.782918 | """Register the YunTemp to blacs.
This is boilerplate and should be only minimally changed.
"""
from labscript_devices import register_classes
register_classes(
"DummyDevice",
BLACS_tab="user_devices.dummy_device.blacs_tabs.DummyDeviceTab",
runviewer_parser=None,
)
| 0 | 0 | 0 |
33ea139d5c0bdc9ba532bf85b6a211c71b4ae8a2 | 790 | py | Python | aplpy/conftest.py | senchp/aplpy | 0081433f8d061073c2f19980d371a8ae76994ea9 | [
"MIT"
] | 1 | 2020-08-12T03:52:50.000Z | 2020-08-12T03:52:50.000Z | aplpy/conftest.py | senchp/aplpy | 0081433f8d061073c2f19980d371a8ae76994ea9 | [
"MIT"
] | null | null | null | aplpy/conftest.py | senchp/aplpy | 0081433f8d061073c2f19980d371a8ae76994ea9 | [
"MIT"
] | null | null | null | # this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
from astropy.tests.pytest_plugins import pytest_addoption as astropy_pytest_addoption
# Uncomment the following line to treat all DeprecationWarnings as
# exceptions
enable_deprecations_as_exceptions()
import os
from astropy.tests.helper import pytest
@pytest.fixture
| 34.347826 | 112 | 0.805063 | # this contains imports plugins that configure py.test for astropy tests.
# by importing them here in conftest.py they are discoverable by py.test
# no matter how it is invoked within the source tree.
from astropy.tests.pytest_plugins import *
from astropy.tests.pytest_plugins import pytest_addoption as astropy_pytest_addoption
# Uncomment the following line to treat all DeprecationWarnings as
# exceptions
enable_deprecations_as_exceptions()
import os
from astropy.tests.helper import pytest
def pytest_addoption(parser):
parser.addoption('--generate-images-path', help="directory to generate reference images in", action='store')
return astropy_pytest_addoption(parser)
@pytest.fixture
def generate(request):
return request.config.getoption("--generate-images-path")
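# Usage sketch: run `pytest --generate-images-path=/tmp/refs` and any test
# that takes the `generate` fixture receives '/tmp/refs' (or None when the
# option is omitted), which is how reference images get (re)generated.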
| 228 | 0 | 45 |
732ad0dfe6e7089832beb01c637675ca162227f6 | 2,231 | py | Python | graphql_compiler/tests/integration_tests/integration_backend_config.py | justinaustin/graphql-compiler | 4e68d592fc97855ca043dc20bdf59be4298647ab | [
"Apache-2.0"
] | null | null | null | graphql_compiler/tests/integration_tests/integration_backend_config.py | justinaustin/graphql-compiler | 4e68d592fc97855ca043dc20bdf59be4298647ab | [
"Apache-2.0"
] | null | null | null | graphql_compiler/tests/integration_tests/integration_backend_config.py | justinaustin/graphql-compiler | 4e68d592fc97855ca043dc20bdf59be4298647ab | [
"Apache-2.0"
] | null | null | null | # Copyright 2018-present Kensho Technologies, LLC.
from collections import namedtuple
from six.moves.urllib.parse import quote_plus
from .. import test_backend
DEFAULT_ROOT_PASSWORD = "root" # nosec
SQL_BACKENDS = {
test_backend.POSTGRES,
test_backend.MYSQL,
test_backend.MARIADB,
test_backend.MSSQL,
test_backend.SQLITE,
}
# sqlite does not require that a DB be created/dropped for testing
EXPLICIT_DB_BACKENDS = {
test_backend.POSTGRES,
test_backend.MYSQL,
test_backend.MARIADB,
test_backend.MSSQL,
}
MATCH_BACKENDS = {
test_backend.ORIENTDB,
}
# Split Neo4j and RedisGraph because RedisGraph doesn't support all Neo4j features.
NEO4J_BACKENDS = {
test_backend.NEO4J,
}
REDISGRAPH_BACKENDS = {
test_backend.REDISGRAPH,
}
pyodbc_parameter_string = "DRIVER={driver};SERVER={server};UID={uid};PWD={pwd}".format( # nosec
driver="{ODBC Driver 17 for SQL SERVER}",
server="127.0.0.1,1434", # Do not change to 'localhost'.
# You won't be able to connect with the db.
uid="SA", # System Administrator.
pwd="Root-secure1",
)
# delimiters must be URL escaped
escaped_pyodbc_parameter_string = quote_plus(pyodbc_parameter_string)
SQL_BACKEND_TO_CONNECTION_STRING = {
    # HACK(bojanserafimov): Entries are commented-out because MSSQL is the only one whose schema
# initialization is properly configured, with a hierarchy of multiple
# databases and schemas. I'm keeping the code to remember the connection
# string formats.
#
test_backend.POSTGRES: "postgresql://postgres:{password}@localhost:5433".format(
password=DEFAULT_ROOT_PASSWORD
),
# test_backend.MYSQL:
# 'mysql://root:{password}@127.0.0.1:3307'.format(password=DEFAULT_ROOT_PASSWORD),
# test_backend.MARIADB:
# 'mysql://root:{password}@127.0.0.1:3308'.format(password=DEFAULT_ROOT_PASSWORD),
test_backend.MSSQL: "mssql+pyodbc:///?odbc_connect={}".format(escaped_pyodbc_parameter_string),
# test_backend.SQLITE:
# 'sqlite:///:memory:',
}
SqlTestBackend = namedtuple(
"SqlTestBackend",
(
"engine",
"base_connection_string",
),
)
| 29.355263 | 99 | 0.693411 | # Copyright 2018-present Kensho Technologies, LLC.
from collections import namedtuple
from six.moves.urllib.parse import quote_plus
from .. import test_backend
DEFAULT_ROOT_PASSWORD = "root" # nosec
SQL_BACKENDS = {
test_backend.POSTGRES,
test_backend.MYSQL,
test_backend.MARIADB,
test_backend.MSSQL,
test_backend.SQLITE,
}
# sqlite does not require that a DB be created/dropped for testing
EXPLICIT_DB_BACKENDS = {
test_backend.POSTGRES,
test_backend.MYSQL,
test_backend.MARIADB,
test_backend.MSSQL,
}
MATCH_BACKENDS = {
test_backend.ORIENTDB,
}
# Split Neo4j and RedisGraph because RedisGraph doesn't support all Neo4j features.
NEO4J_BACKENDS = {
test_backend.NEO4J,
}
REDISGRAPH_BACKENDS = {
test_backend.REDISGRAPH,
}
pyodbc_parameter_string = "DRIVER={driver};SERVER={server};UID={uid};PWD={pwd}".format( # nosec
driver="{ODBC Driver 17 for SQL SERVER}",
server="127.0.0.1,1434", # Do not change to 'localhost'.
# You won't be able to connect with the db.
uid="SA", # System Administrator.
pwd="Root-secure1",
)
# delimiters must be URL escaped
escaped_pyodbc_parameter_string = quote_plus(pyodbc_parameter_string)
SQL_BACKEND_TO_CONNECTION_STRING = {
    # HACK(bojanserafimov): Entries are commented-out because MSSQL is the only one whose schema
# initialization is properly configured, with a hierarchy of multiple
# databases and schemas. I'm keeping the code to remember the connection
# string formats.
#
test_backend.POSTGRES: "postgresql://postgres:{password}@localhost:5433".format(
password=DEFAULT_ROOT_PASSWORD
),
# test_backend.MYSQL:
# 'mysql://root:{password}@127.0.0.1:3307'.format(password=DEFAULT_ROOT_PASSWORD),
# test_backend.MARIADB:
# 'mysql://root:{password}@127.0.0.1:3308'.format(password=DEFAULT_ROOT_PASSWORD),
test_backend.MSSQL: "mssql+pyodbc:///?odbc_connect={}".format(escaped_pyodbc_parameter_string),
# test_backend.SQLITE:
# 'sqlite:///:memory:',
}
SqlTestBackend = namedtuple(
"SqlTestBackend",
(
"engine",
"base_connection_string",
),
)
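# Example of the escaping above (a sketch): quote_plus('DRIVER=x;PWD=a b')
# yields 'DRIVER%3Dx%3BPWD%3Da+b', letting the raw ODBC string travel inside
# the SQLAlchemy URL's odbc_connect query parameter.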
| 0 | 0 | 0 |
e46d6e052ef66211c37cef5353eef69458461d60 | 7,203 | py | Python | pymeda/datasets.py | karunkannan/pymeda | 936e0e326274de121a3f65baa0db0ff14a6c1b05 | [
"Apache-2.0"
] | null | null | null | pymeda/datasets.py | karunkannan/pymeda | 936e0e326274de121a3f65baa0db0ff14a6c1b05 | [
"Apache-2.0"
] | 6 | 2019-02-10T19:30:10.000Z | 2019-03-26T02:23:01.000Z | pymeda/datasets.py | karunkannan/pymeda | 936e0e326274de121a3f65baa0db0ff14a6c1b05 | [
"Apache-2.0"
] | 3 | 2019-02-10T19:46:14.000Z | 2019-03-03T21:07:42.000Z | import os
import json
import statistics
import pandas as pd
import numpy as np
class CSVDataSet(DataSet):
""" A dataset living locally in a .csv file
"""
def getResource(self, index):
"""Get a specific data point from the data set.
Parameters
----------
index : int or string
The index of the data point in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
A ndarray of the data point.
"""
if type(index) is int:
return self.D.iloc[index].as_matrix()
else:
return self.D.loc[index].as_matrix()
def getColumn(self, index):
"""Get a column of the dataframe.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
The values in the column.
"""
if type(index) is int:
return self.D.iloc[:, index].as_matrix()
else:
return self.D[index].as_matrix()
def getColumnValues(self, index):
"""Get the unique values of a column.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
A ndarray of the unique values.
"""
column = self.getColumn(index)
if column.dtype == "float64":
column = column[~np.isnan(column)]
else:
column = column[np.array([x != "NA" for x in column])]
return np.unique(column)
def getColumnDistribution(self, index):
"""Get the distribution of values in a column.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`, :obj:`ndarray`
An array x of the unique labels, and an array y of the count of that label
"""
x = self.getColumnValues(index)
column = self.getColumn(index)
y = [np.sum(column == v) for v in x]
return x, y
def getColumnDescription(self, index, sep="\n"):
"""Get a description of the column.
"""
desc = []
if type(index) is int:
index = self.D.columns.values[index]
for i, name in enumerate(self.D.columns.names):
desc.append(name + ": " + index[i])
return sep.join(desc)
| 30.914163 | 86 | 0.539914 | import os
import json
import statistics
import pandas as pd
import numpy as np
class DataSet:
def __init__(self, D, name="default"):
self.D = D
self.n, self.d = self.D.shape
self.name = name
def getResource(self, index):
return self.D.iloc[index, :]
def saveMetaData(self, filepath):
metadata = dict(d=self.d, n=self.n, name=self.name)
string = json.dumps(metadata, indent=2)
with open(filepath, 'w') as f:
f.write(string)
return string
def getMatrix(self):
return self.D.as_matrix()
def convertDtype(l):
try:
return np.array(l, dtype="float")
    except (ValueError, TypeError):  # non-numeric data falls through to the string path
pass
l = np.array(l, dtype=str)
l[l == 'nan'] = 'NA'
return l
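# Example (a sketch): convertDtype(['1', '2']) -> array([1., 2.]), while
# convertDtype(['1', 'x', float('nan')]) falls through to the string branch
# and maps the textual 'nan' entries to 'NA'.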
class CSVDataSet(DataSet):
""" A dataset living locally in a .csv file
"""
def __init__(self,
csv_path,
index_column=None,
NA_val=".",
name="mydataset"):
self.name = name
# Load the data set
D = pd.read_csv(csv_path, dtype="unicode")
self.n, self.d = D.shape
print("Dataset of size", self.n, "samples", self.d, "dimensions",
"Loaded")
# Convert to numeric all numeric rows
D = D.replace(NA_val, "nan")
print("Replacing all", NA_val, "with nan")
d = []
for c in D.columns:
d.append(convertDtype(list(D[c])))
print("Converting", c, end="\r\r")
newcolumns = D.columns
newindex = D.index
D = list(d)
D = pd.DataFrame(dict(zip(newcolumns, D)), index=newindex)
# Set the index column as specified
if index_column is not None:
print("Setting index column as", index_column)
D.index = D[index_column]
print("Deleting", index_column, "from dataset")
del D[index_column]
self.D = D
# Remove all columns which have all null values
keep = []
allnull = self.D.isnull().all(axis=0)
for c in self.D.columns[allnull]:
print("Removing column", c, "because it has all null values")
keep = self.D.columns[~allnull]
self.D = self.D[keep]
# Remove all rows which have all null values
allnull = self.D.isnull().all(axis=1)
for r in self.D.index[allnull]:
print("Removing row", r, "because it has all null values")
keep = self.D.index[~allnull]
self.D = self.D.loc[keep]
n, d = self.D.shape
print("Dataset of size", n, "samples", d, "dimensions", "Resulting")
self.N = self.D.shape[0]
def imputeColumns(self, numeric):
keep = []
keep = (self.D.dtypes == "float64").as_matrix()
for c in self.D.columns[~keep]:
print("Removing column", c, "because it is not numeric")
self.D = self.D[self.D.columns[keep]]
cmean = self.D.mean(axis=0)
values = dict(list(zip(self.D.columns, cmean.as_matrix())))
#self.D.fillna(value=values, inplace=True)
d = self.D.as_matrix()
for i, c in enumerate(self.D.columns):
print("Imputing column", c, "with value", values[c])
d[:, i][np.isnan(d[:, i])] = values[c]
D = pd.DataFrame(d)
D.index = self.D.index
D.index.names = self.D.index.names
D.columns = self.D.columns
D.columns.names = self.D.columns.names
self.D = D
allzero = np.all(self.D.as_matrix() == 0, axis=0)
for c in self.D.columns[allzero]:
print("Removing column", c, "because it has all zero values")
keep = self.D.columns[~allzero]
allsame = np.std(self.D.as_matrix(), axis=0) == 0
for c in self.D.columns[allsame]:
print(
"Removing column", c,
"because it has all zero standard deviation (all values same)")
keep = self.D.columns[~allsame]
self.D = self.D[keep]
n, d = self.D.shape
print("Dataset of size", n, "samples", d, "dimensions", "Resulting")
print("Dataset has", self.D.isnull().sum().sum(), "nans")
print("Dataset has", np.sum(np.isinf(self.D.as_matrix())), "infs")
def getResource(self, index):
"""Get a specific data point from the data set.
Parameters
----------
index : int or string
The index of the data point in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
A ndarray of the data point.
"""
if type(index) is int:
return self.D.iloc[index].as_matrix()
else:
return self.D.loc[index].as_matrix()
def getColumn(self, index):
"""Get a column of the dataframe.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
The values in the column.
"""
if type(index) is int:
return self.D.iloc[:, index].as_matrix()
else:
return self.D[index].as_matrix()
def getColumnValues(self, index):
"""Get the unique values of a column.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`
A ndarray of the unique values.
"""
column = self.getColumn(index)
if column.dtype == "float64":
column = column[~np.isnan(column)]
else:
column = column[np.array([x != "NA" for x in column])]
return np.unique(column)
def getColumnDistribution(self, index):
"""Get the distribution of values in a column.
Parameters
----------
index : int or string
The index of the column in `D`, either positional or a string.
Returns
-------
:obj:`ndarray`, :obj:`ndarray`
An array x of the unique labels, and an array y of the count of that label
"""
x = self.getColumnValues(index)
column = self.getColumn(index)
y = [np.sum(column == v) for v in x]
return x, y
    def getColumnNADist(self, index):
        """Get the count of missing (NA) and present values in a column."""
        column = self.getColumn(index)
        if column.dtype == "float64":
            na = np.sum([np.isnan(x) for x in column])
        else:
            na = np.sum([x == "NA" for x in column])
        not_na = len(column) - na
        return na, not_na
def getColumnDescription(self, index, sep="\n"):
"""Get a description of the column.
"""
desc = []
if type(index) is int:
index = self.D.columns.values[index]
for i, name in enumerate(self.D.columns.names):
desc.append(name + ": " + index[i])
return sep.join(desc)
def getLevelValues(self, index):
return np.unique(self.D.columns.get_level_values(index))
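# Illustrative usage of the loader above (a sketch; the enclosing class name
# is not visible in this fragment, so `ds` stands for a constructed instance):
def _example_inspect(ds):
    ds.imputeColumns(numeric=True)      # mean-impute, drop constant columns
    values = ds.getColumnValues(0)      # unique non-NA values of column 0
    x, y = ds.getColumnDistribution(0)  # values and their counts
    return values, x, y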
| 4,336 | -7 | 261 |
c93929021bb8364e7a04b30ef4c2a4414b855d18 | 188 | py | Python | src/handlers/mainhandler.py | kevinbaijnath/MultiUserBlog | 20d0caf4b4d48c710a2dcda866ab855c602081cc | [
"MIT"
] | null | null | null | src/handlers/mainhandler.py | kevinbaijnath/MultiUserBlog | 20d0caf4b4d48c710a2dcda866ab855c602081cc | [
"MIT"
] | null | null | null | src/handlers/mainhandler.py | kevinbaijnath/MultiUserBlog | 20d0caf4b4d48c710a2dcda866ab855c602081cc | [
"MIT"
] | null | null | null | import webapp2
class MainHandler(webapp2.RequestHandler):
"""Main Handler for the blog"""
def get(self):
"""Redirects to the blog page"""
self.redirect("/blog")
| 18.8 | 42 | 0.632979 | import webapp2
class MainHandler(webapp2.RequestHandler):
"""Main Handler for the blog"""
def get(self):
"""Redirects to the blog page"""
self.redirect("/blog")
| 0 | 0 | 0 |
afdb93fa4c7ac873a40c11acab097ed744cca020 | 16,779 | py | Python | hole_filling/components.py | Rhoana/block_based_synapse_aware | 744b9b91031d0eb0570d309d97563182b768a9d8 | [
"MIT"
] | 2 | 2020-04-14T07:51:02.000Z | 2022-03-25T08:09:04.000Z | hole_filling/components.py | Rhoana/block_based_synapse_aware | 744b9b91031d0eb0570d309d97563182b768a9d8 | [
"MIT"
] | null | null | null | hole_filling/components.py | Rhoana/block_based_synapse_aware | 744b9b91031d0eb0570d309d97563182b768a9d8 | [
"MIT"
] | 3 | 2020-04-10T08:33:11.000Z | 2020-05-19T06:51:23.000Z |
import os
import time
import numpy as np
from numba import jit, types
from numba.typed import Dict
from blockbased_synapseaware.hole_filling.connected_components.cc3d import connected_components
from blockbased_synapseaware.utilities.dataIO import PickleData, PickleNumbaData, WriteH5File
from blockbased_synapseaware.utilities.constants import *
@jit(nopython=True)
| 44.271768 | 178 | 0.650396 |
import os
import time
import numpy as np
from numba import jit, types
from numba.typed import Dict
from blockbased_synapseaware.hole_filling.connected_components.cc3d import connected_components
from blockbased_synapseaware.utilities.dataIO import PickleData, PickleNumbaData, WriteH5File
from blockbased_synapseaware.utilities.constants import *
def ComputeConnected6Components(seg, background_start_label):
# run connected components with 6 connectivity and according background_start_label
components = connected_components(seg, start_label=background_start_label, connectivity=6)
return components
@jit(nopython=True)
def FindAdjacentLabelSetLocal(components):
neighbor_label_set = set()
zres, yres, xres = components.shape
# consider all neighboring pairs within the volume
for iz in range(0, zres - 1):
for iy in range(0, yres - 1):
for ix in range(0, xres - 1):
# get the component at this location
component = components[iz,iy,ix]
# does this component differ from its neighbor in z
if component != components[iz+1,iy,ix]:
neighbor_label_set.add((component, components[iz+1,iy,ix]))
neighbor_label_set.add((components[iz+1,iy,ix], component))
# does this component differ from its neighbor in y
if component != components[iz,iy+1,ix]:
neighbor_label_set.add((component, components[iz,iy+1,ix]))
neighbor_label_set.add((components[iz,iy+1,ix], component))
# does this component differ from its neighbor in x
if component != components[iz,iy,ix+1]:
neighbor_label_set.add((component, components[iz,iy,ix+1]))
neighbor_label_set.add((components[iz,iy,ix+1], component))
# consider components on the first and last z slice
for iz in [0, zres - 1]:
for iy in range(0, yres):
for ix in range(0, xres):
#interconnect in plane
component = components[iz,iy,ix]
if iy < yres - 1:
if component != components[iz,iy+1,ix]:
neighbor_label_set.add((component, components[iz,iy+1,ix]))
neighbor_label_set.add((components[iz,iy+1,ix], component))
if ix < xres - 1:
if component != components[iz,iy,ix+1]:
neighbor_label_set.add((component, components[iz,iy,ix+1]))
neighbor_label_set.add((components[iz,iy,ix+1], component))
# write dict of border components paired with sufficiently high fake label
neighbor_label_set.add((component, BORDER_CONTACT))
# consider components on the first and last y slice
for iy in [0, yres - 1]:
for iz in range(0, zres):
for ix in range(0, xres):
#interconnect in plane
component = components[iz,iy,ix]
if iz < zres - 1:
if component != components[iz+1,iy,ix]:
neighbor_label_set.add((component, components[iz+1,iy,ix]))
neighbor_label_set.add((components[iz+1,iy,ix], component))
if ix < xres - 1:
if component != components[iz,iy,ix+1]:
neighbor_label_set.add((component, components[iz,iy,ix+1]))
neighbor_label_set.add((components[iz,iy,ix+1], component))
# write dict of border components paired with sufficiently high fake label
neighbor_label_set.add((component, BORDER_CONTACT))
# consider components on the first and last x slice
for ix in [0, xres - 1]:
for iz in range(0, zres):
for iy in range(0, yres):
#interconnect in plane
component = components[iz,iy,ix]
if iz < zres - 1:
if component != components[iz+1,iy,ix]:
neighbor_label_set.add((component, components[iz+1,iy,ix]))
neighbor_label_set.add((components[iz+1,iy,ix], component))
if iy < yres - 1:
if component != components[iz,iy+1,ix]:
neighbor_label_set.add((component, components[iz,iy+1,ix]))
neighbor_label_set.add((components[iz,iy+1,ix], component))
# write dict of border components paired with sufficiently high fake label
neighbor_label_set.add((component, BORDER_CONTACT))
return neighbor_label_set
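# A toy call of the adjacency scan above (illustrative; assumes a numba
# version that accepts the reflected set built inside the jitted function):
def _example_adjacency():
    # 2x2x2 volume: neuron label 7 everywhere, one background voxel -1.
    vol = np.full((2, 2, 2), 7, dtype=np.int64)
    vol[0, 0, 0] = -1
    # Returns pairs such as (-1, 7)/(7, -1) plus (label, BORDER_CONTACT)
    # entries for every component touching the volume faces.
    return FindAdjacentLabelSetLocal(vol)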
def Set2Dictionary(label_set, label_dict = None):
    if label_dict is None:
label_dict = dict()
# go through all of the labels in the set
for (label_one, label_two) in label_set:
# background components can be neighbors if they cross a border
# skip non-background components
if label_one > 0: continue
# create a dictionary entry for this label if it doesn't exist yet
if not label_one in label_dict:
label_dict[label_one] = [label_two]
# otherwise append the label to the dictionary entry for label_one
elif not label_two in label_dict[label_one]:
label_dict[label_one].append(label_two)
# when label_dict is not None, the element could already appear in the set
return label_dict
def FindBackgroundComponentsAssociatedLabels(neighbor_label_dict, undetermined_label_set, associated_label_dict):
# find which background components have only one non-background neighbor
border_contact = set()
holes = set()
non_holes = set()
# continue until there are no more undetermined components in the set
while len(undetermined_label_set):
query_component = undetermined_label_set.pop()
# check to see if there is one neighbor which is a neuron
if len(neighbor_label_dict[query_component]) == 1 and neighbor_label_dict[query_component][0] != BORDER_CONTACT:
# there should never be a case where there is one background component neighbor
assert (neighbor_label_dict[query_component][0] > 0)
associated_label_dict[query_component] = neighbor_label_dict[query_component][0]
holes.add(query_component)
# otherwise, unroll all other neighbors to identify if hole or not
else:
# list of nodes to expand (initially just the neighbors of the background component)
labels_to_expand = list(filter(lambda a : a < 0, neighbor_label_dict[query_component]))
# iteratively expand labels
while len(labels_to_expand):
label = labels_to_expand.pop()
if label == BORDER_CONTACT:
# add the border contact to the neighbor list
if not BORDER_CONTACT in neighbor_label_dict[query_component]:
neighbor_label_dict[query_component].append(BORDER_CONTACT)
else:
for child in neighbor_label_dict[label]:
if not child in neighbor_label_dict[query_component] and not child == query_component:
neighbor_label_dict[query_component].append(child)
# if this component is also background, add to list of expandable nodes
if child < 0: labels_to_expand.append(child)
# if there is contact with the border, add to border contact list
if BORDER_CONTACT in neighbor_label_dict[query_component]:
border_contact.add(query_component)
for label in neighbor_label_dict[query_component]:
# all connected background components also connect to the border
if label < 0:
border_contact.add(label)
undetermined_label_set.remove(label)
# if component lacks border contact, it can be determined as whole or not
else:
neuron_neighbors = list(filter(lambda a : a > 0, neighbor_label_dict[query_component]))
# if there is only one neighbor it is a hole
if len(neuron_neighbors) == 1:
associated_label_dict[query_component] = neuron_neighbors[0]
holes.add(query_component)
# all other background components adjacent to this neuron are also holes
for label in neighbor_label_dict[query_component]:
if label < 0:
# make sure the label agrees if already discovered
associated_label_dict[label] = neuron_neighbors[0]
undetermined_label_set.remove(label)
holes.add(label)
# if there are more than one neuron neighbor it is not a hole
else:
associated_label_dict[query_component] = 0
non_holes.add(query_component)
for label in neighbor_label_dict[query_component]:
if label < 0:
# make sure the label agrees if already discovered
associated_label_dict[label] = 0
undetermined_label_set.remove(label)
non_holes.add(label)
assert (not len(undetermined_label_set))
# update the undetermined_label_set to equal the border_contact_set
return associated_label_dict, border_contact, holes, non_holes
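# Worked example of the hole classification (plain dicts suffice here since
# this function itself is not jitted):
def _example_hole_classification():
    pairs = {(-1, 7), (7, -1), (-2, 7), (7, -2), (-2, 9), (9, -2)}
    neighbor_label_dict = Set2Dictionary(pairs)   # {-1: [7], -2: [7, 9]}
    undetermined = set(neighbor_label_dict.keys())
    associated, border, holes, non_holes = FindBackgroundComponentsAssociatedLabels(
        neighbor_label_dict, undetermined, dict())
    # -1 touches only neuron 7, so it is a hole filled with 7;
    # -2 touches neurons 7 and 9, so it is not a hole (associated label 0).
    return associated, holes, non_holes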
def PruneNeighborLabelSet(neighbor_label_set, holes, non_holes):
neighbor_label_set_reduced = set()
for (label_one, label_two) in neighbor_label_set:
# do not include any elements already labeled or connected to the border
if label_one in holes or label_one in non_holes: continue
if label_two in holes or label_two in non_holes: continue
if label_one == BORDER_CONTACT or label_two == BORDER_CONTACT: continue
neighbor_label_set_reduced.add((label_one, label_two))
return neighbor_label_set_reduced
def FindPerBlockConnectedComponents(data, iz, iy, ix):
# start timing statistics
total_time = time.time()
# get the number of blocks in each dimension
nblocks = data.NBlocks()
block_volume = data.BlockVolume()
# get the index for this block
block_index = data.IndexFromIndices(iz, iy, ix)
# get the index for the background volumes
background_start_label = -1 - (block_index * block_volume)
# read in this volume
read_time = time.time()
seg = data.ReadRawSegmentationBlock(iz, iy, ix)
read_time = time.time() - read_time
# make sure the block is not larger than mentioned in param file
assert (seg.shape[OR_Z] <= data.BlockZLength())
assert (seg.shape[OR_Y] <= data.BlockYLength())
assert (seg.shape[OR_X] <= data.BlockXLength())
# pad the block with zeroes at the ends
if seg.shape[OR_Z] < data.BlockZLength() or seg.shape[OR_Y] < data.BlockYLength() or seg.shape[OR_X] < data.BlockXLength():
# make sure that the block is on one of the far edges
assert (iz == data.EndZ() - 1 or iy == data.EndY() - 1 or ix == data.EndX() - 1)
zpadding = data.ZBlockLength() - seg.shape[OR_Z]
ypadding = data.YBlockLength() - seg.shape[OR_Y]
xpadding = data.XBlockLength() - seg.shape[OR_X]
# padding only goes at the far edges of the block
seg = np.pad(seg, ((0, zpadding), (0, ypadding), (0, xpadding)), 'constant', constant_values = 0)
# make sure the block is not smaller than mentioned in param file
assert (seg.shape[OR_Z] == data.BlockZLength())
assert (seg.shape[OR_Y] == data.BlockYLength())
assert (seg.shape[OR_X] == data.BlockXLength())
# call connected components algorithm for this block
components_time = time.time()
components = ComputeConnected6Components(seg, background_start_label)
# delete original segmentation
del seg
# save the components file to disk
tmp_directory = data.TempBlockDirectory(iz, iy, ix)
# create the folder if it does not exist
if not os.path.exists(tmp_directory):
os.makedirs(tmp_directory, exist_ok=True)
# write the components and all walls to file
WriteH5File(components, '{}/components.h5'.format(tmp_directory))
WriteH5File(components[0,:,:], '{}/z-min-hole-filling.h5'.format(tmp_directory))
WriteH5File(components[-1,:,:], '{}/z-max-hole-filling.h5'.format(tmp_directory))
WriteH5File(components[:,0,:], '{}/y-min-hole-filling.h5'.format(tmp_directory))
WriteH5File(components[:,-1,:], '{}/y-max-hole-filling.h5'.format(tmp_directory))
WriteH5File(components[:,:,0], '{}/x-min-hole-filling.h5'.format(tmp_directory))
WriteH5File(components[:,:,-1], '{}/x-max-hole-filling.h5'.format(tmp_directory))
components_time = time.time() - components_time
# find the set of adjacent labels, both inside the volume and the ones connected at the local border
adjacency_set_time = time.time()
neighbor_label_set = FindAdjacentLabelSetLocal(components)
adjacency_set_time = time.time() - adjacency_set_time
# create a dictionary of labels from the set
background_associated_labels_time = time.time()
neighbor_label_dict = Set2Dictionary(neighbor_label_set)
# to start, none of the background components are determined
undetermined_label_set = set(neighbor_label_dict.keys())
# dictionary associated background components to labels
associated_label_dict = Dict.empty(key_type=types.int64, value_type=types.int64)
associated_label_dict, undetermined_label_set, holes, non_holes = FindBackgroundComponentsAssociatedLabels(neighbor_label_dict, undetermined_label_set, associated_label_dict)
background_associated_labels_time = time.time() - background_associated_labels_time
# remove from the neighbor label set border elements and those already determined as holes and non holes
neighbor_label_set_reduced = PruneNeighborLabelSet(neighbor_label_set, holes, non_holes)
neighbor_label_dict_reduced = Set2Dictionary(neighbor_label_set_reduced)
# delete the temporary generated set and dictionary
del neighbor_label_set, neighbor_label_dict
# write the relevant files to disk
write_time = time.time()
PickleNumbaData(associated_label_dict, '{}/associated-label-set-local.pickle'.format(tmp_directory))
PickleData(undetermined_label_set, '{}/undetermined-label-set-local.pickle'.format(tmp_directory))
PickleData(neighbor_label_dict_reduced, '{}/neighbor-label-dictionary-reduced.pickle'.format(tmp_directory))
write_time = time.time() - write_time
total_time = time.time() - total_time
print ('Read Time: {:0.2f} seconds.'.format(read_time))
print ('Components Time: {:0.2f} seconds.'.format(components_time))
print ('Adjacency Set Time: {:0.2f} seconds.'.format(adjacency_set_time))
print ('Background Components Associated Labels: {:0.2f} seconds.'.format(background_associated_labels_time))
print ('Write Time: {:0.2f} seconds.'.format(write_time))
print ('Total Time: {:0.2f} seconds.'.format(total_time))
# generate statistics for the holes
# does not count towards total computation time
labels, counts = np.unique(components, return_counts=True)
hole_sizes = {}
for iv, label in enumerate(labels):
# skip the actual neurons in the volume
if label > 0: continue
hole_sizes[label] = counts[iv]
# save the output file
PickleData(hole_sizes, '{}/hole-sizes.pickle'.format(tmp_directory))
# delete the components (no longer needed)
del components
# output timing statistics
timing_directory = '{}/connected-components'.format(data.TimingDirectory())
if not os.path.exists(timing_directory):
os.makedirs(timing_directory, exist_ok=True)
timing_filename = '{}/{:04d}z-{:04d}y-{:04d}x.txt'.format(timing_directory, iz, iy, ix)
with open(timing_filename, 'w') as fd:
fd.write ('Read Time: {:0.2f} seconds.\n'.format(read_time))
fd.write ('Components Time: {:0.2f} seconds.\n'.format(components_time))
fd.write ('Adjacency Set Time: {:0.2f} seconds.\n'.format(adjacency_set_time))
fd.write ('Background Components Associated Labels: {:0.2f} seconds.\n'.format(background_associated_labels_time))
fd.write ('Write Time: {:0.2f} seconds.\n'.format(write_time))
fd.write ('Total Time: {:0.2f} seconds.\n'.format(total_time))
| 16,252 | 0 | 137 |
517ccb527d0a92db701ebf0e219f5fcbbb838e0e | 1,413 | py | Python | examples/service/arg_check_example.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | 2 | 2015-06-09T16:07:09.000Z | 2015-07-28T10:06:31.000Z | examples/service/arg_check_example.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | 3 | 2020-07-22T15:14:55.000Z | 2021-12-13T19:35:06.000Z | examples/service/arg_check_example.py | ooici/pyon | 122c629290d27f32f2f41dafd5c12469295e8acf | [
"BSD-2-Clause"
] | null | null | null | #!/usr/bin/env python
'''
@author Luke Campbel <LCampbell@ASAScience.com>
@file
@date 03/27/12 15:30
@description DESCRIPTION
'''
from pyon.util.arg_check import validate_is_instance, validate_in, validate_equal, validate_true
class ArgCheckService(object):
'''
    Example Service illustrating how to use the various validation mechanisms
'''
def pass_integer(self, val=''):
'''
Say you were expecting an integer from the client...
'''
validate_is_instance(val,int,'Value is not an integer.')
return val
def pass_float(self, val=1.0):
'''
Say you were expecting a float from the client
'''
validate_is_instance(val,float,'Value is not a float.')
return val
def handle_list(self, needle, haystack):
'''
You needed to be certain that something was in the list or dict
'''
validate_in(needle,haystack,'Can\'t find %s in %s.' % (needle, haystack))
return needle
def check_equality(self, a,b):
'''
        You needed to be sure that two items were equivalent
'''
validate_equal(a,b,'%s != %s' %(str(a), str(b)))
return True
def list_len(self,l):
'''
You needed to be certain that a list had len >0
'''
validate_true(len(l)>0, 'list=%s was empty.' % str(l))
| 27.173077 | 96 | 0.604388 | #!/usr/bin/env python
'''
@author Luke Campbel <LCampbell@ASAScience.com>
@file
@date 03/27/12 15:30
@description DESCRIPTION
'''
from pyon.util.arg_check import validate_is_instance, validate_in, validate_equal, validate_true
class ArgCheckService(object):
'''
Example Service illustrating how to use the various validateion mechanisms
'''
def __init__(self):
pass
def pass_integer(self, val=''):
'''
Say you were expecting an integer from the client...
'''
validate_is_instance(val,int,'Value is not an integer.')
return val
def pass_float(self, val=1.0):
'''
Say you were expecting a float from the client
'''
validate_is_instance(val,float,'Value is not a float.')
return val
def handle_list(self, needle, haystack):
'''
You needed to be certain that something was in the list or dict
'''
validate_in(needle,haystack,'Can\'t find %s in %s.' % (needle, haystack))
return needle
def check_equality(self, a,b):
'''
        You needed to be sure that two items were equivalent
'''
validate_equal(a,b,'%s != %s' %(str(a), str(b)))
return True
def list_len(self,l):
'''
You needed to be certain that a list had len >0
'''
validate_true(len(l)>0, 'list=%s was empty.' % str(l))
| 11 | 0 | 26 |
85fd80dee75de489ab2b78464c3cbc65eeca1519 | 1,913 | py | Python | annotated_packet.py | USC-NSL/policing-detection | 95dcfbd76875b23e26a57f1a5f4b82ea63916c99 | [
"Apache-2.0"
] | 13 | 2016-12-12T02:19:49.000Z | 2021-12-08T12:49:28.000Z | annotated_packet.py | USC-NSL/policing-detection | 95dcfbd76875b23e26a57f1a5f4b82ea63916c99 | [
"Apache-2.0"
] | null | null | null | annotated_packet.py | USC-NSL/policing-detection | 95dcfbd76875b23e26a57f1a5f4b82ea63916c99 | [
"Apache-2.0"
] | 3 | 2017-10-18T08:01:12.000Z | 2018-05-22T08:46:32.000Z | import dpkt
from dpkt.tcp import *
from tcp_util import *
| 31.883333 | 78 | 0.637219 | import dpkt
from dpkt.tcp import *
from tcp_util import *
class AnnotatedPacket(object):
def __init__(self, packet, timestamp_us, index):
self.packet = packet
self.timestamp_us = timestamp_us
self.index = index
self.ack_delay_ms = -1
self.ack_index = -1
self.rtx = None
self.rtx_is_spurious = False
self.previous_tx = None
self.previous_packet = None
self.data_len = tcp_data_len(self)
self.seq = packet.ip.tcp.seq
self.seq_end = add_offset(self.seq, self.data_len)
# Replace raw option buffer by a parsed version
self.packet.ip.tcp.opts = parse_opts(self.packet.ip.tcp.opts)
self.ack = packet.ip.tcp.ack
# Relative sequence numbers are set by the TCP endpoint
# (requires knowledge about the initial sequence numbers)
self.seq_relative = -1
self.ack_relative = -1
# Bytes that were received successfully by the other endpoint
# (packets transmitted before this one)
self.bytes_passed = -1
def is_lost(self):
return self.rtx is not None and not self.rtx_is_spurious
def update_length_and_offset(self, new_length, offset):
"""Update the sequence numbers and payload length (used when splitting
a jumbo packet into smaller on-the-wire frames"""
self.data_len = new_length
tcp_set_data_len(self, new_length)
assert self.data_len == tcp_data_len(self)
tcp = self.packet.ip.tcp
self.seq = tcp.seq = add_offset(self.seq, offset)
self.seq_end = add_offset(self.seq, self.data_len)
# trim buffer storing actual payload
if len(tcp.data) <= offset:
tcp.data = []
else:
buf_start = offset
buf_end = min(len(tcp.data), offset + new_length)
tcp.data = tcp.data[buf_start:buf_end]
| 1,023 | 807 | 23 |
61c5161b9f9bd6ce0d40bbd88906570121b82373 | 1,057 | py | Python | python_version/plateau.py | k3nnywilliam/rover-simulator | c250af46c838fab2dc20161bb1d8a3f6470f1045 | [
"MIT"
] | 1 | 2020-02-18T23:20:44.000Z | 2020-02-18T23:20:44.000Z | python_version/plateau.py | k3nnywilliam/rover-simulator | c250af46c838fab2dc20161bb1d8a3f6470f1045 | [
"MIT"
] | null | null | null | python_version/plateau.py | k3nnywilliam/rover-simulator | c250af46c838fab2dc20161bb1d8a3f6470f1045 | [
"MIT"
] | null | null | null | '''
Written by Kenny William Nyallau ©2020
This is a python implementation of Rover challenge
'''
| 26.425 | 82 | 0.525071 | '''
Written by Kenny William Nyallau ©2020
This is a python implementation of Rover challenge
'''
class Plateau:
def __init__(self):
self.px = 0
self.py = 0
self.x = 0
self.y = 0
self.name = ""
def setRoverCoordinates(self, x, y):
self.x = x
self.y = y
def setPlateauSize(self):
while(True):
px, py = input("Please set x, y coordinates for the Plateau:").split()
self.px = int(px)
self.py = int(py)
if(int(self.x) > int(self.px) or int(self.y) > int(self.py)):
print("Invalid. Plateau too small for Rover!")
elif(int(self.px) <=0 or int(self.py) <=0):
print("Invalid. Plateau too small for Rover!")
else:
break
    def __checkIfPositionOccupied(self, r1_x, r1_y, r2_x, r2_y):
        # Compare the coordinates passed in; the original referenced
        # undefined self.r1_x/self.r2_x attributes.
        if r1_x == r2_x and r1_y == r2_y:
            print("This position has been occupied")
            return True
        return False
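# Illustrative setup (a sketch): place a rover before sizing the plateau.
def _example_setup():
    plateau = Plateau()
    plateau.setRoverCoordinates(1, 2)
    # plateau.setPlateauSize() would then prompt until the plateau is at
    # least (1, 2) with positive coordinates.
    return plateau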
| 834 | -7 | 130 |
166da20936646259d249f9f96026f0761b83bcf2 | 663 | py | Python | obsvail/tests/load_experts.py | sandipan1/robo_rl | 3bcb7caabeba71dd747fadf2355ac42408b7f340 | [
"MIT"
] | 5 | 2018-10-16T03:48:02.000Z | 2021-10-01T08:58:05.000Z | obsvail/tests/load_experts.py | sandipan1/robo_rl | 3bcb7caabeba71dd747fadf2355ac42408b7f340 | [
"MIT"
] | 1 | 2018-10-17T16:19:14.000Z | 2018-10-31T06:19:30.000Z | obsvail/tests/load_experts.py | sandipan1/robo_rl | 3bcb7caabeba71dd747fadf2355ac42408b7f340 | [
"MIT"
] | null | null | null | import pickle
experts_file_path = "../experts/sampled_experts.obs"
with open(experts_file_path, "rb") as f:
expert_trajectories = pickle.load(f)
# (num_experts)
print(len(expert_trajectories))
# (trajectory_length)
print(len(expert_trajectories[0]["trajectory"]))
# (num_observations, 1)
print(expert_trajectories[0]["trajectory"][0]["state"].shape)
print(expert_trajectories[0]["context"])
# Should have 30 for each context - 2,3,4,5
context_bins = [0]*4
for expert_trajectory in expert_trajectories:
context_decimal = expert_trajectory["context"][0] + 2 * expert_trajectory["context"][1]
context_bins[context_decimal] += 1
print(context_bins)
| 27.625 | 91 | 0.751131 | import pickle
experts_file_path = "../experts/sampled_experts.obs"
with open(experts_file_path, "rb") as f:
expert_trajectories = pickle.load(f)
# (num_experts)
print(len(expert_trajectories))
# (trajectory_length)
print(len(expert_trajectories[0]["trajectory"]))
# (num_observations, 1)
print(expert_trajectories[0]["trajectory"][0]["state"].shape)
print(expert_trajectories[0]["context"])
# Should have 30 for each context - 2,3,4,5
context_bins = [0]*4
for expert_trajectory in expert_trajectories:
context_decimal = expert_trajectory["context"][0] + 2 * expert_trajectory["context"][1]
context_bins[context_decimal] += 1
print(context_bins)
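# For reference: the two-bit context maps to a bin index as
# index = context[0] + 2 * context[1], e.g. [1, 0] -> 1 and [1, 1] -> 3.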
| 0 | 0 | 0 |
51132cf221eac7d2bc80d0b8468825911b58e7ae | 4,646 | py | Python | navio_tasks/commands/cli_pylint.py | matthewdeanmartin/random_names | 5c78a96e62c7924864c7ef802f995eb55d5a520f | [
"MIT"
] | 8 | 2019-05-24T19:31:38.000Z | 2019-05-28T14:13:56.000Z | navio_tasks/commands/cli_pylint.py | matthewdeanmartin/random_names | 5c78a96e62c7924864c7ef802f995eb55d5a520f | [
"MIT"
] | 17 | 2018-07-14T17:04:49.000Z | 2022-03-24T15:59:11.000Z | navio_tasks/commands/cli_pylint.py | matthewdeanmartin/so_pip | 0ff7ab096352ae3dc316606d1acd5582cfaee25a | [
"MIT"
] | 2 | 2021-03-26T18:34:41.000Z | 2022-03-14T16:27:23.000Z | """
Lots of code gripes.
"""
import os
import shlex
import subprocess
import sys
from typing import List
from navio_tasks import settings as settings
from navio_tasks.cli_commands import check_command_exists, config_pythonpath
from navio_tasks.output import say_and_exit
from navio_tasks.pure_reports.cli_pygount import total_loc
from navio_tasks.settings import (
IS_DJANGO,
IS_GITLAB,
PROBLEMS_FOLDER,
PROJECT_NAME,
VENV_SHELL,
)
from navio_tasks.utils import inform
def do_lint(folder_type: str) -> str:
"""
Execute pylint
"""
# pylint: disable=too-many-locals
check_command_exists("pylint")
if folder_type == PROJECT_NAME:
pylintrc = f"{settings.CONFIG_FOLDER}/.pylintrc"
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint.txt"
else:
pylintrc = f"{settings.CONFIG_FOLDER}/.pylintrc_{folder_type}"
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint_{folder_type}.txt"
if os.path.isfile(lint_output_file_name):
os.remove(lint_output_file_name)
if IS_DJANGO:
django_bits = "--load-plugins pylint_django "
else:
django_bits = ""
# pylint: disable=pointless-string-statement
command_text = (
f"{VENV_SHELL} pylint {django_bits} " f"--rcfile={pylintrc} {folder_type} "
)
command_text += " "
"--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"
"".strip().replace(" ", " ")
inform(command_text)
command = shlex.split(command_text)
with open(lint_output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
return lint_output_file_name
def evaluated_lint_results(
lint_output_file_name: str,
small_code_base_cut_off: int,
maximum_lint: int,
fatals: List[str],
) -> str:
"""Deciding if the lint is bad enough to fail
Also treats certain errors as fatal even if under the maximum cutoff.
"""
with open(lint_output_file_name) as file_handle:
full_text = file_handle.read()
lint_did_indeed_run = "Your code has been rated at" in full_text
    with open(lint_output_file_name) as file_handle:
        # Count every line that is an error (E), fatal (F), or matches one of
        # the caller-supplied fatal patterns. (The original re-iterated an
        # already-exhausted file handle and tested `fatal in file_handle`.)
        fatal_errors = sum(
            1
            for line in file_handle
            if ": E" in line or ": F" in line
            or any(fatal in line for fatal in fatals)
        )
if fatal_errors > 0:
with open(lint_output_file_name) as file_handle:
for line in file_handle:
if "*************" in line:
continue
if not line or not line.strip("\n "):
continue
inform(line.strip("\n "))
message = f"Fatal lint errors and possibly others, too : {fatal_errors}"
if IS_GITLAB:
with open(lint_output_file_name) as error_file:
inform(error_file.read())
say_and_exit(message, "lint")
return message
with open(lint_output_file_name) as lint_file_handle:
for line in [
line
for line in lint_file_handle
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
]:
inform(line)
if total_loc() > small_code_base_cut_off:
cutoff = maximum_lint
else:
cutoff = 0
with open(lint_output_file_name) as lint_file_handle:
num_lines = sum(
1
for line in lint_file_handle
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
)
if num_lines > cutoff:
say_and_exit(f"Too many lines of lint : {num_lines}, max {cutoff}", "pylint")
sys.exit(-1)
with open(lint_output_file_name) as lint_file_handle:
num_lines_all_output = sum(1 for _ in lint_file_handle)
if (
not lint_did_indeed_run
and num_lines_all_output == 0
and os.path.isfile(lint_output_file_name)
):
# should always have at least 'found 0 errors' in output
# force lint to re-run, because empty file will be missing
os.remove(lint_output_file_name)
say_and_exit(
"No lint messages at all, did pylint fail to run or is it installed?",
"pylint",
)
sys.exit(-1)
return "pylint succeeded"
| 31.605442 | 87 | 0.602238 | """
Lots of code gripes.
"""
import os
import shlex
import subprocess
import sys
from typing import List
from navio_tasks import settings as settings
from navio_tasks.cli_commands import check_command_exists, config_pythonpath
from navio_tasks.output import say_and_exit
from navio_tasks.pure_reports.cli_pygount import total_loc
from navio_tasks.settings import (
IS_DJANGO,
IS_GITLAB,
PROBLEMS_FOLDER,
PROJECT_NAME,
VENV_SHELL,
)
from navio_tasks.utils import inform
def do_lint(folder_type: str) -> str:
"""
Execute pylint
"""
# pylint: disable=too-many-locals
check_command_exists("pylint")
if folder_type == PROJECT_NAME:
pylintrc = f"{settings.CONFIG_FOLDER}/.pylintrc"
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint.txt"
else:
pylintrc = f"{settings.CONFIG_FOLDER}/.pylintrc_{folder_type}"
lint_output_file_name = f"{PROBLEMS_FOLDER}/lint_{folder_type}.txt"
if os.path.isfile(lint_output_file_name):
os.remove(lint_output_file_name)
if IS_DJANGO:
django_bits = "--load-plugins pylint_django "
else:
django_bits = ""
# pylint: disable=pointless-string-statement
command_text = (
f"{VENV_SHELL} pylint {django_bits} " f"--rcfile={pylintrc} {folder_type} "
)
command_text += " "
"--msg-template={path}:{line}: [{msg_id}({symbol}), {obj}] {msg}"
"".strip().replace(" ", " ")
inform(command_text)
command = shlex.split(command_text)
with open(lint_output_file_name, "w") as outfile:
env = config_pythonpath()
subprocess.call(command, stdout=outfile, env=env)
return lint_output_file_name
def evaluated_lint_results(
lint_output_file_name: str,
small_code_base_cut_off: int,
maximum_lint: int,
fatals: List[str],
) -> str:
"""Deciding if the lint is bad enough to fail
Also treats certain errors as fatal even if under the maximum cutoff.
"""
with open(lint_output_file_name) as file_handle:
full_text = file_handle.read()
lint_did_indeed_run = "Your code has been rated at" in full_text
    with open(lint_output_file_name) as file_handle:
        # Count every line that is an error (E), fatal (F), or matches one of
        # the caller-supplied fatal patterns. (The original re-iterated an
        # already-exhausted file handle and tested `fatal in file_handle`.)
        fatal_errors = sum(
            1
            for line in file_handle
            if ": E" in line or ": F" in line
            or any(fatal in line for fatal in fatals)
        )
if fatal_errors > 0:
with open(lint_output_file_name) as file_handle:
for line in file_handle:
if "*************" in line:
continue
if not line or not line.strip("\n "):
continue
inform(line.strip("\n "))
message = f"Fatal lint errors and possibly others, too : {fatal_errors}"
if IS_GITLAB:
with open(lint_output_file_name) as error_file:
inform(error_file.read())
say_and_exit(message, "lint")
return message
with open(lint_output_file_name) as lint_file_handle:
for line in [
line
for line in lint_file_handle
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
]:
inform(line)
if total_loc() > small_code_base_cut_off:
cutoff = maximum_lint
else:
cutoff = 0
with open(lint_output_file_name) as lint_file_handle:
num_lines = sum(
1
for line in lint_file_handle
if not (
"*************" in line
or "---------------------" in line
or "Your code has been rated at" in line
or line == "\n"
)
)
if num_lines > cutoff:
say_and_exit(f"Too many lines of lint : {num_lines}, max {cutoff}", "pylint")
sys.exit(-1)
with open(lint_output_file_name) as lint_file_handle:
num_lines_all_output = sum(1 for _ in lint_file_handle)
if (
not lint_did_indeed_run
and num_lines_all_output == 0
and os.path.isfile(lint_output_file_name)
):
# should always have at least 'found 0 errors' in output
# force lint to re-run, because empty file will be missing
os.remove(lint_output_file_name)
say_and_exit(
"No lint messages at all, did pylint fail to run or is it installed?",
"pylint",
)
sys.exit(-1)
return "pylint succeeded"
| 0 | 0 | 0 |
27aae263b75fff183863589ccb93f7732b2a47d8 | 763 | py | Python | ckuser/config.py | ds19991999/CKUser | c66ebda6ef5068a79b816de2c57a443b25d7096d | [
"MIT"
] | null | null | null | ckuser/config.py | ds19991999/CKUser | c66ebda6ef5068a79b816de2c57a443b25d7096d | [
"MIT"
] | null | null | null | ckuser/config.py | ds19991999/CKUser | c66ebda6ef5068a79b816de2c57a443b25d7096d | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
mysql> desc userinfors;
+----------+----------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+----------+----------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| name | char(40) | YES | | NULL | |
| passwd | char(40) | YES | | NULL | |
| isdelete | bit(1) | YES | | b'0' | |
+----------+----------+------+-----+---------+----------------+
"""
| 30.52 | 63 | 0.349934 | #!/usr/bin/python3
# -*- coding:utf-8 -*-
"""
mysql> desc userinfors;
+----------+----------+------+-----+---------+----------------+
| Field | Type | Null | Key | Default | Extra |
+----------+----------+------+-----+---------+----------------+
| id | int(11) | NO | PRI | NULL | auto_increment |
| name | char(40) | YES | | NULL | |
| passwd | char(40) | YES | | NULL | |
| isdelete | bit(1) | YES | | b'0' | |
+----------+----------+------+-----+---------+----------------+
"""
def config():
config={
'mysql_ip':'localhost',
'mysql_database':'python3',
'mysql_user':'root',
'mysql_passwd':'passwd',
'redis_ip':'localhost'
}
return config
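# Illustrative connection sketch (assumes the pymysql driver, which is not
# imported by the original module):
def _example_connect():
    import pymysql
    cfg = config()
    return pymysql.connect(host=cfg['mysql_ip'], user=cfg['mysql_user'],
                           password=cfg['mysql_passwd'],
                           database=cfg['mysql_database'])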
| 152 | 0 | 23 |
4954a99b7dfa453ebb17638013bab490f8777570 | 9,920 | py | Python | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACNoFcs_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 20 | 2019-05-07T01:59:14.000Z | 2022-02-11T05:24:47.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACNoFcs_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 60 | 2019-04-03T18:59:35.000Z | 2022-02-22T12:05:05.000Z | ixnetwork_restpy/testplatform/sessions/ixnetwork/traffic/trafficitem/configelement/stack/macInMACNoFcs_template.py | OpenIxia/ixnetwork_restpy | f628db450573a104f327cf3c737ca25586e067ae | [
"MIT"
] | 13 | 2019-05-20T10:48:31.000Z | 2021-10-06T07:45:44.000Z | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
| 36.740741 | 127 | 0.675403 | from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class MacInMACNoFcs(Base):
__slots__ = ()
_SDM_NAME = 'macInMACNoFcs'
_SDM_ATT_MAP = {
'HeaderBdestinationAddressEthernet': 'macInMACNoFcs.header.bdestinationAddressEthernet-1',
'HeaderBsourceAddressEthernet': 'macInMACNoFcs.header.bsourceAddressEthernet-2',
'BethernetTypeEtherTypeBtag': 'macInMACNoFcs.header.bethernetType.etherTypeBtag-3',
'BvlanTagBvlanPriority': 'macInMACNoFcs.header.bethernetType.bvlanTag.bvlanPriority-4',
'BvlanTagBvlanCanonicalFormatIndicator': 'macInMACNoFcs.header.bethernetType.bvlanTag.bvlanCanonicalFormatIndicator-5',
'BvlanTagBvlanID': 'macInMACNoFcs.header.bethernetType.bvlanTag.bvlanID-6',
'ItagEtherTypeEtherTypeItag': 'macInMACNoFcs.header.itagEtherType.etherTypeItag-7',
'ItagPcp': 'macInMACNoFcs.header.itag.pcp-8',
'ItagDei': 'macInMACNoFcs.header.itag.dei-9',
'ItagFmt': 'macInMACNoFcs.header.itag.fmt-10',
'ItagReserved': 'macInMACNoFcs.header.itag.reserved-11',
'ItagIsid': 'macInMACNoFcs.header.itag.isid-12',
'HeaderCdestinationAddressEthernet': 'macInMACNoFcs.header.cdestinationAddressEthernet-13',
'HeaderCsourceAddressEthernet': 'macInMACNoFcs.header.csourceAddressEthernet-14',
'StagEtherTypeStag': 'macInMACNoFcs.header.stag.etherTypeStag-15',
'SvlanTagSvlanPriority': 'macInMACNoFcs.header.stag.svlanTag.svlanPriority-16',
'SvlanTagSvlanCanonicalFormatIndicator': 'macInMACNoFcs.header.stag.svlanTag.svlanCanonicalFormatIndicator-17',
'SvlanTagSvlanID': 'macInMACNoFcs.header.stag.svlanTag.svlanID-18',
'CtagEtherTypeCtag': 'macInMACNoFcs.header.ctag.etherTypeCtag-19',
'CvlanTagCvlanPriority': 'macInMACNoFcs.header.ctag.cvlanTag.cvlanPriority-20',
'CvlanTagCvlanCanonicalFormatIndicator': 'macInMACNoFcs.header.ctag.cvlanTag.cvlanCanonicalFormatIndicator-21',
'CvlanTagCvlanID': 'macInMACNoFcs.header.ctag.cvlanTag.cvlanID-22',
'HeaderType': 'macInMACNoFcs.header.type-23',
}
def __init__(self, parent, list_op=False):
super(MacInMACNoFcs, self).__init__(parent, list_op)
@property
def HeaderBdestinationAddressEthernet(self):
"""
Display Name: B-Destination Address (Ethernet)
Default Value: 0
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBdestinationAddressEthernet']))
@property
def HeaderBsourceAddressEthernet(self):
"""
Display Name: B-Source Address (Ethernet)
Default Value: 0
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderBsourceAddressEthernet']))
@property
def BethernetTypeEtherTypeBtag(self):
"""
Display Name: EtherType B-tag
Default Value: 0x8100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BethernetTypeEtherTypeBtag']))
@property
def BvlanTagBvlanPriority(self):
"""
Display Name: B-VLAN Priority
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BvlanTagBvlanPriority']))
@property
def BvlanTagBvlanCanonicalFormatIndicator(self):
"""
Display Name: B-VLAN Canonical Format Indicator
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BvlanTagBvlanCanonicalFormatIndicator']))
@property
def BvlanTagBvlanID(self):
"""
Display Name: B-VLAN ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['BvlanTagBvlanID']))
@property
def ItagEtherTypeEtherTypeItag(self):
"""
Display Name: EtherType I-tag
Default Value: 0x8100
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagEtherTypeEtherTypeItag']))
@property
def ItagPcp(self):
"""
Display Name: PCP
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagPcp']))
@property
def ItagDei(self):
"""
Display Name: DEI
Default Value: 0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagDei']))
@property
def ItagFmt(self):
"""
Display Name: FMT
Default Value: 0
Value Format: decimal
Available enum values: Payload Encapsulated Wi Fcs, 0, Payload Encapsulated Wo Fcs, 1, No Encapsulation, 2, Reserved, 3
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagFmt']))
@property
def ItagReserved(self):
"""
Display Name: Reserved
Default Value: 0x0
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagReserved']))
@property
def ItagIsid(self):
"""
Display Name: I-SID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['ItagIsid']))
@property
def HeaderCdestinationAddressEthernet(self):
"""
Display Name: C-Destination Address (Ethernet)
Default Value: 0
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderCdestinationAddressEthernet']))
@property
def HeaderCsourceAddressEthernet(self):
"""
Display Name: C-Source Address (Ethernet)
Default Value: 0
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderCsourceAddressEthernet']))
@property
def StagEtherTypeStag(self):
"""
Display Name: EtherType S-tag
Default Value: 0x88A8
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['StagEtherTypeStag']))
@property
def SvlanTagSvlanPriority(self):
"""
Display Name: S-VLAN Priority
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SvlanTagSvlanPriority']))
@property
def SvlanTagSvlanCanonicalFormatIndicator(self):
"""
Display Name: S-VLAN Canonical Format Indicator
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SvlanTagSvlanCanonicalFormatIndicator']))
@property
def SvlanTagSvlanID(self):
"""
Display Name: S-VLAN ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['SvlanTagSvlanID']))
@property
def CtagEtherTypeCtag(self):
"""
Display Name: EtherType C-tag
Default Value: 0xFFFF
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CtagEtherTypeCtag']))
@property
def CvlanTagCvlanPriority(self):
"""
Display Name: C-VLAN Priority
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CvlanTagCvlanPriority']))
@property
def CvlanTagCvlanCanonicalFormatIndicator(self):
"""
Display Name: C-VLAN Canonical Format Indicator
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CvlanTagCvlanCanonicalFormatIndicator']))
@property
def CvlanTagCvlanID(self):
"""
Display Name: C-VLAN ID
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['CvlanTagCvlanID']))
@property
def HeaderType(self):
"""
Display Name: Type
Default Value: 0xFFFF
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderType']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
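# Illustrative use (a sketch; the parent stack object and the Multivalue
# setter calls follow ixnetwork_restpy conventions and are assumed here):
# mac_in_mac = MacInMACNoFcs(parent_stack)
# mac_in_mac.BvlanTagBvlanID.Single(42)  # one fixed B-VLAN ID
# mac_in_mac.ItagIsid.Increment(start_value=100, step_value=1)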
| 150 | 9,666 | 23 |
12e25ddc4af0bf599e86ed3d6dcf5866baa2d81b | 1,436 | py | Python | generate_computational_graph.py | TalkToTheGAN/RelaxTextGAN | 6d0846392c8a1267eaa103dd70492cb80024079e | [
"Apache-2.0"
] | 3 | 2019-05-30T03:40:38.000Z | 2021-04-12T06:50:41.000Z | generate_computational_graph.py | TalkToTheGAN/RelaxTextGAN | 6d0846392c8a1267eaa103dd70492cb80024079e | [
"Apache-2.0"
] | 1 | 2020-06-15T12:27:56.000Z | 2020-06-15T12:27:56.000Z | generate_computational_graph.py | TalkToTheGAN/RelaxTextGAN | 6d0846392c8a1267eaa103dd70492cb80024079e | [
"Apache-2.0"
] | null | null | null | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from models.plain_lstm import PlainLSTM
from utils import Utils
from data_loader import DataLoader
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
from torchviz import make_dot, make_dot_from_trace
parser = argparse.ArgumentParser(description='Training Parameter')
parser.add_argument('--cuda', action='store', default=None, type=int)
opt = parser.parse_args()
print(opt)
# all constants
total_epochs = 20
batch_size = 16
data_path = './data/math_equation_data.txt'
g_seq_length = 15
g_emb_dim = 8
g_hidden_dim = 8
vocab_size = 7 # need to not hard code this. Todo for later.
if __name__ == '__main__':
main() | 27.09434 | 76 | 0.727716 | import os
import random
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torch.autograd as autograd
from torch.autograd import Variable
from models.plain_lstm import PlainLSTM
from utils import Utils
from data_loader import DataLoader
import argparse
from tqdm import tqdm
import matplotlib.pyplot as plt
from torchviz import make_dot, make_dot_from_trace
parser = argparse.ArgumentParser(description='Training Parameter')
parser.add_argument('--cuda', action='store', default=None, type=int)
opt = parser.parse_args()
print(opt)
# all constants
total_epochs = 20
batch_size = 16
data_path = './data/math_equation_data.txt'
g_seq_length = 15
g_emb_dim = 8
g_hidden_dim = 8
vocab_size = 7 # need to not hard code this. Todo for later.
def main():
data_loader = DataLoader(data_path, batch_size)
generator = PlainLSTM(vocab_size, g_emb_dim, g_hidden_dim)
for data, _ in data_loader:
# params = generator.state_dict()
# print(generator.named_parameters())
for x in generator.named_parameters():
print(x)
# print(params)
generator.zero_grad()
output = generator.test_sample(batch_size, g_seq_length, vocab_size)
dot = make_dot(output, params = dict(generator.named_parameters()))
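        # make_dot walks output.grad_fn backwards to draw the autograd
        # graph; the params dict only supplies readable labels for the
        # leaf parameters.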
dot.format = 'svg'
dot.render()
break
if __name__ == '__main__':
main() | 564 | 0 | 23 |
e5af07709463973f94d2c5f0008791ffee86f7e8 | 7,928 | py | Python | course_selection/scrape_parse.py | recalapp/recal | 1464d5922f3a1fcc0d756565a3a0e8689e9e154d | [
"MIT"
] | 13 | 2017-01-27T18:07:04.000Z | 2022-02-11T19:32:29.000Z | course_selection/scrape_parse.py | kl29/recal | 1464d5922f3a1fcc0d756565a3a0e8689e9e154d | [
"MIT"
] | 14 | 2017-01-08T22:28:49.000Z | 2020-02-05T03:08:08.000Z | course_selection/scrape_parse.py | kl29/recal | 1464d5922f3a1fcc0d756565a3a0e8689e9e154d | [
"MIT"
] | 5 | 2017-12-07T17:41:35.000Z | 2020-04-27T18:24:52.000Z | """
Scrapes OIT's Web Feeds to add courses and sections to database.
Procedure:
- Get list of departments (3-letter department codes)
- Run this: http://etcweb.princeton.edu/webfeeds/courseofferings/?term=current&subject=COS
- Parse it for courses, sections, and lecture times (as recurring events)
"""
from lxml import etree
import HTMLParser
import urllib2
from bs4 import BeautifulSoup
import re
| 34.469565 | 97 | 0.577699 | """
Scrapes OIT's Web Feeds to add courses and sections to database.
Procedure:
- Get list of departments (3-letter department codes)
- Run this: http://etcweb.princeton.edu/webfeeds/courseofferings/?term=current&subject=COS
- Parse it for courses, sections, and lecture times (as recurring events)
"""
from lxml import etree
import HTMLParser
import urllib2
from bs4 import BeautifulSoup
import re
class ParseError(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
def scrape_parse_semester(term_code):
TERM_CODE = term_code
COURSE_OFFERINGS = "http://registrar.princeton.edu/course-offerings/"
FEED_PREFIX = "http://etcweb.princeton.edu/webfeeds/courseofferings/"
# Could also use 'current' instead of str(TERM_CODE), which automatically
# gets the current semester. caveat: cannot get next semester's schedule
# ahead of time
TERM_PREFIX = FEED_PREFIX + "?term=" + str(TERM_CODE)
DEP_PREFIX = TERM_PREFIX + "&subject="
# for now hardwire the namespaces--too annoying
PTON_NAMESPACE = u'http://as.oit.princeton.edu/xml/courseofferings-1_4'
CURRENT_SEMESTER = ['']
h = HTMLParser.HTMLParser()
def get_text(key, object):
return h.unescape(raise_if_none(object.find(key), "key " + key + " does not exist").text)
def get_current_semester():
""" get semester according to TERM_CODE
"""
#global CURRENT_SEMESTER
if not CURRENT_SEMESTER[0]:
parser = etree.XMLParser(ns_clean=True)
termxml = urllib2.urlopen(TERM_PREFIX)
tree = etree.parse(termxml, parser)
remove_namespace(tree, PTON_NAMESPACE)
term = tree.getroot().find('term')
CURRENT_SEMESTER[0] = {
'start_date': get_text('start_date', term),
'end_date': get_text('end_date', term),
'term_code': str(TERM_CODE),
}
return CURRENT_SEMESTER[0]
def get_department_list(seed_page):
""" get list of departments
Parses seed_page and returns a list of the departments' names.
Seed page should be "http://registrar.princeton.edu/course-offerings/"
Automatically gets the courses for the current term.
"""
soup = BeautifulSoup(seed_page)
# Example tag:
# <input name="subject" type="checkbox" value="COS">
dept_tags = soup('input', {"name": "subject"})
departments = map(lambda t: t.attrs['value'], dept_tags)
return departments
def scrape_all():
""" scrape all events from Princeton's course webfeed
"""
#global course_count
#global section_count
seed_page = urllib2.urlopen(COURSE_OFFERINGS)
departments = get_department_list(seed_page)
courses = []
for department in departments:
courses += scrape(department)
return courses
# goes through the listings for this department
def scrape(department):
""" Scrape all events listed under department
"""
parser = etree.XMLParser(ns_clean=True)
link = DEP_PREFIX + department
xmldoc = urllib2.urlopen(link)
tree = etree.parse(xmldoc, parser)
dep_courses = tree.getroot()
remove_namespace(dep_courses, PTON_NAMESPACE)
parsed_courses = []
for term in dep_courses:
for subjects in term:
for subject in subjects:
for courses in subject:
for course in courses:
x = parse_course(course, subject)
if x is not None:
parsed_courses.append(x)
return parsed_courses
def none_to_empty(text):
if text is None:
return ''
else:
return text
def none_to_empty_list(x):
if x is None:
return []
else:
return x
def raise_if_none(text, error_message):
if text is None:
raise ParseError(error_message)
return text
# Parse it for courses, sections, and lecture times (as recurring events)
# If the course with this ID exists in the database, we update the course
# Otherwise, create new course with the information
def parse_course(course, subject):
""" create a course with basic information.
"""
try:
#global new_course_count
#global course_count
return {
"title": get_text('title', course),
"guid": get_text('guid', course),
"description": none_to_empty(course.find('detail').find('description').text),
"semester": get_current_semester(),
"professors": [parse_prof(x) for x in course.find('instructors')],
"course_listings": parse_listings(course, subject),
"sections": [parse_section(x) for x in course.find('classes')]
}
except Exception as inst:
# print inst
raise inst
return None
# may decide to make this function for just one prof/listing/section, then
# do a map
def parse_prof(prof):
return {
"full_name": get_text('full_name', prof)
}
def parse_listings(course, subject):
def parse_cross_listing(cross_listing):
return {
'dept': get_text('subject', cross_listing),
'code': get_text('catalog_number', cross_listing),
'is_primary': False
}
cross_listings = [parse_cross_listing(
x) for x in none_to_empty_list(course.find('crosslistings'))]
primary_listing = {
'dept': get_text('code', subject),
'code': get_text('catalog_number', course),
'is_primary': True
}
return cross_listings + [primary_listing]
def parse_section(section):
def parse_meeting(meeting):
def get_days(meeting):
days = ""
for day in meeting.find('days'):
days += day.text + ' '
return days[:10]
            def get_location(meeting):
                location = ''
                try:
                    building = meeting.find('building').find('name').text
                    room = meeting.find('room').text
                    location = building + " " + room
                except Exception:
                    # Some meetings have no building/room; fall back to ''.
                    # (The original raised and then swallowed the exception
                    # via a return inside finally.)
                    pass
                return location
# the times are in the format:
# HH:MM AM/PM
return {
'start_time': get_text('start_time', meeting),
'end_time': get_text('end_time', meeting),
'days': get_days(meeting),
'location': get_location(meeting),
}
# NOTE: section.find('schedule') doesn't seem to be used
meetings = None
schedule = section.find('schedule')
if schedule is not None:
meetings = schedule.find('meetings')
return {
'registrar_id': get_text('class_number', section),
'name': get_text('section', section),
'type': get_text('type_name', section)[0:3].upper(),
'capacity': get_text('capacity', section),
'enrollment': get_text('enrollment', section),
'meetings': [parse_meeting(x) for x in none_to_empty_list(meetings)]
}
def remove_namespace(doc, namespace):
"""Hack to remove namespace in the document in place.
"""
ns = u'{%s}' % namespace
nsl = len(ns)
for elem in doc.getiterator():
if elem.tag.startswith(ns):
elem.tag = elem.tag[nsl:]
return scrape_all()
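# Illustrative call (the term code is hypothetical):
# courses = scrape_parse_semester(1154)
# print(courses[0]['title'], courses[0]['course_listings'])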
| 7,417 | 7 | 100 |
f2ca13584c91841e3c1379b5d63aa670e6391478 | 757 | py | Python | kotti/views/file.py | IonicaBizauKitchen/Kotti | 8f2363a058c77d5d42adb2d12715e4d0530d5208 | [
"PostgreSQL"
] | 191 | 2015-01-05T23:04:06.000Z | 2022-02-15T21:36:34.000Z | kotti/views/file.py | IonicaBizauKitchen/Kotti | 8f2363a058c77d5d42adb2d12715e4d0530d5208 | [
"PostgreSQL"
] | 198 | 2015-01-01T06:27:33.000Z | 2021-02-23T16:39:28.000Z | kotti/views/file.py | IonicaBizauKitchen/Kotti | 8f2363a058c77d5d42adb2d12715e4d0530d5208 | [
"PostgreSQL"
] | 83 | 2015-01-12T14:29:48.000Z | 2022-02-13T09:41:36.000Z | from pyramid.view import view_config
from kotti.interfaces import IFile
@view_config(
name="view",
context=IFile,
permission="view",
renderer="kotti:templates/view/file.pt",
)
@view_config(name="inline-view", context=IFile, permission="view")
@view_config(name="attachment-view", context=IFile, permission="view")
def includeme(config):
""" Pyramid includeme hook.
:param config: app config
:type config: :class:`pyramid.config.Configurator`
"""
config.scan(__name__)
| 22.264706 | 70 | 0.726552 | from pyramid.view import view_config
from kotti.interfaces import IFile
@view_config(
name="view",
context=IFile,
permission="view",
renderer="kotti:templates/view/file.pt",
)
def view(context, request):
return {}
@view_config(name="inline-view", context=IFile, permission="view")
def inline_view(context, request):
return request.uploaded_file_response(context.data)
@view_config(name="attachment-view", context=IFile, permission="view")
def attachment_view(context, request):
return request.uploaded_file_response(context.data, "attachment")
def includeme(config):
""" Pyramid includeme hook.
:param config: app config
:type config: :class:`pyramid.config.Configurator`
"""
config.scan(__name__)
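# Typical wiring (a sketch): a Pyramid app pulls these views in with
# config.include('kotti.views.file'), which invokes the includeme() hook
# above and scans this module's @view_config decorators.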
| 176 | 0 | 66 |
3644fc77790a588e2cb4c844af28ad7dff6408a8 | 2,721 | py | Python | src/trusted/validator_mips/dgen/dgen_input.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | 1 | 2021-12-23T00:36:43.000Z | 2021-12-23T00:36:43.000Z | src/trusted/validator_mips/dgen/dgen_input.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | src/trusted/validator_mips/dgen/dgen_input.py | kapkic/native_client | 51c8bc8c249d55606232ae011bdfc8b4cab3d794 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A simple recursive-descent parser for the table file format.
The grammar implemented here is roughly (taking some liberties with whitespace
and comment parsing):
table_file ::= ( BLANK_LINE | table_def ) end_of_file ;
table_def ::= "--" IDENT CITATION NL
table_header
( table_row )+ ;
table_header ::= ( IDENT "(" BITRANGE ")" )+ ;
table_row ::= ( PATTERN )+ ACTION ;
IDENT = /[a-z0-9_]+/
CITATION = "(" /[^)]+/ ")"
BITRANGE = /[0-9]+/ (":" /[0-9]+/)?
PATTERN = /[10x_]+/
ACTION = ( "=" IDENT | "->" IDENT ) ( "(" IDENT ")" )?
NL = a newline
BLANK_LINE = what you might expect it to be
"""
import re
import dgen_core
# These globals track the parser state.
_in = None
_line_no = None
_tables = None
_line = None
_last_row = None
def parse_tables(input):
"""Entry point for the parser. Input should be a file or file-like."""
global _in, _line_no, _tables
_in = input
_line_no = 0
_tables = []
next_line()
while not end_of_file():
blank_line() or table_def() or unexpected()
return _tables
| 21.768 | 80 | 0.603087 | #!/usr/bin/python2
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A simple recursive-descent parser for the table file format.
The grammar implemented here is roughly (taking some liberties with whitespace
and comment parsing):
table_file ::= ( BLANK_LINE | table_def ) end_of_file ;
table_def ::= "--" IDENT CITATION NL
table_header
( table_row )+ ;
table_header ::= ( IDENT "(" BITRANGE ")" )+ ;
table_row ::= ( PATTERN )+ ACTION ;
IDENT = /[a-z0-9_]+/
CITATION = "(" /[^)]+/ ")"
BITRANGE = /[0-9]+/ (":" /[0-9]+/)?
PATTERN = /[10x_]+/
ACTION = ( "=" IDENT | "->" IDENT ) ( "(" IDENT ")" )?
NL = a newline
BLANK_LINE = what you might expect it to be
"""
import re
import dgen_core
# These globals track the parser state.
_in = None
_line_no = None
_tables = None
_line = None
_last_row = None
def parse_tables(input):
"""Entry point for the parser. Input should be a file or file-like."""
global _in, _line_no, _tables
_in = input
_line_no = 0
_tables = []
next_line()
while not end_of_file():
blank_line() or table_def() or unexpected()
return _tables
def blank_line():
if _line:
return False
next_line()
return True
def table_def():
global _last_row
m = re.match(r'^-- ([^ ]+) \(([^)]+)\)', _line)
if not m: return False
table = dgen_core.Table(m.group(1), m.group(2))
next_line()
while blank_line(): pass
table_header(table)
_last_row = None
while not end_of_file() and not blank_line():
table_row(table)
_tables.append(table)
return True
def table_header(table):
for col in _line.split():
m = re.match(r'^([a-z0-9_]+)\(([0-9]+)(:([0-9]+))?\)$', col, re.I)
if not m: raise Exception('Invalid column header: %s' % col)
hi_bit = int(m.group(2))
if m.group(4):
lo_bit = int(m.group(4))
else:
lo_bit = hi_bit
table.add_column(m.group(1), hi_bit, lo_bit)
next_line()
def table_row(table):
global _last_row
row = _line.split()
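    # a bare '"' in a column repeats ("ditto") the value from the previous row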
for i in range(0, len(row)):
if row[i] == '"': row[i] = _last_row[i]
_last_row = row
action = row[-1]
patterns = row[:-1]
table.add_row(patterns, action)
next_line()
def end_of_file():
return _line is None
def next_line():
global _line_no, _line
_line_no += 1
_line = _in.readline()
if _line:
_line = re.sub(r'#.*', '', _line).strip()
else:
_line = None
def unexpected():
raise Exception('Line %d: Unexpected line in input: %s' % (_line_no, _line))
| 1,311 | 0 | 161 |
d830915f8e3e69a3330d6ff63d4fe215fdf4d734 | 1,254 | py | Python | app.py | wieczszy/CVsudoku | 2338d4ee8c75c556966022363d636143abd600c9 | ["MIT"] | null | null | null | app.py | wieczszy/CVsudoku | 2338d4ee8c75c556966022363d636143abd600c9 | ["MIT"] | 1 | 2022-02-10T00:37:26.000Z | 2022-02-10T00:37:26.000Z | app.py | wieczszy/CVsudoku | 2338d4ee8c75c556966022363d636143abd600c9 | ["MIT"] | null | null | null | import cv2
import numpy as np
import logging
from argparse import ArgumentParser
from src.classifier import Classifier
from src.reader import ImageReader
from src.solver import SudokuSolver
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("image", help="Input image")
parser.add_argument("-w", "--weights", help="CNN weights", default="src/models/model.h5")
args = parser.parse_args()
file_path = args.image
model_path = args.weights
reader = ImageReader()
try:
cells = reader.extract_board_cells(file_path)
except AttributeError:
print()
logging.error('\nThe image has not been read correctly - file not found!\n')
exit(0)
try:
classifier = Classifier(model_path)
classifications = classifier.classify_cells(cells)
classifications = [str(c) for c in classifications]
grid = ''.join(classifications)
except OSError:
logging.error('\nThe model weights have not been loaded - file not found!\n')
exit(0)
solver = SudokuSolver()
try:
solver.solve(grid)
except TypeError:
logging.error('The image has not been read correctly - solution not found!\n')
exit(0) | 29.162791 | 93 | 0.668262 | import cv2
import numpy as np
import logging
from argparse import ArgumentParser
from src.classifier import Classifier
from src.reader import ImageReader
from src.solver import SudokuSolver
if __name__ == "__main__":
parser = ArgumentParser()
parser.add_argument("image", help="Input image")
parser.add_argument("-w", "--weights", help="CNN weights", default="src/models/model.h5")
args = parser.parse_args()
file_path = args.image
model_path = args.weights
reader = ImageReader()
try:
cells = reader.extract_board_cells(file_path)
except AttributeError:
print()
logging.error('\nThe image has not been read correctly - file not found!\n')
exit(0)
try:
classifier = Classifier(model_path)
classifications = classifier.classify_cells(cells)
classifications = [str(c) for c in classifications]
grid = ''.join(classifications)
except OSError:
logging.error('\nThe model weights have not been loaded - file not found!\n')
exit(0)
solver = SudokuSolver()
try:
solver.solve(grid)
except TypeError:
logging.error('The image has not been read correctly - solution not found!\n')
exit(0) | 0 | 0 | 0 |
e2abc012a866c8d1954bf677e77b2c3d1ef6d887 | 2,011 | py | Python | addressBook/tests.py | rahman-ishtiyak/Directory | f6f66aaa38e596da231c555cb18c53a0669f98a9 | ["MIT"] | null | null | null | addressBook/tests.py | rahman-ishtiyak/Directory | f6f66aaa38e596da231c555cb18c53a0669f98a9 | ["MIT"] | null | null | null | addressBook/tests.py | rahman-ishtiyak/Directory | f6f66aaa38e596da231c555cb18c53a0669f98a9 | ["MIT"] | null | null | null | from django.test import TestCase
from django.test import Client
# Create your tests here.
from django.conf import settings
from django.test import TestCase
from .models import *
"""
Data is not tested (or returns 0) due to authentication; turn off authentication to get proper results.
More test cases could be added, e.g. full CRUD coverage.
Instead of testing every possible scenario, I have tried to test locally, via the client, and on the models.
"""
| 32.967213 | 102 | 0.652909 | from django.test import TestCase
from django.test import Client
# Create your tests here.
from django.conf import settings
from django.test import TestCase
from .models import *
class TestModels(TestCase):
def test_model_str_country(self):
data = Country.objects.count()
self.assertEqual(data, 0)
# success
def test_get_client(self):
data = {
"Name": "Canada",
"Code": " CN"
}
responseCountry = self.client.post("/api/country/", data=data)
data = {
"Name": "Dhaka",
}
responseState = self.client.post("/api/state/", data=data)
self.assertEqual(responseCountry.status_code, 401)
self.assertEqual(responseState.status_code, 401)
# Failed type due to authentication
def test_get_data_state(self):
self.assertEqual(str(State.objects.count()), '0')
# success but return 0 due to authentication
def test_can_filter(self):
Name = Country.objects.filter(Name='Bangladesh')
Code = Country.objects.filter(Code='BD')
self.assertEqual(Name.count(), 0)
self.assertEqual(Code.count(), 0)
# Failed
def test_api_status(self):
Cresponse = self.client.get('/api/country/')
Sresponse = self.client.get('/api/state/')
Aresponse = self.client.get('/api/address/')
# Check that the response is 200 OK.
self.assertEqual(Cresponse.status_code, 200)
self.assertEqual(Sresponse.status_code, 200)
self.assertEqual(Aresponse.status_code, 200)
self.assertEqual(len(Cresponse.context['Name']), 2)
self.assertEqual(len(Sresponse.context['Name']), 2)
self.assertEqual(len(Aresponse.context['Name']), 2)
"""
Data is not tested (or returns 0) due to authentication; turn off authentication to get proper results.
More test cases could be added, e.g. full CRUD coverage.
Instead of testing every possible scenario, I have tried to test locally, via the client, and on the models.
"""
| 1,425 | 6 | 157 |
81c6e929131f5087231b8a102174208c5768f655 | 7,687 | py | Python | arviz/plots/backends/__init__.py | zaxtax/arviz | c78deefeeb355d3cee11a93fc148f9198dde8b35 | ["Apache-2.0"] | 55 | 2015-07-29T12:06:34.000Z | 2018-03-17T05:42:01.000Z | arviz/plots/backends/__init__.py | zaxtax/arviz | c78deefeeb355d3cee11a93fc148f9198dde8b35 | ["Apache-2.0"] | 11 | 2015-08-07T20:56:41.000Z | 2018-03-21T15:24:08.000Z | arviz/plots/backends/__init__.py | zaxtax/arviz | c78deefeeb355d3cee11a93fc148f9198dde8b35 | ["Apache-2.0"] | 17 | 2015-07-29T13:55:01.000Z | 2018-03-02T17:16:12.000Z | # pylint: disable=no-member,invalid-name,redefined-outer-name
"""ArviZ plotting backends."""
import re
import numpy as np
from pandas import DataFrame
from ...rcparams import rcParams
__all__ = [
"to_cds",
"output_notebook",
"output_file",
"ColumnDataSource",
"create_layout",
"show_layout",
]
def to_cds(
data,
var_names=None,
groups=None,
dimensions=None,
group_info=True,
var_name_format=None,
index_origin=None,
):
"""Transform data to ColumnDataSource (CDS) compatible with Bokeh.
Uses `_ARVIZ_GROUP_` and `_ARVIZ_CDS_SELECTION_` to separate var_name
from group and dimensions in CDS columns.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_inference_data for details
var_names : str or list of str, optional
Variables to be processed, if None all variables are processed.
groups : str or list of str, optional
Select groups for CDS. Default groups are {"posterior_groups", "prior_groups",
"posterior_groups_warmup"}
- posterior_groups: posterior, posterior_predictive, sample_stats
- prior_groups: prior, prior_predictive, sample_stats_prior
- posterior_groups_warmup: warmup_posterior, warmup_posterior_predictive,
warmup_sample_stats
ignore_groups : str or list of str, optional
Ignore specific groups from CDS.
    dimensions : str or list of str, optional
        Select dimensions along which to slice the data. By default uses ("chain", "draw").
group_info : bool
Add group info for `var_name_format`
var_name_format : str or tuple of tuple of string, optional
Select column name format for non-scalar input.
Predefined options are {"brackets", "underscore", "cds"}
"brackets":
- add_group_info == False: ``theta[0,0]``
- add_group_info == True: ``theta_posterior[0,0]``
"underscore":
- add_group_info == False: ``theta_0_0``
- add_group_info == True: ``theta_posterior_0_0_``
"cds":
- add_group_info == False: ``theta_ARVIZ_CDS_SELECTION_0_0``
- add_group_info == True: ``theta_ARVIZ_GROUP_posterior__ARVIZ_CDS_SELECTION_0_0``
tuple:
Structure:
- tuple: (dim_info, group_info)
- dim_info: (str: `.join` separator,
str: dim_separator_start,
str: dim_separator_end)
- group_info: (str: group separator start, str: group separator end)
Example: ((",", "[", "]"), ("_", ""))
- add_group_info == False: ``theta[0,0]``
- add_group_info == True: ``theta_posterior[0,0]``
index_origin : int, optional
Start parameter indices from `index_origin`. Either 0 or 1.
Returns
-------
bokeh.models.ColumnDataSource object
"""
from ...utils import flatten_inference_data_to_dict
if var_name_format is None:
var_name_format = "cds"
cds_dict = flatten_inference_data_to_dict(
data=data,
var_names=var_names,
groups=groups,
dimensions=dimensions,
group_info=group_info,
index_origin=index_origin,
var_name_format=var_name_format,
)
cds_data = ColumnDataSource(DataFrame.from_dict(cds_dict, orient="columns"))
return cds_data
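# Usage sketch (illustrative): given an InferenceData object such as
# idata = az.load_arviz_data("centered_eight"),
#   cds = to_cds(idata, var_names=["theta"], var_name_format="brackets")
# yields a ColumnDataSource with one column per flattened variable selection,
# e.g. names like "theta_posterior[0,0]".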
def output_notebook(*args, **kwargs):
"""Wrap func:`bokeh.plotting.output_notebook`."""
import bokeh.plotting as bkp
return bkp.output_notebook(*args, **kwargs)
def output_file(*args, **kwargs):
"""Wrap :func:`bokeh.plotting.output_file`."""
import bokeh.plotting as bkp
return bkp.output_file(*args, **kwargs)
def ColumnDataSource(*args, **kwargs):
"""Wrap bokeh.models.ColumnDataSource."""
from bokeh.models import ColumnDataSource
return ColumnDataSource(*args, **kwargs)
def create_layout(ax, force_layout=False):
"""Transform bokeh array of figures to layout."""
ax = np.atleast_2d(ax)
subplot_order = rcParams["plot.bokeh.layout.order"]
if force_layout:
from bokeh.layouts import gridplot as layout
ax = ax.tolist()
layout_args = {
"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
"toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
}
elif any(item in subplot_order for item in ("row", "column")):
# check number of rows
match = re.match(r"(\d*)(row|column)", subplot_order)
n = int(match.group(1)) if match.group(1) is not None else 1
subplot_order = match.group(2)
# set up 1D list of axes
ax = [item for item in ax.ravel().tolist() if item is not None]
layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
if subplot_order == "row" and n == 1:
from bokeh.layouts import row as layout
elif subplot_order == "column" and n == 1:
from bokeh.layouts import column as layout
else:
from bokeh.layouts import layout
if n != 1:
ax = np.array(ax + [None for _ in range(int(np.ceil(len(ax) / n)) - len(ax))])
if subplot_order == "row":
ax = ax.reshape(n, -1)
else:
ax = ax.reshape(-1, n)
ax = ax.tolist()
else:
if subplot_order in ("square", "square_trimmed"):
ax = [item for item in ax.ravel().tolist() if item is not None]
n = int(np.ceil(len(ax) ** 0.5))
ax = ax + [None for _ in range(n**2 - len(ax))]
ax = np.array(ax).reshape(n, n)
ax = ax.tolist()
if (subplot_order == "square_trimmed") and any(
all(item is None for item in row) for row in ax
):
from bokeh.layouts import layout
ax = [row for row in ax if not all(item is None for item in row)]
layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
else:
from bokeh.layouts import gridplot as layout
layout_args = {
"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
"toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
}
# ignore "fixed" sizing_mode without explicit width and height
if layout_args.get("sizing_mode", "") == "fixed":
layout_args.pop("sizing_mode")
return layout(ax, **layout_args)
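# Sketch of the "square" branch above: 5 figures give n = ceil(sqrt(5)) = 3,
# the list is padded with None to 9 slots and reshaped 3x3; "square_trimmed"
# additionally drops rows that are entirely None before building the layout.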
def show_layout(ax, show=True, force_layout=False):
"""Create a layout and call bokeh show."""
if show is None:
show = rcParams["plot.bokeh.show"]
if show:
import bokeh.plotting as bkp
layout = create_layout(ax, force_layout=force_layout)
bkp.show(layout)
def _copy_docstring(lib, function):
"""Extract docstring from function."""
import importlib
try:
module = importlib.import_module(lib)
func = getattr(module, function)
doc = func.__doc__
except ImportError:
doc = f"Failed to import function {function} from {lib}"
if not isinstance(doc, str):
doc = ""
return doc
# TODO: try copying substitutions too, or autoreplace them ourselves
output_notebook.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_notebook")
output_file.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_file")
ColumnDataSource.__doc__ += "\n\n" + _copy_docstring("bokeh.models", "ColumnDataSource")
| 34.626126 | 98 | 0.619748 | # pylint: disable=no-member,invalid-name,redefined-outer-name
"""ArviZ plotting backends."""
import re
import numpy as np
from pandas import DataFrame
from ...rcparams import rcParams
__all__ = [
"to_cds",
"output_notebook",
"output_file",
"ColumnDataSource",
"create_layout",
"show_layout",
]
def to_cds(
data,
var_names=None,
groups=None,
dimensions=None,
group_info=True,
var_name_format=None,
index_origin=None,
):
"""Transform data to ColumnDataSource (CDS) compatible with Bokeh.
Uses `_ARVIZ_GROUP_` and `_ARVIZ_CDS_SELECTION_` to separate var_name
from group and dimensions in CDS columns.
Parameters
----------
data : obj
Any object that can be converted to an az.InferenceData object
Refer to documentation of az.convert_to_inference_data for details
var_names : str or list of str, optional
Variables to be processed, if None all variables are processed.
groups : str or list of str, optional
Select groups for CDS. Default groups are {"posterior_groups", "prior_groups",
"posterior_groups_warmup"}
- posterior_groups: posterior, posterior_predictive, sample_stats
- prior_groups: prior, prior_predictive, sample_stats_prior
- posterior_groups_warmup: warmup_posterior, warmup_posterior_predictive,
warmup_sample_stats
ignore_groups : str or list of str, optional
Ignore specific groups from CDS.
    dimensions : str or list of str, optional
        Select dimensions along which to slice the data. By default uses ("chain", "draw").
group_info : bool
Add group info for `var_name_format`
var_name_format : str or tuple of tuple of string, optional
Select column name format for non-scalar input.
Predefined options are {"brackets", "underscore", "cds"}
"brackets":
- add_group_info == False: ``theta[0,0]``
- add_group_info == True: ``theta_posterior[0,0]``
"underscore":
- add_group_info == False: ``theta_0_0``
- add_group_info == True: ``theta_posterior_0_0_``
"cds":
- add_group_info == False: ``theta_ARVIZ_CDS_SELECTION_0_0``
- add_group_info == True: ``theta_ARVIZ_GROUP_posterior__ARVIZ_CDS_SELECTION_0_0``
tuple:
Structure:
- tuple: (dim_info, group_info)
- dim_info: (str: `.join` separator,
str: dim_separator_start,
str: dim_separator_end)
- group_info: (str: group separator start, str: group separator end)
Example: ((",", "[", "]"), ("_", ""))
- add_group_info == False: ``theta[0,0]``
- add_group_info == True: ``theta_posterior[0,0]``
index_origin : int, optional
Start parameter indices from `index_origin`. Either 0 or 1.
Returns
-------
bokeh.models.ColumnDataSource object
"""
from ...utils import flatten_inference_data_to_dict
if var_name_format is None:
var_name_format = "cds"
cds_dict = flatten_inference_data_to_dict(
data=data,
var_names=var_names,
groups=groups,
dimensions=dimensions,
group_info=group_info,
index_origin=index_origin,
var_name_format=var_name_format,
)
cds_data = ColumnDataSource(DataFrame.from_dict(cds_dict, orient="columns"))
return cds_data
def output_notebook(*args, **kwargs):
"""Wrap func:`bokeh.plotting.output_notebook`."""
import bokeh.plotting as bkp
return bkp.output_notebook(*args, **kwargs)
def output_file(*args, **kwargs):
"""Wrap :func:`bokeh.plotting.output_file`."""
import bokeh.plotting as bkp
return bkp.output_file(*args, **kwargs)
def ColumnDataSource(*args, **kwargs):
"""Wrap bokeh.models.ColumnDataSource."""
from bokeh.models import ColumnDataSource
return ColumnDataSource(*args, **kwargs)
def create_layout(ax, force_layout=False):
"""Transform bokeh array of figures to layout."""
ax = np.atleast_2d(ax)
subplot_order = rcParams["plot.bokeh.layout.order"]
if force_layout:
from bokeh.layouts import gridplot as layout
ax = ax.tolist()
layout_args = {
"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
"toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
}
elif any(item in subplot_order for item in ("row", "column")):
# check number of rows
match = re.match(r"(\d*)(row|column)", subplot_order)
n = int(match.group(1)) if match.group(1) is not None else 1
subplot_order = match.group(2)
# set up 1D list of axes
ax = [item for item in ax.ravel().tolist() if item is not None]
layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
if subplot_order == "row" and n == 1:
from bokeh.layouts import row as layout
elif subplot_order == "column" and n == 1:
from bokeh.layouts import column as layout
else:
from bokeh.layouts import layout
if n != 1:
ax = np.array(ax + [None for _ in range(int(np.ceil(len(ax) / n)) - len(ax))])
if subplot_order == "row":
ax = ax.reshape(n, -1)
else:
ax = ax.reshape(-1, n)
ax = ax.tolist()
else:
if subplot_order in ("square", "square_trimmed"):
ax = [item for item in ax.ravel().tolist() if item is not None]
n = int(np.ceil(len(ax) ** 0.5))
ax = ax + [None for _ in range(n**2 - len(ax))]
ax = np.array(ax).reshape(n, n)
ax = ax.tolist()
if (subplot_order == "square_trimmed") and any(
all(item is None for item in row) for row in ax
):
from bokeh.layouts import layout
ax = [row for row in ax if not all(item is None for item in row)]
layout_args = {"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"]}
else:
from bokeh.layouts import gridplot as layout
layout_args = {
"sizing_mode": rcParams["plot.bokeh.layout.sizing_mode"],
"toolbar_location": rcParams["plot.bokeh.layout.toolbar_location"],
}
# ignore "fixed" sizing_mode without explicit width and height
if layout_args.get("sizing_mode", "") == "fixed":
layout_args.pop("sizing_mode")
return layout(ax, **layout_args)
def show_layout(ax, show=True, force_layout=False):
"""Create a layout and call bokeh show."""
if show is None:
show = rcParams["plot.bokeh.show"]
if show:
import bokeh.plotting as bkp
layout = create_layout(ax, force_layout=force_layout)
bkp.show(layout)
def _copy_docstring(lib, function):
"""Extract docstring from function."""
import importlib
try:
module = importlib.import_module(lib)
func = getattr(module, function)
doc = func.__doc__
except ImportError:
doc = f"Failed to import function {function} from {lib}"
if not isinstance(doc, str):
doc = ""
return doc
# TODO: try copying substitutions too, or autoreplace them ourselves
output_notebook.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_notebook")
output_file.__doc__ += "\n\n" + _copy_docstring("bokeh.plotting", "output_file")
ColumnDataSource.__doc__ += "\n\n" + _copy_docstring("bokeh.models", "ColumnDataSource")
| 0 | 0 | 0 |
4038bdf4d214176cf5c3d3b92c8642280a111fe2 | 513 | py | Python | web/wsgi.py | apsun/VNaaS | d7f8c3c367068bc14778e7182a342ef1b6c0c634 | ["MIT"] | 6 | 2016-05-18T16:11:29.000Z | 2021-02-25T00:54:31.000Z | web/wsgi.py | apsun/VNaaS | d7f8c3c367068bc14778e7182a342ef1b6c0c634 | ["MIT"] | null | null | null | web/wsgi.py | apsun/VNaaS | d7f8c3c367068bc14778e7182a342ef1b6c0c634 | ["MIT"] | null | null | null | #!/usr/bin/env python
import sys
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import vnaas
PORT = 8080
if __name__ == "__main__":
main()
| 20.52 | 54 | 0.653021 | #!/usr/bin/env python
import sys
from tornado.wsgi import WSGIContainer
from tornado.httpserver import HTTPServer
from tornado.ioloop import IOLoop
import vnaas
PORT = 8080
def main():
if len(sys.argv) == 1:
port = PORT
elif len(sys.argv) == 2:
port = int(sys.argv[1])
else:
print("usage: wsgi.py [port]")
return
http_server = HTTPServer(WSGIContainer(vnaas.app))
http_server.listen(port)
IOLoop.instance().start()
if __name__ == "__main__":
main()
| 276 | 0 | 23 |
3c3122c833af7f81253516b14e983bfae91fa68f | 266 | py | Python | Python-Programing-Basics/First Steps in Coding/First Steps in Coding - Lab/06_concatenate_data/concatanate_date.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | ["MIT"] | null | null | null | Python-Programing-Basics/First Steps in Coding/First Steps in Coding - Lab/06_concatenate_data/concatanate_date.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | ["MIT"] | null | null | null | Python-Programing-Basics/First Steps in Coding/First Steps in Coding - Lab/06_concatenate_data/concatanate_date.py | alexanderivanov2/Softuni-Software-Engineering | 8adb96f445f1da17dbb6eded9e9594319154c7e7 | ["MIT"] | null | null | null | first_name = input()
last_name = input()
age = int(input())
town = input()
concatanate_variables(first_name, last_name, age, town)
| 22.166667 | 77 | 0.703008 | first_name = input()
last_name = input()
age = int(input())
town = input()
def concatanate_variables(fn, ln, ages, place_town):
print(f"You are {fn} {ln}, a {ages}-years old person from {place_town}.")
concatanate_variables(first_name, last_name, age, town)
| 109 | 0 | 23 |
1c8ba93ca1af203359817567224495c00bb7482d | 1,258 | py | Python | or78_helpers.py | philjonas/oregon-trail-1978-python | c47efaa6b2a6e8898f1cd8f43ba7ce47b04c0045 | ["MIT"] | 4 | 2020-10-29T01:55:20.000Z | 2021-11-11T19:07:41.000Z | or78_helpers.py | philjonas/oregon-trail-1978-python | c47efaa6b2a6e8898f1cd8f43ba7ce47b04c0045 | ["MIT"] | null | null | null | or78_helpers.py | philjonas/oregon-trail-1978-python | c47efaa6b2a6e8898f1cd8f43ba7ce47b04c0045 | ["MIT"] | 1 | 2021-01-16T23:45:41.000Z | 2021-01-16T23:45:41.000Z | import random
import time
| 26.765957 | 62 | 0.629571 | import random
import time
def input_yes_no(message):
reply = input(message)
return True if 'y' in reply else False
def input_int(message):
text_2_int = None
while text_2_int == None:
try:
text_2_int = int(input(message))
except:
text_2_int = None
return text_2_int
def shooting(shooting_level):
words = ["bang", "blam", "pow", "wham"]
word = random.choice(words)
t0 = time.time()
typed_word = input("TYPE {}: ".format(word))
t1 = time.time()
B1 = (t1-t0)-(shooting_level-1)
if typed_word != word:
return 9
return max(B1, 0)
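    # Score is the elapsed typing time minus a handicap of (shooting_level - 1)
    # seconds, floored at 0; a mistyped word gets the worst score, 9.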
def illness(this_vars):
RND = random.random()
if 100*RND < 10+35*(this_vars.choice_of_eating-1):
print("MILD ILLNESS---MEDICINE USED")
this_vars.total_mileage -= 5
this_vars.amount_spent_on_miscellaneous -= 2
elif 100*RND < 100-(40/4**(this_vars.choice_of_eating-1)):
print("BAD ILLNESS---MEDICINE USED")
this_vars.total_mileage -= 5
this_vars.amount_spent_on_miscellaneous -= 5
else:
print("SERIOUS ILLNESS")
print("YOU MUST STOP FOR MEDICAL ATTENTION")
this_vars.amount_spent_on_miscellaneous -= 10
this_vars.has_illness = True
| 1,136 | 0 | 92 |
30b872192e3b8c3c9605c77fdc5fa1954cca85e6 | 1,033 | py | Python | aiida_vasp/io/tests/test_outcar_io.py | chrisjsewell/aiida-vasp | 11b6abf728efdba475c2fa24ede41c4ffe8fac64 | ["MIT"] | 1 | 2021-06-13T14:40:31.000Z | 2021-06-13T14:40:31.000Z | aiida_vasp/io/tests/test_outcar_io.py | chrisjsewell/aiida-vasp | 11b6abf728efdba475c2fa24ede41c4ffe8fac64 | ["MIT"] | null | null | null | aiida_vasp/io/tests/test_outcar_io.py | chrisjsewell/aiida-vasp | 11b6abf728efdba475c2fa24ede41c4ffe8fac64 | ["MIT"] | null | null | null | """Test the OUTCAR io interface"""
# pylint: disable=unused-import,redefined-outer-name,unused-argument,unused-wildcard-import,wildcard-import
import pytest
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.fixtures.testdata import data_path
from aiida_vasp.io.outcar import OutcarParser
def test_parse_outcar():
"""Parse a reference OUTCAR file with the OutcarParser and compare the result to a reference value."""
file_name = 'OUTCAR'
path = data_path('outcar', file_name)
parser = OutcarParser(file_path=path)
params = parser.get_quantity('outcar-parameters', {})
result = params['outcar-parameters'].get_dict()
assert result['outcar-volume'] == 65.94
assert result['outcar-efermi'] == 7.2948
assert result['outcar-energies']
assert result['symmetries']['num_space_group_operations'] == 48
assert result['symmetries']['num_point_group_operations'] == 48
assert result['symmetries']['point_symmetry'] == 'O_h'
assert result['symmetries']['space_group'] == 'D_2d'
| 41.32 | 107 | 0.734753 | """Test the OUTCAR io interface"""
# pylint: disable=unused-import,redefined-outer-name,unused-argument,unused-wildcard-import,wildcard-import
import pytest
from aiida_vasp.utils.fixtures import *
from aiida_vasp.utils.fixtures.testdata import data_path
from aiida_vasp.io.outcar import OutcarParser
def test_parse_outcar():
"""Parse a reference OUTCAR file with the OutcarParser and compare the result to a reference value."""
file_name = 'OUTCAR'
path = data_path('outcar', file_name)
parser = OutcarParser(file_path=path)
params = parser.get_quantity('outcar-parameters', {})
result = params['outcar-parameters'].get_dict()
assert result['outcar-volume'] == 65.94
assert result['outcar-efermi'] == 7.2948
assert result['outcar-energies']
assert result['symmetries']['num_space_group_operations'] == 48
assert result['symmetries']['num_point_group_operations'] == 48
assert result['symmetries']['point_symmetry'] == 'O_h'
assert result['symmetries']['space_group'] == 'D_2d'
| 0 | 0 | 0 |
78dfb0de46f72805031b6ac3304e2e0cb7627623 | 7,591 | py | Python | my_models/action_conv.py | Yifanfanfanfan/sifar-pytorch | 888a18414f7187a6fe7dddc2bf5deb9fa2700e63 | ["Apache-2.0"] | 20 | 2022-02-19T13:16:07.000Z | 2022-03-24T02:24:00.000Z | my_models/action_conv.py | Yifanfanfanfan/sifar-pytorch | 888a18414f7187a6fe7dddc2bf5deb9fa2700e63 | ["Apache-2.0"] | null | null | null | my_models/action_conv.py | Yifanfanfanfan/sifar-pytorch | 888a18414f7187a6fe7dddc2bf5deb9fa2700e63 | ["Apache-2.0"] | 5 | 2022-02-07T16:43:11.000Z | 2022-02-21T10:53:15.000Z | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
import logging
from einops import rearrange, reduce, repeat
from timm.models import resnet50, tv_resnet101, tv_resnet152
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import torchvision.models as models
_logger = logging.getLogger(__name__)
default_cfgs = {
# ResNet
'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'),
'resnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),
'resnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth',
interpolation='bicubic'),
'resnet101': _cfg(url='', interpolation='bicubic'),
'resnet101d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet152': _cfg(url='', interpolation='bicubic'),
'resnet200': _cfg(url='', interpolation='bicubic'),
}
class ConvActionModule(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
@register_model
@register_model
@register_model
'''
@register_model
def action_tf_efficientnetv2_m_in21k(pretrained=False, **kwargs):
num_features = 2048
model_kwargs = dict(num_features=num_features, **kwargs)
model = ConvActionModule(backbone=None, **model_kwargs)
    backbone = tf_efficientnetv2_m_in21k(pretrained=pretrained)  # timm backbone; import needed if this block is revived
    print(backbone)
    model.backbone = backbone
    model.default_cfg = backbone.default_cfg
return model
'''
| 39.536458 | 151 | 0.661178 | # --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu
# --------------------------------------------------------
import torch
import torch.nn as nn
from timm.models.layers import DropPath, to_2tuple, trunc_normal_
from timm.models.registry import register_model
import logging
from einops import rearrange, reduce, repeat
from timm.models import resnet50, tv_resnet101, tv_resnet152
from timm.data import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD
import torchvision.models as models
_logger = logging.getLogger(__name__)
def _cfg(url='', **kwargs):
return {
'url': url,
'num_classes': 1000, 'input_size': (3, 224, 224), 'pool_size': (7, 7),
'crop_pct': 0.875, 'interpolation': 'bilinear',
'mean': IMAGENET_DEFAULT_MEAN, 'std': IMAGENET_DEFAULT_STD,
'first_conv': 'conv1', 'classifier': 'fc',
**kwargs
}
default_cfgs = {
# ResNet
'resnet18': _cfg(url='https://download.pytorch.org/models/resnet18-5c106cde.pth'),
'resnet34': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet34-43635321.pth'),
'resnet50': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet50_ram-a26f946b.pth',
interpolation='bicubic'),
'resnet101': _cfg(url='', interpolation='bicubic'),
'resnet101d': _cfg(
url='https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/resnet101d_ra2-2803ffab.pth',
interpolation='bicubic', first_conv='conv1.0', input_size=(3, 256, 256), pool_size=(8, 8),
crop_pct=1.0, test_input_size=(3, 320, 320)),
'resnet152': _cfg(url='', interpolation='bicubic'),
'resnet200': _cfg(url='', interpolation='bicubic'),
}
class ConvActionModule(nn.Module):
r""" Swin Transformer
A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
https://arxiv.org/pdf/2103.14030
Args:
img_size (int | tuple(int)): Input image size. Default 224
patch_size (int | tuple(int)): Patch size. Default: 4
in_chans (int): Number of input image channels. Default: 3
num_classes (int): Number of classes for classification head. Default: 1000
embed_dim (int): Patch embedding dimension. Default: 96
depths (tuple(int)): Depth of each Swin Transformer layer.
num_heads (tuple(int)): Number of attention heads in different layers.
window_size (int): Window size. Default: 7
mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4
qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. Default: None
drop_rate (float): Dropout rate. Default: 0
attn_drop_rate (float): Attention dropout rate. Default: 0
drop_path_rate (float): Stochastic depth rate. Default: 0.1
norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
ape (bool): If True, add absolute position embedding to the patch embedding. Default: False
patch_norm (bool): If True, add normalization after patch embedding. Default: True
use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
"""
def __init__(self, backbone=None, duration=8, img_size=224, in_chans=3, num_classes=1000, num_features=0,
super_img_rows=1, default_cfg=None, **kwargs):
super().__init__()
self.backbone = backbone
self.num_features = int(num_features)
self.duration = duration
self.num_classes = num_classes
self.super_img_rows = super_img_rows
self.default_cfg = default_cfg
self.img_size = img_size
self.frame_padding = self.duration % super_img_rows
if self.frame_padding != 0:
self.frame_padding = self.super_img_rows - self.frame_padding
self.duration += self.frame_padding
# assert (self.duration % super_img_rows) == 0, 'number of fames must be a multiple of the rows of the super image'
self.avgpool = nn.AdaptiveAvgPool2d(1)
self.head = nn.Linear(self.num_features, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
print('image_size:', self.img_size, 'padding frame:', self.frame_padding, 'super_img_size:', (super_img_rows, self.duration // super_img_rows))
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
def pad_frames(self, x):
frame_num = self.duration - self.frame_padding
x = x.view((-1,3*frame_num)+x.size()[2:])
x_padding = torch.zeros((x.shape[0], 3*self.frame_padding) + x.size()[2:]).cuda()
x = torch.cat((x, x_padding), dim=1)
assert x.shape[1] == 3 * self.duration, 'frame number %d not the same as adjusted input size %d' % (x.shape[1], 3 * self.duration)
return x
def create_super_img(self, x):
input_size = x.shape[-2:]
if input_size != self.img_size:
x = nn.functional.interpolate(x, size=self.img_size, mode='bilinear')
x = rearrange(x, 'b (th tw c) h w -> b c (th h) (tw w)', th=self.super_img_rows, c=3)
return x
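        # Shape sketch: with duration=8, super_img_rows=2 and 224x224 frames,
        # (B, 3*8, 224, 224) becomes a (B, 3, 2*224, 4*224) "super image".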
def forward_features(self, x):
# x = rearrange(x, 'b (t c) h w -> b c h (t w)', t=self.duration)
# in evaluation, it's Bx(num_crops*num_cips*num_frames*3)xHxW
if self.frame_padding > 0:
x = self.pad_frames(x)
else:
x = x.view((-1,3*self.duration)+x.size()[2:])
x = self.create_super_img(x)
x = self.backbone.forward_features(x)
x = self.avgpool(x)
x = torch.flatten(x, 1)
return x
def forward(self, x):
x = self.forward_features(x)
x = self.head(x)
return x
@register_model
def action_conv_resnet50(pretrained=False, **kwargs):
num_features = 2048
model = ConvActionModule(backbone=None, num_features=num_features, **kwargs)
backbone = resnet50(pretrained=pretrained)
model.backbone = backbone
model.default_cfg = backbone.default_cfg
return model
@register_model
def action_conv_resnet101(pretrained=False, **kwargs):
num_features = 2048
model = ConvActionModule(backbone=None, num_features=num_features, **kwargs)
backbone = tv_resnet101(pretrained=pretrained)
model.backbone = backbone
model.default_cfg = backbone.default_cfg
return model
@register_model
def action_conv_resnet152(pretrained=False, **kwargs):
num_features = 2048
model = ConvActionModule(backbone=None, num_features=num_features, **kwargs)
backbone = tv_resnet152(pretrained=pretrained)
model.backbone = backbone
model.default_cfg = backbone.default_cfg
return model
'''
@register_model
def action_tf_efficientnetv2_m_in21k(pretrained=False, **kwargs):
num_features = 2048
model_kwargs = dict(num_features=num_features, **kwargs)
model = ConvActionModule(backbone=None, **model_kwargs)
    backbone = tf_efficientnetv2_m_in21k(pretrained=pretrained)  # timm backbone; import needed if this block is revived
    print(backbone)
    model.backbone = backbone
    model.default_cfg = backbone.default_cfg
return model
'''
| 3,719 | 0 | 251 |
09417823733242741dd9965f5c2ed1a6be80d9de | 1,455 | py | Python | backend/db/fields.py | the-lans/FastAPITemplate | 51c25622944b26fde545fec1824b2e334b020662 | ["MIT"] | 2 | 2021-08-15T09:07:21.000Z | 2021-08-15T21:03:37.000Z | backend/db/fields.py | the-lans/YaML | cded6010b800f9a0bf00e50f0a144d9b1fc85959 | ["MIT"] | null | null | null | backend/db/fields.py | the-lans/YaML | cded6010b800f9a0bf00e50f0a144d9b1fc85959 | ["MIT"] | 1 | 2021-08-15T21:03:40.000Z | 2021-08-15T21:03:40.000Z | from typing import Union, Iterable, Mapping, Any
import peewee
from backend.library.decorators.cache import unified
| 29.1 | 102 | 0.578007 | from typing import Union, Iterable, Mapping, Any
import peewee
from backend.library.decorators.cache import unified
class OptionsField(peewee.IntegerField):
def __init__(
self,
options: Union[Iterable, Mapping[int, Any]] = None,
*args,
first_i=0,
**kwargs,
):
if options is not None: # for migrations
if not isinstance(options, Mapping):
options = {i + first_i: item for i, item in enumerate(options)}
else:
for i in options.keys():
assert isinstance(i, int), f'All keys for {self.__class__.__name__} should be int'
self._options = options
super().__init__(*args, **kwargs)
@property
def options(self):
return self._options.values()
def __contains__(self, item):
return self.options.__contains__(item)
def python_value(self, value):
if value is not None:
if isinstance(value, int):
return self._options[value]
elif isinstance(value, str) and value in self._options.values():
return value
@unified
def db_value(self, value):
if value is None:
return
for i, item in self._options.items():
if value == item:
return i
if value in self._options:
return value
raise ValueError(f'There\'s no such option "{value}"')
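# Usage sketch (hypothetical model; assumes a configured peewee database):
#   class Task(peewee.Model):
#       status = OptionsField(["new", "active", "done"])  # stored as ints 0..2
#   Task(status="active")  # db_value maps "active" -> 1, python_value maps back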
| 1,134 | 180 | 23 |
aefa93d7bcd48c97f558b280687e6727b5aa6b07 | 1,276 | py | Python | typing/crawl.py | zhuning1999/PythonCode | 8ca9aed385af64763c79a13f1bf9c5b67b0c8a63 | ["MIT"] | 28 | 2019-11-08T03:09:59.000Z | 2022-03-09T06:59:04.000Z | typing/crawl.py | zhuning1999/PythonCode | 8ca9aed385af64763c79a13f1bf9c5b67b0c8a63 | ["MIT"] | null | null | null | typing/crawl.py | zhuning1999/PythonCode | 8ca9aed385af64763c79a13f1bf9c5b67b0c8a63 | ["MIT"] | 50 | 2019-09-17T15:28:34.000Z | 2022-03-29T09:10:12.000Z | import time
import random
import requests
from lxml import etree
headers = {
"UserAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
if __name__ == "__main__":
crawl_main()
| 23.2 | 135 | 0.552508 | import time
import random
import requests
from lxml import etree
headers = {
"UserAgent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.132 Safari/537.36"
}
def is_chinese(word):
    # Check whether the word contains a Chinese character
for ch in word:
if '\u4e00' <= ch <= '\u9fff':
return True
return False
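# e.g. is_chinese("中文") -> True, is_chinese("abc") -> False;
# '\u4e00'..'\u9fff' is the CJK Unified Ideographs block.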
def get_page(url):
    # Fetch the page
time.sleep(random.randint(1, 4))
res = requests.get(url, headers=headers)
res.encoding = "utf-8"
    # Parse the page
et = etree.HTML(res.text)
text_list = et.xpath('//*[@id="article"]/div/p/span/text()')
result = []
for text in text_list:
if is_chinese(text[0]):
pass
else:
            if text[1] == ":":
                result.append(text.split(":")[1])
            else:
                result.append(text.split("：")[1])  # fullwidth-colon variant
# print("\n".join(result))
save_text(result)
def save_text(text):
    # Save the results
with open("text.txt", "w", encoding="utf-8") as f:
f.write("\n".join(text))
def crawl_main():
    # Main crawl entry point
start_url = "https://baijiahao.baidu.com/s?id=1608464841941419175&wfr=spider&for=pc"
get_page(start_url)
if __name__ == "__main__":
crawl_main()
| 945 | 0 | 100 |
0ee16d8c95d934e2d3c6b60aa7145bb28cff6d95 | 1,429 | py | Python | endtoend_tests/grep.py | pchaigno/grr | 69c81624c281216a45c4bb88a9d4e4b0613a3556 | ["Apache-2.0"] | 1 | 2015-01-07T05:29:57.000Z | 2015-01-07T05:29:57.000Z | endtoend_tests/grep.py | pchaigno/grr | 69c81624c281216a45c4bb88a9d4e4b0613a3556 | ["Apache-2.0"] | null | null | null | endtoend_tests/grep.py | pchaigno/grr | 69c81624c281216a45c4bb88a9d4e4b0613a3556 | ["Apache-2.0"] | null | null | null | #!/usr/bin/env python
"""End to end tests for lib.flows.general.grep."""
from grr.endtoend_tests import base
from grr.lib import aff4
from grr.lib.rdfvalues import client as rdf_client
class TestSearchFiles(base.AutomatedTest):
"""Test SearchFileContent."""
platforms = ["Linux"]
flow = "SearchFileContent"
test_output_path = "analysis/SearchFiles/testing"
args = {"output": test_output_path,
"paths": ["/bin/ls*"],
"also_download": True}
class TestSearchFilesGrep(base.AutomatedTest):
"""Test SearchFileContent with grep."""
platforms = ["Linux"]
flow = "SearchFileContent"
test_output_path = "analysis/SearchFilesGrep/testing"
args = {"output": test_output_path,
"paths": ["/bin/ls*"],
"grep": rdf_client.BareGrepSpec(literal="ELF"),
"also_download": True}
| 32.477273 | 74 | 0.6662 | #!/usr/bin/env python
"""End to end tests for lib.flows.general.grep."""
from grr.endtoend_tests import base
from grr.lib import aff4
from grr.lib.rdfvalues import client as rdf_client
class TestSearchFiles(base.AutomatedTest):
"""Test SearchFileContent."""
platforms = ["Linux"]
flow = "SearchFileContent"
test_output_path = "analysis/SearchFiles/testing"
args = {"output": test_output_path,
"paths": ["/bin/ls*"],
"also_download": True}
def CheckFlow(self):
results = aff4.FACTORY.Open(self.client_id.Add(self.test_output_path),
token=self.token)
self.assertGreater(len(results), 1)
for result in results:
self.assertTrue(result.pathspec.path.startswith("/bin/ls"))
class TestSearchFilesGrep(base.AutomatedTest):
"""Test SearchFileContent with grep."""
platforms = ["Linux"]
flow = "SearchFileContent"
test_output_path = "analysis/SearchFilesGrep/testing"
args = {"output": test_output_path,
"paths": ["/bin/ls*"],
"grep": rdf_client.BareGrepSpec(literal="ELF"),
"also_download": True}
def CheckFlow(self):
results = aff4.FACTORY.Open(self.client_id.Add(self.test_output_path),
token=self.token)
self.assertGreater(len(results), 1)
for result in results:
self.assertTrue("ELF" in result.data)
self.assertTrue("ls" in result.pathspec.path)
| 544 | 0 | 50 |
6f7d6c235f928d355357114c1a117ab955e5f493 | 298 | py | Python | core/apps/attendance/admin.py | yavuzbektas/StudentAnalayze | 794f433f650633f646bee5cbe08d04afdbd75e84 | ["MIT"] | null | null | null | core/apps/attendance/admin.py | yavuzbektas/StudentAnalayze | 794f433f650633f646bee5cbe08d04afdbd75e84 | ["MIT"] | null | null | null | core/apps/attendance/admin.py | yavuzbektas/StudentAnalayze | 794f433f650633f646bee5cbe08d04afdbd75e84 | ["MIT"] | null | null | null | from django.contrib import admin
# from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# from django.contrib.auth.models import User
# Register your models here.
from .models import LessonPeriods,DailyAttendance
admin.site.register(LessonPeriods)
admin.site.register(DailyAttendance)
| 33.111111 | 66 | 0.83557 | from django.contrib import admin
# from django.contrib.auth.admin import UserAdmin as BaseUserAdmin
# from django.contrib.auth.models import User
# Register your models here.
from .models import LessonPeriods,DailyAttendance
admin.site.register(LessonPeriods)
admin.site.register(DailyAttendance)
| 0 | 0 | 0 |
6e31c41793bea851900f354d8d475f0c33a394ce | 7,904 | py | Python | mobtexting_sms/models/send_sms.py | mobtexting/mobtexting-odoo | 01b1a6f87fc41b8c9f4099476d00023ff84b8372 | ["MIT"] | null | null | null | mobtexting_sms/models/send_sms.py | mobtexting/mobtexting-odoo | 01b1a6f87fc41b8c9f4099476d00023ff84b8372 | ["MIT"] | null | null | null | mobtexting_sms/models/send_sms.py | mobtexting/mobtexting-odoo | 01b1a6f87fc41b8c9f4099476d00023ff84b8372 | ["MIT"] | null | null | null | import logging
from odoo import http
from odoo.exceptions import except_orm, UserError, Warning
from odoo.http import request, serialize_exception as _serialize_exception, content_disposition
from odoo import api, fields, models, tools, _
import datetime
from urllib.parse import urlencode, quote as quote
import urllib.parse
import requests
import re
import json
from functools import reduce
_logger = logging.getLogger(__name__)
try:
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
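    # With these delimiters the sandboxed Jinja2 environment accepts
    # Mako-style templates, e.g. (illustrative):
    #   mako_template_env.from_string("Hi ${object.name}").render(object=rec)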
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
| 42.042553 | 207 | 0.573001 | import logging
from odoo import http
from odoo.exceptions import except_orm, UserError, Warning
from odoo.http import request, serialize_exception as _serialize_exception, content_disposition
from odoo import api, fields, models, tools, _
import datetime
from urllib.parse import urlencode, quote as quote
import urllib.parse
import requests
import re
import json
from functools import reduce
_logger = logging.getLogger(__name__)
try:
from jinja2.sandbox import SandboxedEnvironment
mako_template_env = SandboxedEnvironment(
block_start_string="<%",
block_end_string="%>",
variable_start_string="${",
variable_end_string="}",
comment_start_string="<%doc>",
comment_end_string="</%doc>",
line_statement_prefix="%",
line_comment_prefix="##",
trim_blocks=True, # do not output newline after blocks
autoescape=True, # XML/HTML automatic escaping
)
mako_template_env.globals.update({
'str': str,
'quote': quote,
'urlencode': urlencode,
'datetime': datetime,
'len': len,
'abs': abs,
'min': min,
'max': max,
'sum': sum,
'filter': filter,
'reduce': reduce,
'map': map,
'round': round,
'relativedelta': lambda *a, **kw : relativedelta.relativedelta(*a, **kw),
})
except ImportError:
_logger.warning("jinja2 not available, templating features will not work!")
class SendSMS(models.Model):
_name = "mob_send_sms"
_description = "Send SMS"
name = fields.Char(required=True, string='Template Name')
gateway_id = fields.Many2one('mob_gateway_setup',required=True,string='SMS Gateway')
    model_id = fields.Many2one('ir.model', string='Applies to', help="The kind of document with which this template can be used")
sms_to = fields.Char(string='To (Phone)', help="To mobile number (placeholders may be used here)")
sms_html = fields.Text('Body')
ref_ir_act_window = fields.Many2one('ir.actions.act_window', 'Sidebar action', readonly=True, copy=False,help="Sidebar action to make this template available on records " "of the related document model")
def send_sms(self, template_id, record_id):
sms_rendered_content = self.env['mob_send_sms'].render_template(template_id.sms_html, template_id.model_id.model, record_id)
rendered_sms_to = self.env['mob_send_sms'].render_template(template_id.sms_to, template_id.model_id.model, record_id)
self.send_sms_link(sms_rendered_content,rendered_sms_to,record_id,template_id.model_id.model,template_id.gateway_id)
def send_sms_link(self,sms_rendered_content,rendered_sms_to,record_id,model,gateway_url_id):
sms_rendered_content = sms_rendered_content
sms_rendered_content_msg = urllib.parse.quote_plus(sms_rendered_content)
if rendered_sms_to:
rendered_sms_to = re.sub(r'\s+', '', rendered_sms_to)
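        # Normalize the number: whitespace was stripped above; the checks
        # below drop '+', '-', '(' and ')' so only digits remain.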
if '+' in rendered_sms_to:
rendered_sms_to = rendered_sms_to.replace('+', '')
if '-' in rendered_sms_to:
rendered_sms_to = rendered_sms_to.replace('-', '')
if '(' in rendered_sms_to:
rendered_sms_to = rendered_sms_to.replace('(', '')
if ')' in rendered_sms_to:
rendered_sms_to = rendered_sms_to.replace(')', '')
if rendered_sms_to:
send_url = gateway_url_id.gateway_url
para = {"access_token": gateway_url_id.accesstoken,
"service": gateway_url_id.service,
"sender": gateway_url_id.sender,
"message": sms_rendered_content_msg,
"to": rendered_sms_to}
activecheck = gateway_url_id.active
if activecheck == 1:
try:
response = requests.post(url=send_url, params=para).text
resultRes = ""
checkval = ""
if "https://portal.mobtexting.com/login" in response:
response = "Access Token Invalid"
resultRes = response
else:
response = json.loads(response)
checkval = response['status']
if checkval == "ERROR":
response = response['message']
resultRes = response
if checkval == 200:
response = "Message Send"
resultRes = response
result = self.env['mob_sms_track'].sms_track_create(sms_rendered_content, rendered_sms_to,
resultRes,
model, gateway_url_id.id)
if model != 'mob_gateway_setup':
self.env['mail.message'].create({
'author_id': http.request.env.user.partner_id.id,
'date': datetime.datetime.today().strftime('%Y-%m-%d %H:%M:%S'),
'model': model,
'res_id': record_id,
'message_type': 'email',
'body': '<b>SMS: </b>' + sms_rendered_content,
})
return response
except Exception as e:
return e
else:
return "Enable Active"
def render_template(self, template, model, res_id):
"""Render the given template text, replace mako expressions ``${expr}``
with the result of evaluating these expressions with
an evaluation context containing:
* ``user``: browse_record of the current user
* ``object``: browse_record of the document record this sms is
related to
* ``context``: the context passed to the sms composition wizard
:param str template: the template text to render
:param str model: model name of the document record this sms is related to.
:param int res_id: id of document records those sms are related to.
"""
template = mako_template_env.from_string(tools.ustr(template))
user = self.env.user
record = self.env[model].browse(res_id)
variables = {
'user': user
}
variables['object'] = record
try:
render_result = template.render(variables)
except Exception:
_logger.error("Failed to render template %r using values %r" % (template, variables))
render_result = u""
if render_result == u"False":
render_result = u""
return render_result
def create_action(self):
action_obj = self.env['ir.actions.act_window']
data_obj = self.env['ir.model.data']
view = self.env.ref('mobtexting_sms.sms_compose_wizard_form')
src_obj = self.model_id.model
button_name = _('SMS Send (%s)') % self.name
action = action_obj.create({
'name': button_name,
'type': 'ir.actions.act_window',
'res_model': 'mob_sms.compose',
'context': "{'default_template_id' : %d, 'default_use_template': True}" % (self.id),
'view_mode': 'tree,form',
'view_id': view.id,
'target': 'new',
'binding_model_id': self.model_id.id,
})
self.write({
'ref_ir_act_window': action.id,
})
return True
def unlink_action(self):
for template in self:
if template.ref_ir_act_window:
template.ref_ir_act_window.sudo().unlink()
return True
| 4,270 | 2,114 | 22 |
5fe7c27af5807b5b054a2d1aaee39cfae6b4adb3 | 606 | py | Python | OpenSees/SRC/interpreter/test.py | kuanshi/ductile-fracture | ccb350564df54f5c5ec3a079100effe261b46650 | ["MIT"] | 8 | 2019-03-05T16:25:10.000Z | 2020-04-17T14:12:03.000Z | examples/test.py | ElsevierSoftwareX/SOFTX-D-17-00072 | b7aa03d0243dce5fa3c79e53a753ceb871926908 | ["BSD-2-Clause-FreeBSD"] | null | null | null | examples/test.py | ElsevierSoftwareX/SOFTX-D-17-00072 | b7aa03d0243dce5fa3c79e53a753ceb871926908 | ["BSD-2-Clause-FreeBSD"] | 5 | 2018-02-26T09:38:24.000Z | 2020-11-26T15:38:56.000Z | import opensees as ops
ops.wipe()
ops.uniaxialMaterial("Elastic", 1, 1000.);
ops.testUniaxialMaterial(1);
for strain in [0.01, 0.02, 0.03, 0.04, 0.05]:
ops.setStrain(strain);
print("strain: ", str(ops.getStrain()), " stress: ", str(ops.getStress()), " tangent: ", str(ops.getTangent()));
ops.uniaxialMaterial("Elastic", 2, 1000.);
ops.uniaxialMaterial("Parallel", 3, 1, 2);
ops.testUniaxialMaterial(3);
for strain in [0.01, 0.02, 0.03, 0.04, 0.05]:
ops.setStrain(strain);
print("strain: ", str(ops.getStrain()), " stress: ", str(ops.getStress()), " tangent: ", str(ops.getTangent()));
| 31.894737 | 116 | 0.650165 | import opensees as ops
ops.wipe()
ops.uniaxialMaterial("Elastic", 1, 1000.);
ops.testUniaxialMaterial(1);
for strain in [0.01, 0.02, 0.03, 0.04, 0.05]:
ops.setStrain(strain);
print("strain: ", str(ops.getStrain()), " stress: ", str(ops.getStress()), " tangent: ", str(ops.getTangent()));
ops.uniaxialMaterial("Elastic", 2, 1000.);
ops.uniaxialMaterial("Parallel", 3, 1, 2);
ops.testUniaxialMaterial(3);
for strain in [0.01, 0.02, 0.03, 0.04, 0.05]:
ops.setStrain(strain);
print("strain: ", str(ops.getStrain()), " stress: ", str(ops.getStress()), " tangent: ", str(ops.getTangent()));
| 0 | 0 | 0 |
c7e6af834f54b88f53d67454458b733d57a12bf3 | 9,923 | py | Python | know/scrap/pieces.py | otosense/know | 1e0688dedb48335644464c6be35ca91afdb5b2e3 | ["Apache-2.0"] | null | null | null | know/scrap/pieces.py | otosense/know | 1e0688dedb48335644464c6be35ca91afdb5b2e3 | ["Apache-2.0"] | 1 | 2021-11-24T20:46:33.000Z | 2021-11-24T20:46:33.000Z | know/scrap/pieces.py | otosense/know | 1e0688dedb48335644464c6be35ca91afdb5b2e3 | ["Apache-2.0"] | null | null | null | """pieces of thing for inspiration
"""
| 31.009375 | 89 | 0.620377 | """pieces of thing for inspiration
"""
import itertools
from dataclasses import dataclass
from itertools import takewhile as itertools_takewhile
from atypes import Slab
from creek import Creek
from creek.util import to_iterator
from i2 import MultiObj, FuncFanout, ContextFanout, Pipe
from typing import Callable, Iterable, Iterator, Any, Mapping, Dict
from know.util import (
Name,
SlabService,
iterate,
FiltFunc,
iterate_dict_values,
always_false,
always_true,
StreamId,
Stream,
SlabCallback,
)
from taped import chunk_indices
always: FiltFunc
Hunker: 'HunkerType'  # annotation quoted: HunkerType is never defined or imported in this scrap module
class MultiIterator(MultiObj):
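"""Draw one item from each contained iterator (None once an iterator is
exhausted) and return them as a ``{name: item}`` dict on each ``__next__``."""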
def _gen_next(self):
for name, iterator in self.objects.items():
yield name, next(iterator, None)
def __next__(self) -> dict:
return dict(self._gen_next())
# TODO: Make smart default for stop_condition. If finite iterable, use any_value_is_none?
no_more_data = type('no_more_data', (), {})
# class DictZip:
# def __init__(self, *unnamed, takewhile=None, **named):
# self.multi_iterator = MultiIterator(*unnamed, **named)
# self.objects = self.multi_iterator.objects
# self.takewhile = takewhile
#
# def __iter__(self):
# while True:
# x = next(self.multi_iterator)
# if not self.takewhile(x):
# break
# yield x
#
# class MultiIterable:
# def __init__(self, *unnamed, **named):
# self.multi_iterator = MultiIterator(*unnamed, **named)
# self.objects = self.multi_iterator.objects
#
# def __iter__(self):
# while True:
# yield next(self.multi_iterator)
#
# def takewhile(self, predicate=None):
# """itertools.takewhile applied to self, with a bit of syntactic sugar
# There's nothing to stop the iteration"""
# if predicate is None:
# predicate = lambda x: True # always true
# return itertools_takewhile(predicate, self)
def test_multi_iterator():
# get_multi_iterable = lambda: MultiIterable(
# audio=iter([1, 2, 3]), keyboard=iter([4, 5, 6])
# )
def is_none(x):
return x is None
def is_not_none(x):
return x is not None
# Note: Equivalent to any_non_none_value = Pipe(methodcaller('values'), iterize(
# is_not_none), any)
def any_non_none_value(d: dict):
"""True if and only if d has any non-None values
>>> assert not any_non_none_value({'a': None, 'b': None})
>>> assert any_non_none_value({'a': None, 'b': 3})
"""
return any(map(is_not_none, d.values()))
# Note: Does not work (never stops)
# get_multi_iterable = lambda: MultiIterable(
# audio=iter([1, 2, 3]),
# keyboard=iter([4, 5, 6])
# )
get_multi_iterable = lambda: DictZip(
audio=iter([1, 2, 3]), keyboard=iter([4, 5, 6]), takewhile=any_non_none_value,
)
m = get_multi_iterable()
assert list(m.objects) == ['audio', 'keyboard']
from functools import partial
def if_then_else(x, then_func, else_func, if_func):
if if_func(x):
return then_func(x)
else:
return else_func(x)
call_if_not_none = partial(
if_then_else, if_func=lambda x: x is not None, else_func=lambda x: None
)
#
predicate = partial(call_if_not_none, then_func=lambda x: sum(x.values()) < 7)
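# NOTE: the `def predicate` below immediately shadows the partial-based
# predicate above, so only the function definition is actually used.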
def predicate(x):
if x is not None:
return any(v is not None for v in x.values())
else:
return False
m = get_multi_iterable()
assert list(m) == [
{'audio': 1, 'keyboard': 4},
{'audio': 2, 'keyboard': 5},
{'audio': 3, 'keyboard': 6},
]
class _MultiIterator(MultiObj):
"""Helper class for DictZip"""
def __init__(self, *unnamed, **named):
super().__init__(*unnamed, **named)
self.objects = {k: to_iterator(v) for k, v in self.objects.items()}
def _gen_next(self):
for name, iterator in self.objects.items():
yield name, next(iterator, None)
def __next__(self) -> dict:
return dict(self._gen_next())
StopCondition = Callable[[Any], bool]
# TODO: Make smart default for stop_condition. If finite iterable, use any_value_is_none?
# TODO: Default consumer(s) (e.g. data-safe prints?)
# TODO: Default slabs? (iterate through
@dataclass
class SlabsPushTuple:
slabs: Iterable[Slab]
services: Mapping[Name, SlabService]
def __post_init__(self):
if isinstance(self.services, FuncFanout):
self.multi_service = self.services
else:
# TODO: Add capability (in FuncFanout) to get a mix of (un)named consumers
self.multi_service = FuncFanout(**self.services)
self.slabs_and_services_context = ContextFanout(
slabs=self.slabs, **self.multi_service
)
def __iter__(self):
with self.slabs_and_services_context: # enter all contained contexts
# get an iterable slabs object
if isinstance(self.slabs, ContextFanout):
its = tuple(getattr(self.slabs, s) for s in self.slabs)
slabs = iterate(its)
# slabs = iterate_dict_values(self.slabs)
else:
slabs = self.slabs
# Now iterate...
for slab in slabs:
yield self.multi_service(slab) # ... calling the services on each slab
def __call__(
self, callback: Callable = None, sentinel_func: FiltFunc = None,
):
for multi_service_output in self:
if callback:
callback_output = callback(multi_service_output)
if sentinel_func and sentinel_func(callback_output):
break
@dataclass
class SlabsPush:
slabs: Iterable[Slab]
services: Mapping[Name, SlabService]
def __post_init__(self):
if isinstance(self.services, FuncFanout):
self.multi_service = self.services
else:
# TODO: Add capability (in FuncFanout) to get a mix of (un)named consumers
self.multi_service = FuncFanout(**self.services)
# Put slabs and multi_services in a ContextFanout so that
# anything that needs to be contextualized, will.
self.slabs_and_services_context = ContextFanout(
slabs=self.slabs, **self.multi_service
)
def __iter__(self):
with self.slabs_and_services_context: # enter all contained contexts
# get an iterable slabs object
# TODO: not sure this ContextFanout is the right check
if isinstance(self.slabs, ContextFanout):
slabs = iterate_dict_values(self.slabs)
else:
slabs = self.slabs
# Now iterate...
for slab in slabs:
yield self.multi_service(slab) # ... calling the services on each slab
def __call__(
self, callback: Callable = None, sentinel_func: FiltFunc = None,
):
for multi_service_output in self:
if callback:
callback_output = callback(multi_service_output)
if sentinel_func and sentinel_func(callback_output):
break
apply = Pipe(map, tuple)
class MultiIterable:
"""Join several iterables together.
>>> from know.util import any_value_is_none
>>> from functools import partial
>>>
>>> any_value_is_none = lambda d: any(d[k] is None for k in d)
>>> mk_multi_iterable = partial(MultiIterable, stop_condition=any_value_is_none)
>>> mi = mk_multi_iterable(lets='abc', nums=[1, 2, 3, 4])
>>> list(mi)
[{'lets': 'a', 'nums': 1}, {'lets': 'b', 'nums': 2}, {'lets': 'c', 'nums': 3}]
>>> mi = MultiIterable(
... x=[5, 4, 3, 2, 1], y=[1, 2, 3, 4, 5],
... stop_condition=lambda d: d['x'] == d['y']
... )
>>> list(mi)
[{'x': 5, 'y': 1}, {'x': 4, 'y': 2}]
"""
def __init__(self, *unnamed, stop_condition: StopCondition = always_false, **named):
self.multi_iterator = _MultiIterator(*unnamed, **named)
self.iterators = self.multi_iterator.objects
self.stop_condition = stop_condition
def __iter__(self):
while not self.stop_condition(items := next(self.multi_iterator)):
yield items
def takewhile(self, predicate=None):
"""itertools.takewhile applied to self, with a bit of syntactic sugar
There's nothing to stop the iteration"""
if predicate is None:
predicate = lambda x: True # always true
return itertools.takewhile(predicate, self)
class DictZip:
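"""Zip several named iterators into ``{name: next_item}`` dicts, yielding
while ``takewhile(d)`` stays truthy (defaults to always-true)."""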
def __init__(self, *unnamed, takewhile=None, **named):
self.multi_iterator = _MultiIterator(*unnamed, **named)
self.objects = self.multi_iterator.objects
if takewhile is None:
takewhile = always_true
self.takewhile = takewhile
def __iter__(self):
while self.takewhile(d := next(self.multi_iterator)):
yield d
@dataclass
class LiveProcess:
streams: Dict[StreamId, Stream]
slab_callback: SlabCallback = print
walk: Callable = DictZip
def __call__(self):
with ContextFanout(self.streams, self.slab_callback):
slabs = self.walk(self.streams)
for slab in slabs:
callback_output = self.slab_callback(slab)
return callback_output
# TODO: Weird subclassing. Not the Creek init. Consider factory or delegation
class FixedStepHunker(Creek):
def __init__(self, src, chk_size, chk_step=None, start_idx=0, end_idx=None):
intervals = chunk_indices(
chk_size=chk_size, chk_step=chk_step, start_idx=start_idx, end_idx=end_idx
)
super().__init__(stream=intervals)
self.src = src
def data_to_obj(self, data):
return self.src[slice(*data)]
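# Usage sketch for FixedStepHunker (hypothetical values; assumes `chunk_indices`
# from `taped` yields (start, stop) index pairs, as `data_to_obj` implies):
#   hunker = FixedStepHunker(src=list(range(100)), chk_size=10, chk_step=5)
#   first_chunk = next(iter(hunker))  # -> src[0:10]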
 | 5829 | 430 | 452 |
2ef422cadf4f6a9f51b16cd6259af716310db401 | 722 | py | Python | core/migrations/0015_auto_20210116_1202.py | Specialistik/my_internet | fedbc0266b2646f0ec19ea89ead7d0342ff79d81 | [
"MIT"
] | null | null | null | core/migrations/0015_auto_20210116_1202.py | Specialistik/my_internet | fedbc0266b2646f0ec19ea89ead7d0342ff79d81 | [
"MIT"
] | null | null | null | core/migrations/0015_auto_20210116_1202.py | Specialistik/my_internet | fedbc0266b2646f0ec19ea89ead7d0342ff79d81 | [
"MIT"
] | null | null | null | # Generated by Django 3.1.4 on 2021-01-16 09:02
| 28.88 | 134 | 0.619114 | # Generated by Django 3.1.4 on 2021-01-16 09:02
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('core', '0014_auto_20210116_1202'),
]
operations = [
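# The literal timestamps in the defaults below were frozen when makemigrations
# ran (2021-01-16); they are fixed defaults, not "now" at migration time.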
migrations.AlterField(
model_name='payment',
name='datetime',
field=models.DateTimeField(default=datetime.datetime(2021, 1, 16, 12, 2, 54, 802603), verbose_name='Дата платежа'),
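# verbose_name 'Дата платежа' means "Payment date".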
),
migrations.AlterField(
model_name='person',
name='payment_date',
field=models.DateField(default=datetime.datetime(2021, 1, 16, 12, 2, 54, 801604), verbose_name='Дата следующего платежа'),
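# verbose_name 'Дата следующего платежа' means "Date of the next payment".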
),
]
| 0 | 624 | 23 |
8773f63df6fbc10b6d52360a5354fbed727f45aa | 890 | py | Python | apps/vars/site_config.py | wizzzet/todo_backend | 58d27a639899514a3b10058cebb82c9b420a5bcc | [
"MIT"
] | null | null | null | apps/vars/site_config.py | wizzzet/todo_backend | 58d27a639899514a3b10058cebb82c9b420a5bcc | [
"MIT"
] | null | null | null | apps/vars/site_config.py | wizzzet/todo_backend | 58d27a639899514a3b10058cebb82c9b420a5bcc | [
"MIT"
] | null | null | null | import datetime
| 22.25 | 63 | 0.626966 | import datetime
from snippets.utils.datetime import utcnow
from vars.models import SiteConfig
CACHE_TIMEOUT = datetime.timedelta(0, 30)  # re-read the solo config row at most every 30 seconds
class SiteConfigs(object):
def __init__(self):
self.last_modified = None
self.configs = None
def index(self, force=False):
now = utcnow()
if force \
or self.last_modified is None \
or now - self.last_modified > CACHE_TIMEOUT:
self.configs = SiteConfig.get_solo()
self.last_modified = now
def force_index(self):
return self.index(force=True)
def get(self, field_name, default_value=''):
self.index()
if not self.configs:
return default_value
return getattr(self.configs, field_name, default_value)
def all(self):
self.index()
return self.configs
site_config = SiteConfigs()
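# Example read (the field name here is hypothetical; assumes a SiteConfig solo
# row exists in the database):
#   title = site_config.get('title', default_value='')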
| 558 | 5 | 157 |
0249338edf6fa8f543122d623322698d0e9205f4 | 16,213 | py | Python | plotting_functions.py | h-mayorquin/attractor_sequences | 885271f30d73a58a7aad83b55949e4e32ba0b45a | [
"MIT"
] | 1 | 2016-08-19T18:58:51.000Z | 2016-08-19T18:58:51.000Z | plotting_functions.py | h-mayorquin/attractor_sequences | 885271f30d73a58a7aad83b55949e4e32ba0b45a | [
"MIT"
] | null | null | null | plotting_functions.py | h-mayorquin/attractor_sequences | 885271f30d73a58a7aad83b55949e4e32ba0b45a | [
"MIT"
] | null | null | null | import seaborn as sns
| 33.428866 | 120 | 0.645593 | import seaborn as sns
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from data_transformer import transform_neural_to_normal
from analysis_functions import calculate_angle_from_history, calculate_winning_pattern_from_distances
from analysis_functions import calculate_patterns_timings
def set_text(ax, coordinate_from, coordinate_to, fontsize=25, color='black'):
"""
Set text in an axis
:param ax: The axis
:param coordinate_from: From pattern
:param coordinate_to: To pattern
:param fontsize: The fontsize
:return:
"""
message = str(coordinate_from) + '->' + str(coordinate_to)
ax.text(coordinate_from, coordinate_to, message, ha='center', va='center',
rotation=315, fontsize=fontsize, color=color)
def plot_artificial_sequences(sequences, minicolumns):
sns.set_style("whitegrid", {'axes.grid': False})
sequence_matrix = np.zeros((len(sequences), minicolumns))
for index, sequence in enumerate(sequences):
sequence_matrix[index, sequence] = index + 1
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
# 'prism' is the colormap actually in effect ('Paired' was a superseded choice)
cmap = matplotlib.cm.prism
cmap.set_under('white')
ax.imshow(sequence_matrix, cmap=cmap, vmin=0.5)
sns.set()
def plot_weight_matrix(nn, ampa=False, one_hypercolum=True, ax=None):
with sns.axes_style("whitegrid", {'axes.grid': False}):
if ampa:
w = nn.w_ampa
title = 'AMPA'
else:
w = nn.w
title = 'NMDA'
if one_hypercolum:
w = w[:nn.minicolumns, :nn.minicolumns]
aux_max = np.max(np.abs(w))
cmap = 'coolwarm'
if ax is None:
# sns.set_style("whitegrid", {'axes.grid': False})
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
im = ax.imshow(w, cmap=cmap, interpolation='None', vmin=-aux_max, vmax=aux_max)
ax.set_title(title + ' connectivity')
divider = make_axes_locatable(ax)
cax = divider.append_axes('right', size='5%', pad=0.05)
ax.get_figure().colorbar(im, ax=ax, cax=cax)
def hinton(matrix, max_weight=None, ax=None):
"""Draw Hinton diagram for visualizing a weight matrix."""
ax = ax if ax is not None else plt.gca()
if not max_weight:
max_weight = 2 ** np.ceil(np.log(np.abs(matrix).max()) / np.log(2))
ax.patch.set_facecolor('gray')
ax.set_aspect('equal', 'box')
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
for (x, y), w in np.ndenumerate(matrix):
color = 'white' if w > 0 else 'black'
size = np.sqrt(np.abs(w) / max_weight)
rect = plt.Rectangle([x - size / 2, y - size / 2], size, size,
facecolor=color, edgecolor=color)
ax.add_patch(rect)
ax.autoscale_view()
ax.invert_yaxis()
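# Example (hypothetical data): render a random weight matrix as a Hinton diagram.
#   w = np.random.randn(8, 8)
#   hinton(w)
#   plt.show()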
def plot_winning_pattern(manager, ax=None, separators=False, remove=0):
"""
Plots the winning pattern for the sequences
:param manager: A network manager instance
:param ax: an axis instance
:return:
"""
n_patterns = manager.nn.minicolumns
T_total = manager.T_total
# Get the angles
angles = calculate_angle_from_history(manager)
winning = calculate_winning_pattern_from_distances(angles) + 1 # Get them in the color bounds
timings = calculate_patterns_timings(winning, manager.dt, remove)
winners = [x[0] for x in timings]
pattern_times = [x[2] + 0.5 * x[1] for x in timings]
# 0.5 is for half of the time that the pattern lasts ( that is x[1])
start_times = [x[2] for x in timings]
# Filter the data
angles[angles < 0.1] = 0
filter = np.arange(1, angles.shape[1] + 1)
angles = angles * filter
# Add a column of zeros and of the winners to the stack
zeros = np.zeros_like(winning)
angles = np.column_stack((angles, zeros, winning))
# Plot
with sns.axes_style("whitegrid", {'axes.grid': False}):
if ax is None:
fig = plt.figure(figsize=(16, 12))
ax = fig.add_subplot(111)
fig = ax.figure
cmap = matplotlib.cm.Paired
cmap.set_under('white')
extent = [0, n_patterns + 2, T_total, 0]
im = ax.imshow(angles, aspect='auto', interpolation='None', cmap=cmap, vmax=filter[-1], vmin=0.9, extent=extent)
ax.set_title('Sequence of patterns')
ax.set_xlabel('Patterns')
ax.set_ylabel('Time')
# Put labels in both axis
ax.tick_params(labeltop=False, labelright=False)
# Add seperator
ax.axvline(n_patterns, color='k', linewidth=2)
ax.axvline(n_patterns + 1, color='k', linewidth=2)
ax.axvspan(n_patterns, n_patterns + 1, facecolor='gray', alpha=0.3)
# Add the sequence as a text in a column
x_min = n_patterns * 1.0/ (n_patterns + 2)
x_max = (n_patterns + 1) * 1.0 / (n_patterns + 2)
for winning_pattern, time, start_time in zip(winners, pattern_times, start_times):
ax.text(n_patterns + 0.5, time, str(winning_pattern), va='center', ha='center')
if separators:
ax.axhline(y=start_time, xmin=x_min, xmax=x_max, linewidth=2, color='black')
# Colorbar
bounds = np.arange(0.5, n_patterns + 1.5, 1)
ticks = np.arange(1, n_patterns + 1, 1)
# Set the ticks positions
ax.set_xticks(bounds)
# Set the strings in those ticks positions
strings = [str(int(x + 1)) for x in bounds[:-1]]
strings.append('Winner')
ax.xaxis.set_major_formatter(plt.FixedFormatter(strings))
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.12, 0.05, 0.79])
fig.colorbar(im, cax=cbar_ax, boundaries=bounds, cmap=cmap, ticks=ticks, spacing='proportional')
def plot_sequence(manager):
T_total = manager.T_total
# Get the angles
angles = calculate_angle_from_history(manager)
winning = calculate_winning_pattern_from_distances(angles)
winning = winning[np.newaxis]
# Plot
sns.set_style("whitegrid", {'axes.grid': False})
filter = np.arange(1, angles.shape[1] + 1)
angles = angles * filter
cmap = matplotlib.cm.Paired
cmap.set_under('white')
extent = [0, T_total, manager.nn.minicolumns, 0]
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(111)
im1 = ax1.imshow(winning, aspect=2, interpolation='None', cmap=cmap, vmax=filter[-1], vmin=0.9, extent=extent)
ax1.set_title('Winning pattern')
# Colorbar
bounds = np.arange(0, manager.nn.minicolumns + 1, 0.5)
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.12, 0.05, 0.79])
cb = fig.colorbar(im1, cax=cbar_ax, boundaries=bounds)
def plot_network_activity_angle(manager):
T_total = manager.T_total
history = manager.history
# Get the angles
angles = calculate_angle_from_history(manager)
# Plot
sns.set_style("whitegrid", {'axes.grid': False})
cmap = 'plasma'
extent1 = [0, manager.nn.minicolumns * manager.nn.hypercolumns, T_total, 0]
extent2 = [0, manager.nn.minicolumns, T_total, 0]
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(121)
im1 = ax1.imshow(history['o'], aspect='auto', interpolation='None', cmap=cmap, vmax=1, vmin=0, extent=extent1)
ax1.set_title('Unit activation')
ax1.set_xlabel('Units')
ax1.set_ylabel('Time')
ax2 = fig.add_subplot(122)
im2 = ax2.imshow(angles, aspect='auto', interpolation='None', cmap=cmap, vmax=1, vmin=0, extent=extent2)
ax2.set_title('Winning pattern')
ax2.set_xlabel('Patterns')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.12, 0.05, 0.79])
fig.colorbar(im1, cax=cbar_ax)
def plot_network_activity(manager):
T_total = manager.T_total
history = manager.history
sns.set_style("whitegrid", {'axes.grid': False})
cmap = 'plasma'
extent = [0, manager.nn.minicolumns * manager.nn.hypercolumns, T_total, 0]
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(221)
im1 = ax1.imshow(history['o'], aspect='auto', interpolation='None', cmap=cmap, vmax=1, vmin=0, extent=extent)
ax1.set_title('Unit activation')
ax2 = fig.add_subplot(222)
im2 = ax2.imshow(history['z_pre'], aspect='auto', interpolation='None', cmap=cmap, vmax=1, vmin=0, extent=extent)
ax2.set_title('Traces of activity (z)')
ax3 = fig.add_subplot(223)
im3 = ax3.imshow(history['a'], aspect='auto', interpolation='None', cmap=cmap, vmax=1, vmin=0, extent=extent)
ax3.set_title('Adaptation')
ax4 = fig.add_subplot(224)
im4 = ax4.imshow(history['p_pre'], aspect='auto', interpolation='None', cmap=cmap, vmax=1, vmin=0, extent=extent)
ax4.set_title('Probability')
fig.subplots_adjust(right=0.8)
cbar_ax = fig.add_axes([0.85, 0.12, 0.05, 0.79])
fig.colorbar(im1, cax=cbar_ax)
def plot_adaptation_dynamics(manager, traces_to_plot):
sns.set_style("darkgrid", {'axes.grid': True})
history = manager.history
minicolumns = manager.nn.minicolumns
# Get the right time
T_total = manager.T_total
total_time = np.arange(0, T_total - 0.5 * manager.dt, manager.dt)
# Extract the required data
o_hypercolum = history['o'][..., :minicolumns]
a_hypercolum = history['a'][..., :minicolumns]
# Plot configuration
cmap_string = 'Paired'
cmap = matplotlib.cm.get_cmap(cmap_string)
norm = matplotlib.colors.Normalize(vmin=0, vmax=minicolumns)
fig = plt.figure(figsize=(16, 12))
ax11 = fig.add_subplot(221)
ax12 = fig.add_subplot(222)
ax21 = fig.add_subplot(223)
ax22 = fig.add_subplot(224)
fig.tight_layout()
# Debugging leftover, kept commented out to avoid a hard IPython dependency:
# import IPython
# IPython.embed()
# Plot the wanted activities
for index in traces_to_plot:
ax11.plot(total_time, o_hypercolum[:, index], color=cmap(norm(index)), label=str(index))
# Plot ALL the activities
for index in range(minicolumns):
ax12.plot(total_time, o_hypercolum[:, index], color=cmap(norm(index)), label=str(index))
# Plot the wanted adaptations
for index in traces_to_plot:
ax21.plot(total_time, a_hypercolum[:, index], color=cmap(norm(index)), label=str(index))
# Plot ALL the adaptations
for index in range(minicolumns):
ax22.plot(total_time, a_hypercolum[:, index], color=cmap(norm(index)), label=str(index))
axes = fig.get_axes()
for ax in axes:
ax.set_xlim([0, T_total])
ax.set_ylim([-0.1, 1.1])
ax.legend()
ax.axhline(0, color='black')
ax11.set_title('Unit activity')
ax21.set_title('Adaptations')
def plot_state_variables_vs_time(manager, traces_to_plot, ampa=False):
sns.set_style("darkgrid", {'axes.grid': True})
history = manager.history
minicolumns = manager.nn.minicolumns
T_total = manager.T_total
total_time = np.arange(0, T_total - 0.5 * manager.dt, manager.dt)
o_hypercolum = history['o'][..., :minicolumns]
if ampa:
z_pre_hypercolum = history['z_pre_ampa'][..., :minicolumns]
z_post_hypercolum = history['z_post_ampa'][..., :minicolumns]
p_pre_hypercolum = history['p_pre_ampa'][..., :minicolumns]
p_post_hypercolum = history['p_post_ampa'][..., :minicolumns]
# Take coactivations
p_co = history['p_co_ampa']
z_co = history['z_co_ampa']
w = history['w_ampa']
else:
z_pre_hypercolum = history['z_pre'][..., :minicolumns]
z_post_hypercolum = history['z_post'][..., :minicolumns]
o_hypercolum = history['o'][..., :minicolumns]
p_pre_hypercolum = history['p_pre'][..., :minicolumns]
p_post_hypercolum = history['p_post'][..., :minicolumns]
# Take coactivations
p_co = history['p_co']
z_co = history['z_co']
w = history['w']
# Build labels and pairs
coactivations_to_plot = [(traces_to_plot[2], traces_to_plot[1]), (traces_to_plot[0], traces_to_plot[1])]
labels_of_coactivations = [str(x) + '<--' + str(y) for (x, y) in coactivations_to_plot]
p_co_list = []
z_co_list = []
w_list = []
for (x, y) in coactivations_to_plot:
p_co_list.append(p_co[:, x, y])
z_co_list.append(z_co[:, x, y])
w_list.append(w[:, x, y])
# earlier colormap experiments: 'nipy_spectral', 'hsv'; 'Paired' is the one in effect
cmap_string = 'Paired'
cmap = matplotlib.cm.get_cmap(cmap_string)
norm = matplotlib.colors.Normalize(vmin=0, vmax=minicolumns)
# Plot the traces
fig = plt.figure(figsize=(20, 15))
if ampa:
fig.suptitle('ampa')
else:
fig.suptitle('NMDA')
ax11 = fig.add_subplot(421)
ax12 = fig.add_subplot(422)
ax21 = fig.add_subplot(423)
ax22 = fig.add_subplot(424)
ax31 = fig.add_subplot(425)
ax32 = fig.add_subplot(426)
ax41 = fig.add_subplot(427)
ax42 = fig.add_subplot(428)
fig.tight_layout()
for index in range(minicolumns):
# Plot ALL the activities
ax12.plot(total_time, o_hypercolum[:, index], label=str(index))
for index in traces_to_plot:
# Plot activities
ax11.plot(total_time, o_hypercolum[:, index], color=cmap(norm(index)), label=str(index))
# Plot the z post and pre traces in the same graph
ax21.plot(total_time, z_pre_hypercolum[:, index], color=cmap(norm(index)), label='pre ' + str(index))
ax21.plot(total_time, z_post_hypercolum[:, index], color=cmap(norm(index)), linestyle='--',
label='post ' + str(index))
# Plot the pre and post probabilties in the same graph
ax22.plot(total_time, p_pre_hypercolum[:, index], color=cmap(norm(index)), label='pre ' + str(index))
ax22.plot(total_time, p_post_hypercolum[:, index], color=cmap(norm(index)), linestyle='--',
label='post ' + str(index))
# Plot z_co and p_co in the same graph
for z_co, label in zip(z_co_list, labels_of_coactivations):
ax31.plot(total_time, z_co, label='z_co ' + label)
# Plot the individual probabilities and the coactivations
for p_co, (x, y), label in zip(p_co_list, coactivations_to_plot, labels_of_coactivations):
ax32.plot(total_time, p_co, '-', label='p_co ' + label)
ax32.plot(total_time, p_post_hypercolum[:, x] * p_pre_hypercolum[:, y],
label='p_post_' + label[0] + ' x p_pre_' + label[1])
# Plot the coactivations probabilities
for p_co, label in zip(p_co_list, labels_of_coactivations):
ax41.plot(total_time, p_co, '-', label='p_co ' + label)
# Plot the weights
for w, label in zip(w_list, labels_of_coactivations):
ax42.plot(total_time, w, label=r'$w_{' + label + '}$')
axes = fig.get_axes()
for ax in axes:
ax.set_xlim([0, T_total])
ax.legend()
ax.axhline(0, color='black')
ax11.set_ylim([-0.1, 1.1])
ax12.set_ylim([-0.1, 1.1])
if False:
ax21.set_ylim([-0.1, 1.1])
ax31.set_ylim([-0.1, 1.1])
ax21.set_title('z-traces')
ax22.set_title('probabilities')
ax31.set_title('z_co')
ax32.set_title('p_co and p_i * p*j')
ax41.set_title('p_co')
ax42.set_title('w')
def plot_quantity_history(dic_history, quantity, minicolumns=2):
sns.set_style("whitegrid", {'axes.grid': False})
quantity_to_plot_1 = transform_neural_to_normal(dic_history[quantity], minicolumns=2)
quantity_to_plot_2 = dic_history[quantity]
gs = gridspec.GridSpec(1, 2)
fig = plt.figure(figsize=(16, 12))
ax1 = fig.add_subplot(gs[0, 0])
im1 = ax1.imshow(quantity_to_plot_1, aspect='auto', interpolation='nearest')
divider1 = make_axes_locatable(ax1)
cax1 = divider1.append_axes("right", size='5%', pad=0.05)
fig.colorbar(im1, cax=cax1)
ax2 = fig.add_subplot(gs[0, 1])
im2 = ax2.imshow(quantity_to_plot_2, aspect='auto', interpolation='nearest')
divider2 = make_axes_locatable(ax2)
cax2 = divider2.append_axes("right", size='5%', pad=0.05)
fig.colorbar(im2, cax=cax2)
plt.show()
 | 11424 | 0 | 184 |
50637c98a7aa9839bc89e1671304eacf81cfdf18 | 10,818 | py | Python | rswarp/diagnostics/FieldDiagnostic.py | tanxicccc/rswarp | 3cead0d4e96c73caaae15e4376ac4637dc34c5dc | [
"Apache-2.0"
] | null | null | null | rswarp/diagnostics/FieldDiagnostic.py | tanxicccc/rswarp | 3cead0d4e96c73caaae15e4376ac4637dc34c5dc | [
"Apache-2.0"
] | null | null | null | rswarp/diagnostics/FieldDiagnostic.py | tanxicccc/rswarp | 3cead0d4e96c73caaae15e4376ac4637dc34c5dc | [
"Apache-2.0"
] | null | null | null | import os
| 38.498221 | 152 | 0.564615 | import os
import datetime
from dateutil.tz import tzlocal
import h5py as h5
import numpy as np
from warp import getselfe, getphi, getb, geta
class FieldDiagnostic(object):
"""
Common functionality for field diagnostic classes
Parameters:
solver: A solver object containing fields to be output.
top: The object representing Warp's top package.
w3d: The object representing Warp's w3d package.
comm_world: Object representing an MPI communicator.
period (int): Sets the period in steps of data writeout by the diagnostic.
Defaults to writeout on every step if not set.
write_dir (str): Relative path to place data output of the diagnostic.
Defaults to 'diags/fields/electric' for electric fields/potentials, and 'diags/fields/magnetic'
for magnetic fields/vector potentials if not set.
"""
def __init__(self, solver, top, w3d, comm_world, period=None, write_dir=None):
self.solver = solver
self.top = top
self.w3d = w3d
self.comm_world = comm_world
if self.comm_world is None:
self.lparallel = 0
else:
self.lparallel = comm_world.Get_size()
self.period = period
if not write_dir:
self.write_dir = None
else:
self.write_dir = write_dir
self.geometryParameters = ''
if self.solver.solvergeom == self.w3d.XYZgeom:
self.geometry = 'cartesian'
self.dims = ['x', 'y', 'z']
self.gridsize = [self.solver.nx + 1, self.solver.ny + 1, self.solver.nz + 1]
self.gridSpacing = [self.solver.dx, self.solver.dy, self.solver.dz]
self.gridGlobalOffset = [self.solver.xmmin, self.solver.ymmin, self.solver.zmmin]
self.mesh = [self.solver.xmesh, self.solver.ymesh, self.solver.zmesh]
elif self.solver.solvergeom == self.w3d.XZgeom:
self.geometry = 'cartesian2D'
self.dims = ['x', 'y', 'z']
self.gridsize = [self.solver.nx + 1, self.solver.nz + 1]
self.gridSpacing = [self.solver.dx, self.solver.dz]
self.gridGlobalOffset = [self.solver.xmmin, self.solver.zmmin]
self.mesh = [self.solver.xmesh, self.solver.zmesh]
elif self.solver.solvergeom == self.w3d.RZgeom:
self.geometry = 'thetaMode'
self.geometryParameters = 'm=0'
self.dims = ['r', 't', 'z']
self.gridsize = [self.solver.nx + 1, self.solver.nz + 1]
self.gridSpacing = [self.solver.dx, self.solver.dz]
self.gridGlobalOffset = [self.solver.xmmin, self.solver.zmmin]
self.mesh = [self.solver.xmesh, self.solver.zmesh]
else:
raise Exception("No handler for geometry type %i" % self.solver.solvergeom)
def write(self, write_dir=None):
if self.period and self.top.it % self.period != 0:
return False
if write_dir is None:
write_dir = self.write_dir
if not os.path.lexists(write_dir):
if self.lparallel == 0 or self.comm_world.rank == 0:
os.makedirs(write_dir)
step = str(self.top.it)
filename = '%s/data%s.h5' % (write_dir, step.zfill(5))
if self.lparallel == 0 or self.comm_world.rank == 0:
f = h5.File(filename, 'w')
# for i, v in enumerate(self.mesh):
# f['/data/meshes/mesh/%s' % self.dims[i]] = v
# f['/data/meshes/mesh'].attrs['geometry'] = self.geometry
# f['/data/meshes/mesh'].attrs['geometryParameters'] = self.geometryParameters
# from warp.data_dumping.openpmd_diag.generic_diag
# This header information is from https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#hierarchy-of-the-data-file
f.attrs["openPMD"] = np.string_("1.0.0")
f.attrs["openPMDextension"] = np.uint32(1)
f.attrs["software"] = np.string_("warp")
f.attrs["softwareVersion"] = np.string_("4")
f.attrs["date"] = np.string_(
datetime.datetime.now(tzlocal()).strftime('%Y-%m-%d %H:%M:%S %z'))
f.attrs["meshesPath"] = np.string_("meshes/")
f.attrs["particlesPath"] = np.string_("particles/")
# Setup the basePath
f.attrs["basePath"] = np.string_("/data/%T/")
base_path = "/data/%d/" % self.top.it
bp = f.require_group(base_path)
# https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#required-attributes-for-the-basepath
bp.attrs["time"] = self.top.time
bp.attrs["dt"] = self.top.dt
bp.attrs["timeUnitSI"] = 1.
# https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#iterations-and-time-series
f.attrs["iterationEncoding"] = np.string_("fileBased")
f.attrs["iterationFormat"] = np.string_("%s%%T.h5" % write_dir)
self.basePath = base_path
self.meshPath = f.attrs["meshesPath"]
self.particlesPath = f.attrs["particlesPath"]
self.file = f
return True
def writeDataset(self, data, prefix, attrs={}):
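"""Write `data` (scalar or vector mesh data) under `prefix` in the HDF5
file and attach the openPMD mesh attributes (geometry, gridSpacing,
axisLabels, ...) that readers expect."""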
# print "Shape in writeDataset", self.efield.shape
if len(data.shape) == len(self.dims) or (self.geometry == 'cartesian2D' and len(data.shape) == len(self.dims) - 1): # Scalar data on the mesh
self.file[prefix] = data
field = self.file[prefix]
field.attrs['position'] = [0.0]*len(self.dims) # Report scalar as on the mesh elements
field.attrs['unitSI'] = 1.0
elif len(data.shape) == len(self.dims) + 1 or (self.geometry == 'cartesian2D' and len(data.shape) == len(self.dims)): # Vector data on the mesh
if self.geometry == 'thetaMode':
data = data.swapaxes(1, 2) # For thetaMode, components stored in order of m,r,z
for i, v in enumerate(data):
self.file['%s/%s' % (prefix, self.dims[i])] = v
coord = self.file['%s/%s' % (prefix, self.dims[i])]
coord.attrs['position'] = [0.0]*len(self.dims) # Report field as on the mesh elements
coord.attrs['unitSI'] = 1.0
field = self.file[prefix]
# field.attrs['n%s' % self.dims[i]] = self.gridsize[i]
else:
raise Exception("Unknown data shape: %s" % repr(data.shape))
field.attrs['geometry'] = self.geometry
field.attrs['geometryParameters'] = self.geometryParameters
field.attrs['dataOrder'] = 'C' # C-like order
field.attrs['axisLabels'] = self.dims
field.attrs['gridSpacing'] = self.gridSpacing
field.attrs['gridGlobalOffset'] = self.gridGlobalOffset
field.attrs['gridUnitSI'] = 1.0
field.attrs['unitSI'] = 1.0
for k, v in attrs.items():
self.file[prefix].attrs[k] = v
class ElectrostaticFields(FieldDiagnostic):
"""
Test
Produce an HDF5 file with electric fields and potential .
File tree:
/data/meshes
/mesh
/x
/y
/z
Note that the coordinates will be replaced as appropriate for different
solver geometries (e.g. xyz -> rtz for RZgeom).
/phi
/E
/x
/y
/z
"""
def gatherfields(self):
if self.lparallel == 1:
self.efield = self.solver.getselfe()
else:
self.efield = []
for dim in ['x', 'y', 'z']:
self.efield.append(getselfe(comp=dim, bcast=0))
self.efield = np.array(self.efield)
def gatherpotential(self):
if self.lparallel == 1:
self.phi = self.solver.getphi()
else:
self.phi = getphi(bcast=0)
def write(self):
if not self.write_dir:
write_dir = 'diags/fields/electric'
else:
write_dir = self.write_dir
if not super(ElectrostaticFields, self).write(write_dir):
return False
self.gatherfields()
self.gatherpotential()
if self.solver.__class__.__name__ == 'MultiGrid2D':
# Kludge to make 2D electrostatic solver compatible with thetaMode
# output (which is currently the only relevant option)
self.efield = self.efield[:, :, np.newaxis, :]
# this is particularly awful, because there is no decomposition for
# the potential, but it's the only way to shoehorn the data into
# OpenPMD compliance right now.
self.phi = self.phi[np.newaxis, :, :]
if self.lparallel == 0 or self.comm_world.rank == 0:
self.writeDataset(self.efield, prefix='%s%sE' % (self.basePath, self.meshPath))
self.writeDataset(self.phi, prefix='%s%sphi' % (self.basePath, self.meshPath))
self.file.close()
class MagnetostaticFields(FieldDiagnostic):
"""
Produce an HDF5 file with magnetic fields and vector potential.
File tree:
/data/meshes/
/mesh
/x
/y
/z
Note that the coordinates will be replaced as appropriate for different
solver geometries (e.g. xyz -> rtz for RZgeom).
/vector_potential
/x
/y
/z
/B
/x
/y
/z
"""
def gatherfields(self):
if self.lparallel == 1:
self.bfield = self.solver.getb(bcast=0)
else:
self.bfield = []
for dim in ['x', 'y', 'z']:
self.bfield.append(getb(comp=dim, bcast=0))
self.bfield = np.array(self.bfield)
def gathervectorpotential(self):
if self.lparallel == 1:
self.a = self.solver.geta()
else:
self.a = []
for dim in ['x', 'y', 'z']:
self.a.append(geta(comp=dim))
self.a = np.array(self.a)
def write(self):
if not self.write_dir:
write_dir = 'diags/fields/magnetic'
else:
write_dir = self.write_dir
if not super(MagnetostaticFields, self).write(write_dir):
return False
self.gatherfields()
self.gathervectorpotential()
if self.lparallel == 0 or self.comm_world.rank == 0:
self.writeDataset(self.bfield, prefix='%s%sB' % (self.basePath, self.meshPath))
self.writeDataset(self.a, prefix='%s%svector_potential' % (self.basePath, self.meshPath))
self.file.close()
 | 8591 | 0 | 242 |
12c890bdbd604a4966085d68d959b37602ac980f | 2,704 | py | Python | synapse/_scripts/export_signing_key.py | Fizzadar/synapse | 6b46c3eb3d526d903e1e4833b2e8ae9b73de8502 | [
"Apache-2.0"
] | null | null | null | synapse/_scripts/export_signing_key.py | Fizzadar/synapse | 6b46c3eb3d526d903e1e4833b2e8ae9b73de8502 | [
"Apache-2.0"
] | null | null | null | synapse/_scripts/export_signing_key.py | Fizzadar/synapse | 6b46c3eb3d526d903e1e4833b2e8ae9b73de8502 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
| 26.252427 | 86 | 0.60503 | #!/usr/bin/env python
# Copyright 2019 The Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import sys
import time
from typing import NoReturn, Optional
from signedjson.key import encode_verify_key_base64, get_verify_key, read_signing_keys
from signedjson.types import VerifyKey
def exit(status: int = 0, message: Optional[str] = None) -> NoReturn:
if message:
print(message, file=sys.stderr)
sys.exit(status)
def format_plain(public_key: VerifyKey) -> None:
print(
"%s:%s %s"
% (
public_key.alg,
public_key.version,
encode_verify_key_base64(public_key),
)
)
def format_for_config(public_key: VerifyKey, expiry_ts: int) -> None:
print(
' "%s:%s": { key: "%s", expired_ts: %i }'
% (
public_key.alg,
public_key.version,
encode_verify_key_base64(public_key),
expiry_ts,
)
)
def main() -> None:
parser = argparse.ArgumentParser()
parser.add_argument(
"key_file",
nargs="+",
type=argparse.FileType("r"),
help="The key file to read",
)
parser.add_argument(
"-x",
action="store_true",
dest="for_config",
help="format the output for inclusion in the old_signing_keys config setting",
)
parser.add_argument(
"--expiry-ts",
type=int,
default=int(time.time() * 1000) + 6 * 3600000,
help=(
"The expiry time to use for -x, in milliseconds since 1970. The default "
"is (now+6h)."
),
)
args = parser.parse_args()
formatter = (
(lambda k: format_for_config(k, args.expiry_ts))
if args.for_config
else format_plain
)
for file in args.key_file:
try:
res = read_signing_keys(file)
except Exception as e:
exit(
status=1,
message="Error reading key from file %s: %s %s"
% (file.name, type(e), e),
)
for key in res:
formatter(get_verify_key(key))
if __name__ == "__main__":
main()
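# Example invocations (the key file path is hypothetical):
#   python -m synapse._scripts.export_signing_key homeserver.signing.key
#   python -m synapse._scripts.export_signing_key -x homeserver.signing.key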
 | 1746 | 0 | 92 |
13bb8da2ca5eeda7d756f842c47cc33eab784784 | 8,620 | py | Python | data_loader/data_loader.py | SigureMo/shoeprint-recognition | fe9288938827497c8b555f4fea98e96487943d44 | [
"MIT"
] | 1 | 2020-04-06T05:37:03.000Z | 2020-04-06T05:37:03.000Z | data_loader/data_loader.py | cattidea/shoeprint-recognition | fe9288938827497c8b555f4fea98e96487943d44 | [
"MIT"
] | 2 | 2019-12-16T23:43:38.000Z | 2020-02-01T07:01:39.000Z | data_loader/data_loader.py | cattidea/shoeprint-recognition | fe9288938827497c8b555f4fea98e96487943d44 | [
"MIT"
] | 1 | 2019-11-29T16:41:28.000Z | 2019-11-29T16:41:28.000Z | import os
import json
import h5py
import random
import numpy as np
from config_parser.config import PATHS, DEBUG
from data_loader.base import CacheLoader
from data_loader.image import image2array
H5_PATH = PATHS["h5_path"]
JSON_PATH = PATHS["json_path"]
SHOEPRINT_DIR = PATHS["shoeprint_dir"]
SAMPLE_DIR = PATHS["sample_dir"]
SHOEPRINT_DIR_TEST = PATHS["shoeprint_test_dir"]
DETERMINE_FILE = PATHS["determine_file"]
DETERMINE_FILE_TEST = PATHS["determine_test_file"]
@CacheLoader(name="sample", debug=DEBUG)
def get_sample_arrays(augment):
""" 获取样本文件结构,将样本图片预处理成所需格式
``` python
[
[<img1_array1>, <img1_array2>, ...],
[<img2_array1>, <img2_array2>, ...],
...
],
{
<type_id>: {
"img_indices": [<img1_index>, <img2_index>, <img3_index>, ...],
},
...
},
```
"""
sample_map = {}
sample_arrays = []
types = os.listdir(SAMPLE_DIR)
index = 0
assert types, "样本图库文件夹为空!"
for i, type_id in enumerate(types):
print("get_sample_arrays {}/{} ".format(i, len(types)), end='\r')
type_dir = os.path.join(SAMPLE_DIR, type_id)
img_path = os.path.join(type_dir, os.listdir(type_dir)[0])
sample_map[type_id] = {}
img_array = image2array(img_path, augment)
sample_map[type_id]["img_indices"] = [index + j for j in range(len(img_array))]
index += len(img_array)
sample_arrays.extend(img_array)
assert len(sample_arrays) == index
return sample_arrays, sample_map
@CacheLoader(name="shoeprint", debug=DEBUG)
def get_shoeprint_arrays(augment, sample_length, action_type="train"):
""" 获取鞋印文件结构,将鞋印图片预处理成所需格式追加在 sample_arrays 后,并将数据分类为训练类型、开发类型
之所以不整体打乱,是因为验证集与训练集、开发集是与验证集在不同的样式中,
所以开发集理应与训练集也在不同的样式中
``` python
[
[<img1_array1>, <img1_array2>, ...],
[<img2_array1>, <img2_array2>, ...],
...
],
{
<name>: {
"type_id": <xxxxxxxx>,
"img_indices": [<img1_index>, <img2_index>, <img3_index>, ...],
"set_type": "train/dev/test"
},
...
}
{
<type_id1>: [<name1>, <name2>, ...],
<type_id2>: [<name1>, <name2>, ...],
...
}
```
"""
shoeprint_map = {}
shoeprint_arrays = []
type_map = {}
shoeprint_base_dir = SHOEPRINT_DIR if action_type == "train" else SHOEPRINT_DIR_TEST
types = os.listdir(shoeprint_base_dir)
type_counter = {"train": set(), "dev": set(), "test": set()}
index = sample_length
assert types, "鞋印图库文件夹为空!"
for i, type_id in enumerate(types):
print("get_shoeprint_arrays {}/{} ".format(i, len(types)), end='\r')
if action_type == "train":
set_type = "train" if random.random() < 0.95 else "dev"
else:
set_type = "test"
type_dir = os.path.join(shoeprint_base_dir, type_id)
type_map[type_id] = []
for filename in os.listdir(type_dir):
img_path = os.path.join(type_dir, filename)
img_array = image2array(img_path, augment)
shoeprint_map[filename] = {}
shoeprint_map[filename]["type_id"] = type_id
shoeprint_map[filename]["img_indices"] = [index + j for j in range(len(img_array))]
shoeprint_map[filename]["set_type"] = set_type
shoeprint_arrays.extend(img_array)
index += len(img_array)
type_counter[set_type].add(type_id)
type_map[type_id].append(filename)
if action_type == "train":
print("训练数据共 {} 类,开发数据共 {} 类".format(len(type_counter["train"]), len(type_counter["dev"])))
else:
print("测试数据共 {} 类".format(len(type_counter["test"])))
assert len(shoeprint_arrays) == index - sample_length
return shoeprint_arrays, shoeprint_map, type_map
@CacheLoader(name="determine", debug=DEBUG)
def get_determine_scope(action_type="train"):
""" 读取待判定范围文件,并构造成字典型
``` python
{
<name>: [
<P>, <N1>, <N2>, <N3>, ... // note: P is not necessarily first, and what is recorded here is the type_id
],
...
}
```
"""
determine_scope = {}
determine_scope_file = DETERMINE_FILE if action_type == "train" else DETERMINE_FILE_TEST
with open(determine_scope_file, 'r') as f:
for line in f:
line_items = line.split('\t')
for i in range(len(line_items)):
line_items[i] = line_items[i].strip()
determine_scope[line_items[0]] = line_items[1:]
return determine_scope
@CacheLoader(name="class_indices", debug=DEBUG)
def get_indices(sample_map, shoeprint_map, type_map):
""" 将所有 indices 组织在一起
``` python
[
[
[<idx01>, <idx02>], # one item
[<idx01>, <idx02>],
...
], # one class
...
]
```
"""
indices = []
for i, type_id in enumerate(sample_map):
print("get_indices {}/{} ".format(i, len(sample_map)), end='\r')
class_indices = []
class_indices.append(sample_map[type_id]["img_indices"])
if type_id in type_map:
for pos_name in type_map[type_id]:
if shoeprint_map[pos_name]["set_type"] == "train":
class_indices.append(shoeprint_map[pos_name]["img_indices"])
indices.append(class_indices)
return indices
@CacheLoader(name="test_data_set", debug=DEBUG)
def test_data_import(augment=[], action_type="test"):
""" 构造测试数据
``` python
img_arrays
{
"train": [
{
"name": <name>,
"index": <idx>,
"scope_indices": [<idx01>, <idx02>, ...],
"label": <correct_idx>
},
...
],
"dev": ...,
"test": ...
}
```
"""
determine_scope = get_determine_scope(action_type=action_type)
sample_arrays, sample_map = get_sample_arrays(augment=[])
shoeprint_arrays, shoeprint_map, _ = get_shoeprint_arrays(
augment=augment, sample_length=len(sample_arrays), action_type=action_type)
img_arrays = np.concatenate((sample_arrays, shoeprint_arrays))
test_data_map = {"train": [], "dev": [], "test": []}
print("sample {} shoeprint {} ".format(len(sample_arrays), len(shoeprint_arrays)))
scope_length = len(determine_scope[list(determine_scope.keys())[0]])
imgs_num = len(determine_scope)
for i, origin_name in enumerate(determine_scope):
print("get_test_data ({}) {}/{} ".format(action_type, i, imgs_num), end='\r')
if action_type == "test":
assert origin_name in shoeprint_map
else:
if origin_name not in shoeprint_map:
print(origin_name)
continue
set_type = shoeprint_map[origin_name]["set_type"]
type_id = shoeprint_map[origin_name]["type_id"]
item = {}
item["name"] = origin_name
item["indices"] = shoeprint_map[origin_name]["img_indices"]
item["scope_indices"] = []
item["label"] = determine_scope[origin_name].index(type_id)
for j in range(scope_length):
item["scope_indices"].append(sample_map[determine_scope[origin_name][j]]["img_indices"][0])
test_data_map[set_type].append(item)
return img_arrays, test_data_map, len(sample_arrays)
def data_import(augment=[]):
""" 导入数据集, 分为训练集、开发集
``` h5
{
"img_arrays": [<img01>, <img02>, ...] # 每个都是 (H, W, 1)
}
```
"""
data_set = {}
if not os.path.exists(H5_PATH) or not os.path.exists(JSON_PATH):
print("未发现处理好的数据文件,正在处理...")
determine_scope = get_determine_scope(action_type="train")
sample_arrays, sample_map = get_sample_arrays(augment)
shoeprint_arrays, shoeprint_map, type_map = get_shoeprint_arrays(
augment, sample_length=len(sample_arrays), action_type="train")
img_arrays = np.concatenate((sample_arrays, shoeprint_arrays))
indices = get_indices(sample_map, shoeprint_map, type_map)
data_set["img_arrays"] = img_arrays
data_set["indices"] = indices
h5f = h5py.File(H5_PATH, 'w')
h5f["img_arrays"] = data_set["img_arrays"]
h5f.close()
with open(JSON_PATH, 'w', encoding="utf8") as f:
json.dump(data_set["indices"], f, indent=2)
else:
print("发现处理好的数据文件,正在读取...")
h5f = h5py.File(H5_PATH, 'r')
data_set["img_arrays"] = h5f["img_arrays"][: ]
h5f.close()
with open(JSON_PATH, 'r', encoding="utf8") as f:
data_set["indices"] = json.load(f)
print("成功加载数据")
return data_set
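# Typical entry point (the augment tokens are hypothetical; supported options
# depend on data_loader.image.image2array):
#   data_set = data_import(augment=["rotate", "flip"])
#   img_arrays, indices = data_set["img_arrays"], data_set["indices"]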
| 32.900763 | 103 | 0.594896 | import os
import json
import h5py
import random
import numpy as np
from config_parser.config import PATHS, DEBUG
from data_loader.base import CacheLoader
from data_loader.image import image2array
H5_PATH = PATHS["h5_path"]
JSON_PATH = PATHS["json_path"]
SHOEPRINT_DIR = PATHS["shoeprint_dir"]
SAMPLE_DIR = PATHS["sample_dir"]
SHOEPRINT_DIR_TEST = PATHS["shoeprint_test_dir"]
DETERMINE_FILE = PATHS["determine_file"]
DETERMINE_FILE_TEST = PATHS["determine_test_file"]
@CacheLoader(name="sample", debug=DEBUG)
def get_sample_arrays(augment):
""" 获取样本文件结构,将样本图片预处理成所需格式
``` python
[
[<img1_array1>, <img1_array2>, ...],
[<img2_array1>, <img2_array2>, ...],
...
],
{
<type_id>: {
"img_indices": [<img1_index>, <img2_index>, <img3_index>, ...],
},
...
},
```
"""
sample_map = {}
sample_arrays = []
types = os.listdir(SAMPLE_DIR)
index = 0
assert types, "样本图库文件夹为空!"
for i, type_id in enumerate(types):
print("get_sample_arrays {}/{} ".format(i, len(types)), end='\r')
type_dir = os.path.join(SAMPLE_DIR, type_id)
img_path = os.path.join(type_dir, os.listdir(type_dir)[0])
sample_map[type_id] = {}
img_array = image2array(img_path, augment)
sample_map[type_id]["img_indices"] = [index + j for j in range(len(img_array))]
index += len(img_array)
sample_arrays.extend(img_array)
assert len(sample_arrays) == index
return sample_arrays, sample_map
@CacheLoader(name="shoeprint", debug=DEBUG)
def get_shoeprint_arrays(augment, sample_length, action_type="train"):
""" 获取鞋印文件结构,将鞋印图片预处理成所需格式追加在 sample_arrays 后,并将数据分类为训练类型、开发类型
之所以不整体打乱,是因为验证集与训练集、开发集是与验证集在不同的样式中,
所以开发集理应与训练集也在不同的样式中
``` python
[
[<img1_array1>, <img1_array2>, ...],
[<img2_array1>, <img2_array2>, ...],
...
],
{
<name>: {
"type_id": <xxxxxxxx>,
"img_indices": [<img1_index>, <img2_index>, <img3_index>, ...],
"set_type": "train/dev/test"
},
...
}
{
<type_id1>: [<name1>, <name2>, ...],
<type_id2>: [<name1>, <name2>, ...],
...
}
```
"""
shoeprint_map = {}
shoeprint_arrays = []
type_map = {}
shoeprint_base_dir = SHOEPRINT_DIR if action_type == "train" else SHOEPRINT_DIR_TEST
types = os.listdir(shoeprint_base_dir)
type_counter = {"train": set(), "dev": set(), "test": set()}
index = sample_length
assert types, "鞋印图库文件夹为空!"
for i, type_id in enumerate(types):
print("get_shoeprint_arrays {}/{} ".format(i, len(types)), end='\r')
if action_type == "train":
set_type = "train" if random.random() < 0.95 else "dev"
else:
set_type = "test"
type_dir = os.path.join(shoeprint_base_dir, type_id)
type_map[type_id] = []
for filename in os.listdir(type_dir):
img_path = os.path.join(type_dir, filename)
img_array = image2array(img_path, augment)
shoeprint_map[filename] = {}
shoeprint_map[filename]["type_id"] = type_id
shoeprint_map[filename]["img_indices"] = [index + j for j in range(len(img_array))]
shoeprint_map[filename]["set_type"] = set_type
shoeprint_arrays.extend(img_array)
index += len(img_array)
type_counter[set_type].add(type_id)
type_map[type_id].append(filename)
if action_type == "train":
print("训练数据共 {} 类,开发数据共 {} 类".format(len(type_counter["train"]), len(type_counter["dev"])))
else:
print("测试数据共 {} 类".format(len(type_counter["test"])))
assert len(shoeprint_arrays) == index - sample_length
return shoeprint_arrays, shoeprint_map, type_map
@CacheLoader(name="determine", debug=DEBUG)
def get_determine_scope(action_type="train"):
""" 读取待判定范围文件,并构造成字典型
``` python
{
<name>: [
<P>, <N1>, <N2>, <N3>, ... // 注意, P 不一定在最前面,而且这里记录的是 type_id
],
...
}
```
"""
determine_scope = {}
determine_scope_file = DETERMINE_FILE if action_type == "train" else DETERMINE_FILE_TEST
with open(determine_scope_file, 'r') as f:
for line in f:
line_items = line.split('\t')
for i in range(len(line_items)):
line_items[i] = line_items[i].strip()
determine_scope[line_items[0]] = line_items[1:]
return determine_scope
@CacheLoader(name="class_indices", debug=DEBUG)
def get_indices(sample_map, shoeprint_map, type_map):
""" 将所有 indices 组织在一起
``` python
[
[
[<idx01>, <idx02>], # 某一个
[<idx01>, <idx02>],
...
        ], # one class
...
]
```
"""
indices = []
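    # indices[i] collects the img_indices lists that belong to one shoe type:
    # the gallery sample first, then every training shoeprint impression of that type.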
for i, type_id in enumerate(sample_map):
print("get_indices {}/{} ".format(i, len(sample_map)), end='\r')
class_indices = []
class_indices.append(sample_map[type_id]["img_indices"])
if type_id in type_map:
for pos_name in type_map[type_id]:
if shoeprint_map[pos_name]["set_type"] == "train":
class_indices.append(shoeprint_map[pos_name]["img_indices"])
indices.append(class_indices)
return indices
@CacheLoader(name="test_data_set", debug=DEBUG)
def test_data_import(augment=[], action_type="test"):
""" 构造测试数据
``` python
img_arrays
{
"train": [
{
"name": <name>,
"index": <idx>,
"scope_indices": [<idx01>, <idx02>, ...],
"label": <correct_idx>
},
...
],
"dev": ...,
"test": ...
}
```
"""
determine_scope = get_determine_scope(action_type=action_type)
sample_arrays, sample_map = get_sample_arrays(augment=[])
shoeprint_arrays, shoeprint_map, _ = get_shoeprint_arrays(
augment=augment, sample_length=len(sample_arrays), action_type=action_type)
img_arrays = np.concatenate((sample_arrays, shoeprint_arrays))
test_data_map = {"train": [], "dev": [], "test": []}
print("sample {} shoeprint {} ".format(len(sample_arrays), len(shoeprint_arrays)))
scope_length = len(determine_scope[list(determine_scope.keys())[0]])
imgs_num = len(determine_scope)
for i, origin_name in enumerate(determine_scope):
print("get_test_data ({}) {}/{} ".format(action_type, i, imgs_num), end='\r')
if action_type == "test":
assert origin_name in shoeprint_map
else:
if origin_name not in shoeprint_map:
                print("skipping {}: not found in shoeprint_map".format(origin_name))
continue
set_type = shoeprint_map[origin_name]["set_type"]
type_id = shoeprint_map[origin_name]["type_id"]
item = {}
item["name"] = origin_name
item["indices"] = shoeprint_map[origin_name]["img_indices"]
item["scope_indices"] = []
item["label"] = determine_scope[origin_name].index(type_id)
for j in range(scope_length):
item["scope_indices"].append(sample_map[determine_scope[origin_name][j]]["img_indices"][0])
test_data_map[set_type].append(item)
return img_arrays, test_data_map, len(sample_arrays)
def data_import(augment=[]):
""" 导入数据集, 分为训练集、开发集
``` h5
{
"img_arrays": [<img01>, <img02>, ...] # 每个都是 (H, W, 1)
}
```
"""
data_set = {}
if not os.path.exists(H5_PATH) or not os.path.exists(JSON_PATH):
print("未发现处理好的数据文件,正在处理...")
determine_scope = get_determine_scope(action_type="train")
sample_arrays, sample_map = get_sample_arrays(augment)
shoeprint_arrays, shoeprint_map, type_map = get_shoeprint_arrays(
augment, sample_length=len(sample_arrays), action_type="train")
img_arrays = np.concatenate((sample_arrays, shoeprint_arrays))
indices = get_indices(sample_map, shoeprint_map, type_map)
data_set["img_arrays"] = img_arrays
data_set["indices"] = indices
h5f = h5py.File(H5_PATH, 'w')
h5f["img_arrays"] = data_set["img_arrays"]
h5f.close()
with open(JSON_PATH, 'w', encoding="utf8") as f:
json.dump(data_set["indices"], f, indent=2)
else:
print("发现处理好的数据文件,正在读取...")
h5f = h5py.File(H5_PATH, 'r')
data_set["img_arrays"] = h5f["img_arrays"][: ]
h5f.close()
with open(JSON_PATH, 'r', encoding="utf8") as f:
data_set["indices"] = json.load(f)
print("成功加载数据")
return data_set
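# Example usage (a sketch, not part of the original script; assumes the PATHS
# config and the sample/shoeprint image directories exist):
#     data_set = data_import(augment=[])
#     img_arrays = data_set["img_arrays"]   # stacked sample + shoeprint arrays
#     indices = data_set["indices"]         # per-class index groups, e.g. for triplet sampling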
| 0 | 0 | 0 |
a2e702854416df3867f7115a181a4fd224ad1bcc | 6,444 | py | Python | src/utils.py | annihilatorrrr/opentele | ff90c36a867cf7902e80f480a35041c5e7902e4c | [
"MIT"
] | 30 | 2022-01-17T20:46:02.000Z | 2022-03-31T18:49:07.000Z | src/utils.py | studasd/opentele | ff90c36a867cf7902e80f480a35041c5e7902e4c | [
"MIT"
] | 4 | 2022-02-13T10:21:12.000Z | 2022-03-28T16:05:51.000Z | src/utils.py | studasd/opentele | ff90c36a867cf7902e80f480a35041c5e7902e4c | [
"MIT"
] | 9 | 2022-01-24T18:02:08.000Z | 2022-03-24T14:23:16.000Z | from __future__ import annotations
from . import debug
from typing import Coroutine, Tuple, Type, Callable, TypeVar, Optional, List, Any, Dict
from types import FunctionType
import abc
APP_VERSION = 3004000
TDF_MAGIC = b"TDF$"
_T = TypeVar("_T")
_TCLS = TypeVar("_TCLS", bound=type)
_RT = TypeVar("_RT")
_F = TypeVar("_F", bound=Callable[..., Any])
class override(object): # nocov
"""
To use inside a class decorated with @extend_class\n
Any attributes decorated with @override will be replaced
"""
class extend_class(object): # nocov
"""
Extend a class, all attributes will be added to its parents\n
This won't override attributes that are already existed, please refer to @override or @extend_override_class to do this
"""
class extend_override_class(extend_class):
"""
Extend a class, all attributes will be added to its parents\n
If those attributes are already existed, they will be replaced by the new one
"""
| 30.980769 | 123 | 0.575109 | from __future__ import annotations
from . import debug
from typing import Coroutine, Tuple, Type, Callable, TypeVar, Optional, List, Any, Dict
from types import FunctionType
import abc
APP_VERSION = 3004000
TDF_MAGIC = b"TDF$"
_T = TypeVar("_T")
_TCLS = TypeVar("_TCLS", bound=type)
_RT = TypeVar("_RT")
_F = TypeVar("_F", bound=Callable[..., Any])
class BaseMetaClass(abc.ABCMeta): # pragma: no cover
def __new__(
cls: Type[_T], clsName: str, bases: Tuple[type], attrs: Dict[str, Any]
) -> _T:
# Hook all subclass methods
if debug.IS_DEBUG_MODE: # pragma: no cover
ignore_list = [
"__new__",
"__del__",
"__get__",
"__call__",
"__set_name__",
"__str__",
"__repr__",
]
for attr, val in attrs.items():
if (
not attr in ignore_list
and callable(val)
and not isinstance(val, type)
):
newVal = debug.DebugMethod(val)
attrs[attr] = newVal
result = super().__new__(cls, clsName, bases, attrs)
return result
class BaseObject(object, metaclass=BaseMetaClass):
pass
class override(object): # nocov
"""
To use inside a class decorated with @extend_class\n
Any attributes decorated with @override will be replaced
"""
def __new__(cls, decorated_func: _F) -> _F:
# check if decorated_cls really is a function
if not isinstance(decorated_func, FunctionType):
raise BaseException(
"@override decorator is only for functions, not classes"
)
decorated_func.__isOverride__ = True # type: ignore
return decorated_func # type: ignore
@staticmethod
def isOverride(func: _F) -> bool:
if not hasattr(func, "__isOverride__"):
return False
return func.__isOverride__
class extend_class(object): # nocov
"""
Extend a class, all attributes will be added to its parents\n
This won't override attributes that are already existed, please refer to @override or @extend_override_class to do this
"""
def __new__(cls, decorated_cls: _TCLS, isOverride: bool = False) -> _TCLS:
# check if decorated_cls really is a class (type)
        if not isinstance(decorated_cls, type):
raise BaseException(
"@extend_class decorator is only for classes, not functions"
)
newAttributes = dict(decorated_cls.__dict__)
crossDelete = ["__abstractmethods__", "__module__", "_abc_impl", "__doc__"]
[
(newAttributes.pop(cross) if cross in newAttributes else None)
for cross in crossDelete
]
crossDelete = {}
base = decorated_cls.__bases__[0]
if not isOverride:
# loop through its parents and add attributes
for attributeName, attributeValue in newAttributes.items():
# check if class base already has this attribute
result = extend_class.getattr(base, attributeName)
if result != None:
if id(result["value"]) == id(attributeValue):
crossDelete[attributeName] = attributeValue
else:
# if not override this attribute
if not override.isOverride(attributeValue):
print(
f"[{attributeName}] {id(result['value'])} - {id(attributeValue)}"
)
raise BaseException("err")
[newAttributes.pop(cross) for cross in crossDelete]
for attributeName, attributeValue in newAttributes.items():
# let's backup this attribute for future uses
result = extend_class.getattr(base, attributeName)
if result != None:
# ! dirty code, gonna fix it later, it's okay for now
setattr(
base,
f"__{decorated_cls.__name__}__{attributeName}",
result["value"],
)
setattr(
decorated_cls,
f"__{decorated_cls.__name__}__{attributeName}",
result["value"],
)
setattr(base, attributeName, attributeValue)
return decorated_cls
@staticmethod
def object_hierarchy_getattr(obj: object, attributeName: str) -> List[str]:
results = []
if type(obj) == object:
return results
if attributeName in obj.__dict__:
val = obj.__dict__[attributeName]
results.append({"owner": obj, "value": val})
if attributeName in obj.__class__.__dict__:
val = obj.__class__.__dict__[attributeName]
results.append({"owner": obj, "value": val})
for base in obj.__bases__: # type: ignore
results += extend_class.object_hierarchy_getattr(base, attributeName)
results.reverse()
return results
@staticmethod
def getattr(obj: object, attributeName: str) -> Optional[dict]:
try:
value = getattr(obj, attributeName)
return {"owner": obj, "value": value}
except BaseException as e:
return None
class extend_override_class(extend_class):
"""
Extend a class, all attributes will be added to its parents\n
If those attributes are already existed, they will be replaced by the new one
"""
def __new__(cls, decorated_cls: _TCLS) -> _TCLS:
return super().__new__(cls, decorated_cls, True)
class sharemethod(type):
def __get__(self, obj, cls):
self.__owner__ = obj if obj else cls
return self
def __call__(self, *args) -> Any:
return self.__fget__.__get__(self.__owner__)(*args) # type: ignore
def __set_name__(self, owner, name):
self.__owner__ = owner
def __new__(cls: Type[_T], func: _F) -> Type[_F]:
clsName = func.__class__.__name__
bases = func.__class__.__bases__
attrs = func.__dict__
# attrs = dict(func.__class__.__dict__)
result = super().__new__(cls, clsName, bases, attrs)
result.__fget__ = func
return result
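# Example usage (a sketch, not part of the original module): extending an
# existing class in place with @extend_class and @override.
#     class Base(BaseObject):
#         def greet(self):
#             return "hi"
#     @extend_class
#     class _BaseExt(Base):
#         def wave(self):             # new attribute, copied onto Base
#             return "wave"
#         @override
#         def greet(self):            # replaces the existing Base.greet
#             return "hello"
#     Base().wave()                   # -> "wave"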
| 4,973 | 73 | 361 |
d06f09440a6a5fcdcc7fe798643dc77e8de50601 | 5,021 | py | Python | qa/tasks/cram.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 4 | 2020-04-08T03:42:02.000Z | 2020-10-01T20:34:48.000Z | qa/tasks/cram.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 93 | 2020-03-26T14:29:14.000Z | 2020-11-12T05:54:55.000Z | qa/tasks/cram.py | rpratap-bot/ceph | 9834961a66927ae856935591f2fd51082e2ee484 | [
"MIT"
] | 23 | 2020-03-24T10:28:44.000Z | 2020-09-24T09:42:19.000Z | """
Cram tests
"""
import logging
import os
import six
from tasks.util.workunit import get_refspec_after_overrides
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run
from teuthology.config import config as teuth_config
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run all cram tests from the specified paths on the specified
clients. Each client runs tests in parallel.
Limitations:
Tests must have a .t suffix. Tests with duplicate names will
overwrite each other, so only the last one will run.
For example::
tasks:
- ceph:
- cram:
clients:
client.0:
- qa/test.t
            - qa/test2.t
client.1: [qa/test.t]
branch: foo
You can also run a list of cram tests on all clients::
tasks:
- ceph:
- cram:
clients:
all: [qa/test.t]
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, dict)
assert 'clients' in config and isinstance(config['clients'], dict), \
'configuration must contain a dictionary of clients'
clients = teuthology.replace_all_with_clients(ctx.cluster,
config['clients'])
testdir = teuthology.get_testdir(ctx)
overrides = ctx.config.get('overrides', {})
refspec = get_refspec_after_overrides(config, overrides)
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
try:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'mkdir', '--', client_dir,
run.Raw('&&'),
'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw('&&'),
'{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
'install', 'cram==0.6',
],
)
clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
remote.run(args=refspec.clone(git_url, clone_dir))
for test in tests:
assert test.endswith('.t'), 'tests must end in .t'
remote.run(
args=[
'cp', '--', os.path.join(clone_dir, test), client_dir,
],
)
with parallel() as p:
for role in clients.keys():
p.spawn(_run_tests, ctx, role)
finally:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# remove test files unless they failed
for test_file in test_files:
abs_file = os.path.join(client_dir, test_file)
remote.run(
args=[
'test', '-f', abs_file + '.err',
run.Raw('||'),
'rm', '-f', '--', abs_file,
],
)
# ignore failure since more than one client may
# be run on a host, and the client dir should be
# non-empty if the test failed
remote.run(
args=[
'rm', '-rf', '--',
'{tdir}/virtualenv'.format(tdir=testdir),
clone_dir,
run.Raw(';'),
'rmdir', '--ignore-fail-on-non-empty', client_dir,
],
)
def _run_tests(ctx, role):
"""
For each role, check to make sure it's a client, then run the cram on that client
:param ctx: Context
:param role: Roles
"""
assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
testdir = teuthology.get_testdir(ctx)
log.info('Running tests for %s...', role)
remote.run(
args=[
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
run.Raw('PATH=$PATH:/usr/sbin'),
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
'-v', '--',
run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
],
logger=log.getChild(role),
)
| 33.032895 | 87 | 0.5238 | """
Cram tests
"""
import logging
import os
import six
from tasks.util.workunit import get_refspec_after_overrides
from teuthology import misc as teuthology
from teuthology.parallel import parallel
from teuthology.orchestra import run
from teuthology.config import config as teuth_config
log = logging.getLogger(__name__)
def task(ctx, config):
"""
Run all cram tests from the specified paths on the specified
clients. Each client runs tests in parallel.
Limitations:
Tests must have a .t suffix. Tests with duplicate names will
overwrite each other, so only the last one will run.
For example::
tasks:
- ceph:
- cram:
clients:
client.0:
- qa/test.t
            - qa/test2.t
client.1: [qa/test.t]
branch: foo
You can also run a list of cram tests on all clients::
tasks:
- ceph:
- cram:
clients:
all: [qa/test.t]
:param ctx: Context
:param config: Configuration
"""
assert isinstance(config, dict)
assert 'clients' in config and isinstance(config['clients'], dict), \
'configuration must contain a dictionary of clients'
clients = teuthology.replace_all_with_clients(ctx.cluster,
config['clients'])
testdir = teuthology.get_testdir(ctx)
overrides = ctx.config.get('overrides', {})
refspec = get_refspec_after_overrides(config, overrides)
git_url = teuth_config.get_ceph_qa_suite_git_url()
log.info('Pulling tests from %s ref %s', git_url, refspec)
try:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
remote.run(
args=[
'mkdir', '--', client_dir,
run.Raw('&&'),
'virtualenv', '{tdir}/virtualenv'.format(tdir=testdir),
run.Raw('&&'),
'{tdir}/virtualenv/bin/pip'.format(tdir=testdir),
'install', 'cram==0.6',
],
)
clone_dir = '{tdir}/clone.{role}'.format(tdir=testdir, role=client)
remote.run(args=refspec.clone(git_url, clone_dir))
for test in tests:
assert test.endswith('.t'), 'tests must end in .t'
remote.run(
args=[
'cp', '--', os.path.join(clone_dir, test), client_dir,
],
)
with parallel() as p:
for role in clients.keys():
p.spawn(_run_tests, ctx, role)
finally:
for client, tests in clients.items():
(remote,) = ctx.cluster.only(client).remotes.keys()
client_dir = '{tdir}/archive/cram.{role}'.format(tdir=testdir, role=client)
test_files = set([test.rsplit('/', 1)[1] for test in tests])
# remove test files unless they failed
for test_file in test_files:
abs_file = os.path.join(client_dir, test_file)
remote.run(
args=[
'test', '-f', abs_file + '.err',
run.Raw('||'),
'rm', '-f', '--', abs_file,
],
)
# ignore failure since more than one client may
# be run on a host, and the client dir should be
# non-empty if the test failed
remote.run(
args=[
'rm', '-rf', '--',
'{tdir}/virtualenv'.format(tdir=testdir),
clone_dir,
run.Raw(';'),
'rmdir', '--ignore-fail-on-non-empty', client_dir,
],
)
def _run_tests(ctx, role):
"""
For each role, check to make sure it's a client, then run the cram on that client
:param ctx: Context
:param role: Roles
"""
assert isinstance(role, six.string_types)
PREFIX = 'client.'
assert role.startswith(PREFIX)
id_ = role[len(PREFIX):]
(remote,) = ctx.cluster.only(role).remotes.keys()
ceph_ref = ctx.summary.get('ceph-sha1', 'master')
testdir = teuthology.get_testdir(ctx)
log.info('Running tests for %s...', role)
remote.run(
args=[
run.Raw('CEPH_REF={ref}'.format(ref=ceph_ref)),
run.Raw('CEPH_ID="{id}"'.format(id=id_)),
run.Raw('PATH=$PATH:/usr/sbin'),
'adjust-ulimits',
'ceph-coverage',
'{tdir}/archive/coverage'.format(tdir=testdir),
'{tdir}/virtualenv/bin/cram'.format(tdir=testdir),
'-v', '--',
run.Raw('{tdir}/archive/cram.{role}/*.t'.format(tdir=testdir, role=role)),
],
logger=log.getChild(role),
)
| 0 | 0 | 0 |
aa18e439d183a51bf3e6d04d8f0c183825d9ae89 | 4,211 | py | Python | codes/unique1_pairs_review.py | gordeli/JCR_LIARspaper | cd2eee016e0dfe36d9c947be9c1523cfa1badac2 | [
"MIT"
] | null | null | null | codes/unique1_pairs_review.py | gordeli/JCR_LIARspaper | cd2eee016e0dfe36d9c947be9c1523cfa1badac2 | [
"MIT"
] | null | null | null | codes/unique1_pairs_review.py | gordeli/JCR_LIARspaper | cd2eee016e0dfe36d9c947be9c1523cfa1badac2 | [
"MIT"
] | null | null | null | # This code calculates the number of unique words in each review:
# from pathos.multiprocessing import ProcessingPool as Pool
import multiprocessing
from multiprocessing.pool import Pool
from tqdm import tqdm
import time
# import timing
import re # import the library for regular expressions
from nltk.tokenize import sent_tokenize, word_tokenize
import sqlite3
import shutil
def get_rarepairs():
""" This code finds the list of rare word pairs
"""
connn = sqlite3.connect('Liars7_unique1.sqlite')
# Get the cursor, which is used to traverse the database, line by line
    currr = connn.cursor()  # cursor from the connection opened just above
sqlstr = 'SELECT Word_pair, TF FROM [Review word pairs]'
wordpairs_rare = []
for row in currr.execute(sqlstr):
wordpair = row[0]
TF = row[1]
if TF < 2: # This number is highly corpus specific (27 for Liars, 28 for Mott?), 6.3 for Liars Pilot, 20.2 for Liars 7 (reviews only), 2.78 for pairs Liars 7, 2.98 for Ott, 3.90 for yelpCHIhotels, 3.49 for kaggle21k
wordpairs_rare.append(wordpair)
return wordpairs_rare
def process_row(arg):
"""
this function receives a single row of a table
    and returns a pair (unique_pairs, reviewtext) for a given row
"""
wordpairs_rare, row = arg
reviewtext = row[0]
    wordpairs = dict() # Tracks every word pair in this review and how many times it occurs
sentences = sent_tokenize(reviewtext)
for s in sentences:
words = word_tokenize(s)
for i in range(len(words) - 2 + 1):
key = tuple(words[i:i+2])
if ',' in key or '.' in key or ':' in key or '!' in key or '?' in key or ';' in key:
continue
else:
wordpairs[key] = wordpairs.get(key, 0) + 1
unique_pairs = 0
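    # A review's score is the total number of occurrences of corpus-rare word pairs.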
for wp in wordpairs:
wp_str = ' '.join(wp)
if wp_str in wordpairs_rare:
unique_pairs = unique_pairs + wordpairs[wp]
return (unique_pairs, reviewtext)
def record_answers(curr, answers):
"""
    this function receives a cursor to the sql db (curr) and a list of answers List[(unique_pairs, reviewtext)]
and records answers to the sql
for now, this is single process code
"""
for answer in answers:
unique_pairs, reviewtext = answer
curr.execute('UPDATE Reviews SET Uniqpairs1 = ? WHERE Review_cleaned = ?', (unique_pairs,reviewtext, ))
if __name__ == '__main__':
conn = sqlite3.connect('Liars7_unique1.sqlite')
# Get the cursor, which is used to traverse the database, line by line
cur = conn.cursor()
shutil.copyfile('Liars7_unique1.sqlite', 'Am_kg_w.sqlite')
conn_w = sqlite3.connect('Am_kg_w.sqlite') # The database to be updated
cur_w = conn_w.cursor()
try:
cur_w.execute('''ALTER TABLE Reviews ADD Uniqpairs1 INTEGER NOT NULL DEFAULT 0''') # DEFAULT 0 was removed from the sql string
    except sqlite3.OperationalError:  # column already exists
print("The column 'Uniqpairs1' exists already")
pass # handle the error
wordpairs_rare = get_rarepairs()
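    # NOTE: membership tests in process_row would be much faster if wordpairs_rare were a set.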
sqlstr = 'SELECT Review_cleaned FROM Reviews' # Select query that instructs over what we will be iterating
args = [(wordpairs_rare, row) for row in cur.execute(sqlstr)] # read rows from sql
print("start computing..")
t0 = time.time()
n_processes = multiprocessing.cpu_count()
if n_processes == 1:
print("single process")
answers = [process_row(arg) for arg in args] # single process each row in rows
else:
print(f"pool process with {n_processes} threads")
# we call initializer function = set_wordnet so that each worker receives separate wn object
with Pool(processes=n_processes) as pool:
answers = list(tqdm(pool.imap(process_row, args), total = len(args)))
print(f"finished computing in {time.time() - t0} seconds...")
t0 = time.time()
print("start recording...")
record_answers(cur_w, answers) # recording answers
print(f"finished recording in {time.time() - t0} seconds")
conn_w.commit()
cur_w.close()
conn_w.close()
cur.close()
conn.close()
shutil.copyfile('Am_kg_w.sqlite', 'Liars7_uniquepairs1.sqlite')
| 35.686441 | 223 | 0.666588 | # This code calculates the number of unique words in each review:
# from pathos.multiprocessing import ProcessingPool as Pool
import multiprocessing
from multiprocessing.pool import Pool
from tqdm import tqdm
import time
# import timing
import re # import the library for regular expressions
from nltk.tokenize import sent_tokenize, word_tokenize
import sqlite3
import shutil
def get_rarepairs():
""" This code finds the list of rare word pairs
"""
connn = sqlite3.connect('Liars7_unique1.sqlite')
# Get the cursor, which is used to traverse the database, line by line
    currr = connn.cursor()  # cursor from the connection opened just above
sqlstr = 'SELECT Word_pair, TF FROM [Review word pairs]'
wordpairs_rare = []
for row in currr.execute(sqlstr):
wordpair = row[0]
TF = row[1]
if TF < 2: # This number is highly corpus specific (27 for Liars, 28 for Mott?), 6.3 for Liars Pilot, 20.2 for Liars 7 (reviews only), 2.78 for pairs Liars 7, 2.98 for Ott, 3.90 for yelpCHIhotels, 3.49 for kaggle21k
wordpairs_rare.append(wordpair)
return wordpairs_rare
def process_row(arg):
"""
this function receives a single row of a table
    and returns a pair (unique_pairs, reviewtext) for a given row
"""
wordpairs_rare, row = arg
reviewtext = row[0]
    wordpairs = dict() # Tracks every word pair in this review and how many times it occurs
sentences = sent_tokenize(reviewtext)
for s in sentences:
words = word_tokenize(s)
for i in range(len(words) - 2 + 1):
key = tuple(words[i:i+2])
if ',' in key or '.' in key or ':' in key or '!' in key or '?' in key or ';' in key:
continue
else:
wordpairs[key] = wordpairs.get(key, 0) + 1
unique_pairs = 0
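    # A review's score is the total number of occurrences of corpus-rare word pairs.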
for wp in wordpairs:
wp_str = ' '.join(wp)
if wp_str in wordpairs_rare:
unique_pairs = unique_pairs + wordpairs[wp]
return (unique_pairs, reviewtext)
def record_answers(curr, answers):
"""
    this function receives a cursor to the sql db (curr) and a list of answers List[(unique_pairs, reviewtext)]
and records answers to the sql
for now, this is single process code
"""
for answer in answers:
unique_pairs, reviewtext = answer
curr.execute('UPDATE Reviews SET Uniqpairs1 = ? WHERE Review_cleaned = ?', (unique_pairs,reviewtext, ))
if __name__ == '__main__':
conn = sqlite3.connect('Liars7_unique1.sqlite')
# Get the cursor, which is used to traverse the database, line by line
cur = conn.cursor()
shutil.copyfile('Liars7_unique1.sqlite', 'Am_kg_w.sqlite')
conn_w = sqlite3.connect('Am_kg_w.sqlite') # The database to be updated
cur_w = conn_w.cursor()
try:
cur_w.execute('''ALTER TABLE Reviews ADD Uniqpairs1 INTEGER NOT NULL DEFAULT 0''') # DEFAULT 0 was removed from the sql string
    except sqlite3.OperationalError:  # column already exists
print("The column 'Uniqpairs1' exists already")
pass # handle the error
wordpairs_rare = get_rarepairs()
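    # NOTE: membership tests in process_row would be much faster if wordpairs_rare were a set.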
sqlstr = 'SELECT Review_cleaned FROM Reviews' # Select query that instructs over what we will be iterating
args = [(wordpairs_rare, row) for row in cur.execute(sqlstr)] # read rows from sql
print("start computing..")
t0 = time.time()
n_processes = multiprocessing.cpu_count()
if n_processes == 1:
print("single process")
answers = [process_row(arg) for arg in args] # single process each row in rows
else:
print(f"pool process with {n_processes} threads")
# we call initializer function = set_wordnet so that each worker receives separate wn object
with Pool(processes=n_processes) as pool:
answers = list(tqdm(pool.imap(process_row, args), total = len(args)))
print(f"finished computing in {time.time() - t0} seconds...")
t0 = time.time()
print("start recording...")
record_answers(cur_w, answers) # recording answers
print(f"finished recording in {time.time() - t0} seconds")
conn_w.commit()
cur_w.close()
conn_w.close()
cur.close()
conn.close()
shutil.copyfile('Am_kg_w.sqlite', 'Liars7_uniquepairs1.sqlite')
| 0 | 0 | 0 |
8cdb7958b0bb8caf4566be41008f5ddad98b4203 | 4,450 | py | Python | apps/flaskexample/views.py | joelmpiper/bill_taxonomy | 9284dfae905ca8efa558b4fd93469d03cf4b8074 | [
"MIT"
] | null | null | null | apps/flaskexample/views.py | joelmpiper/bill_taxonomy | 9284dfae905ca8efa558b4fd93469d03cf4b8074 | [
"MIT"
] | null | null | null | apps/flaskexample/views.py | joelmpiper/bill_taxonomy | 9284dfae905ca8efa558b4fd93469d03cf4b8074 | [
"MIT"
] | null | null | null | from flask import render_template
from flask import request
from flaskexample import app
from sqlalchemy import create_engine
import pandas as pd
import psycopg2
import yaml
from a_Model import ModelIt
from flaskexample.support_functions import formatted_query
ymlfile = open("../configs.yml", 'r')
cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
ymlfile.close()
dbname = cfg['dbname']
user = cfg['username']
host = 'localhost'
db = create_engine('postgres://%s@%s/%s' % (user, host, dbname))  # '@' separates user from host
con = None
con = psycopg2.connect(database=dbname, user=user)
| 29.865772 | 72 | 0.63618 | from flask import render_template
from flask import request
from flaskexample import app
from sqlalchemy import create_engine
import pandas as pd
import psycopg2
import yaml
from a_Model import ModelIt
from flaskexample.support_functions import formatted_query
ymlfile = open("../configs.yml", 'r')
cfg = yaml.load(ymlfile, Loader=yaml.SafeLoader)
ymlfile.close()
dbname = cfg['dbname']
user = cfg['username']
host = 'localhost'
db = create_engine('postgres://%s@%s/%s' % (user, host, dbname))  # '@' separates user from host
con = None
con = psycopg2.connect(database=dbname, user=user)
@app.route('/')
@app.route('/index')
def index():
return render_template("ny_bill_input.html", title='Home',
user={'nickname': 'Joel'},)
@app.route('/db')
def birth_page():
sql_query = """
SELECT nyb.bill_num, nyb.bill_name, ts.score
FROM table_score as ts
INNER JOIN ny_bills as nyb
ON nyb.bill_num=ts.bill_num
WHERE ts.subject='Health'
ORDER BY ts.score DESC
LIMIT 100;
"""
query_results = pd.read_sql_query(sql_query, con)
bills = ""
print(query_results[:100])
for i in range(0, 100):
bills += "New York Bill "
bills += query_results.iloc[i]['bill_num']
bills += ': '
bills += query_results.iloc[i]['bill_name']
bills += "<br>"
return bills
@app.route('/ny_bills')
def ny_bills_fancy():
sql_query = """
SELECT nyb.bill_num, nyb.bill_name, ts.score
FROM table_score as ts
INNER JOIN ny_bills as nyb
ON nyb.bill_num=ts.bill_num
WHERE ts.subject='Health'
ORDER BY ts.score DESC
LIMIT 100;
"""
query_results = pd.read_sql_query(sql_query, con)
bills = []
for i in range(0, query_results.shape[0]):
bills.append(dict(bill_num=query_results.iloc[i]['bill_num'],
bill_name=query_results.iloc[i]['bill_name'],
score=query_results.iloc[i]['score']))
return render_template('ny_bills.html', bills=bills)
@app.route('/ny_bill_input')
def ny_bills_input():
return render_template("ny_bill_input.html")
@app.route('/us_bill_input')
def us_bills_input():
return render_template("us_bill_input.html")
@app.route('/ny_bills_output')
def ny_bills_output():
# pull 'subject' from input field and store it
subject = request.args.get('subject')
# just select the bills for which
# have the subject that the user inputs
q_str = """
SELECT nyb.bill_num, nyb.bill_name, ts.logistic
FROM ny_score as ts
INNER JOIN ny_bills as nyb
ON nyb.bill_num=ts.bill_num
WHERE ts.subject={0}
AND ts.logistic IS NOT NULL
ORDER BY ts.logistic DESC
LIMIT 100;
"""
q_fill = q_str.format(subject)
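    # NOTE: interpolating raw user input into SQL is injection-prone; a
    # parameterized query (e.g. pandas' params= argument) would be safer here
    # and in us_bills_output below.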
query_results = pd.read_sql_query(q_fill, con)
bills = formatted_query(query_results, 'logistic')
# bills = []
# for i in range(0, query_results.shape[0]):
# bills.append(dict(bill_num=query_results.iloc[i]['bill_num'],
# bill_name=query_results.iloc[i]['bill_name'],
# score=query_results.iloc[i]['logistic']))
the_result = ModelIt(subject, bills)
return render_template("ny_bills_output.html",
bills=bills, the_result=the_result)
@app.route('/us_bills_output')
def us_bills_output():
# pull 'subject' from input field and store it
subject = request.args.get('subject')
# just select the bills for which
# have the subject that the user inputs
q_str = """
SELECT usb.bill_num, usb.bill_name, ts.logistic
FROM us_score as ts
INNER JOIN us_bills as usb
ON usb.bill_num=ts.bill_num
WHERE ts.subject={0}
AND ts.logistic IS NOT NULL
ORDER BY ts.logistic DESC
LIMIT 100;
"""
q_fill = q_str.format(subject)
query_results = pd.read_sql_query(q_fill, con)
bills = []
for i in range(0, query_results.shape[0]):
bills.append(dict(bill_num=query_results.iloc[i]['bill_num'],
bill_name=query_results.iloc[i]['bill_name'],
score=query_results.iloc[i]['logistic']))
the_result = ModelIt(subject, bills)
return render_template("us_bills_output.html",
bills=bills, the_result=the_result)
@app.route('/lda_topics')
def lda_topics():
return render_template("lda_vis.html")
| 3,500 | 0 | 176 |
919c91b562b34a6a6b8fa222823d83beb6c23d2d | 71 | py | Python | bempy/django/blocks/cssreset/__init__.py | svetlyak40wt/bempy | ad87982d17c2d14c344d9e3d91a48c37dfb72535 | [
"BSD-3-Clause"
] | 1 | 2015-04-29T15:19:45.000Z | 2015-04-29T15:19:45.000Z | bempy/django/blocks/cssreset/__init__.py | svetlyak40wt/bempy | ad87982d17c2d14c344d9e3d91a48c37dfb72535 | [
"BSD-3-Clause"
] | null | null | null | bempy/django/blocks/cssreset/__init__.py | svetlyak40wt/bempy | ad87982d17c2d14c344d9e3d91a48c37dfb72535 | [
"BSD-3-Clause"
] | 1 | 2019-06-10T16:08:54.000Z | 2019-06-10T16:08:54.000Z | from bempy import context_blocks
context_blocks('cssreset', locals())
| 17.75 | 36 | 0.802817 | from bempy import context_blocks
context_blocks('cssreset', locals())
| 0 | 0 | 0 |
c5098c62114ee7e651fe7e22098a41baf587b44f | 2,457 | py | Python | tests/functional/tabloid/test_dbp_2146_distinct_not_in.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2022-02-05T11:37:13.000Z | 2022-02-05T11:37:13.000Z | tests/functional/tabloid/test_dbp_2146_distinct_not_in.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-09-03T11:47:00.000Z | 2021-09-03T12:42:10.000Z | tests/functional/tabloid/test_dbp_2146_distinct_not_in.py | FirebirdSQL/firebird-qa | 96af2def7f905a06f178e2a80a2c8be4a4b44782 | [
"MIT"
] | 1 | 2021-06-30T14:14:16.000Z | 2021-06-30T14:14:16.000Z | #coding:utf-8
#
# id: functional.tabloid.dbp_2146_distinct_not_in
# title: Common SQL. Check correctness of the results
# description:
# tracker_id:
# min_versions: ['2.5']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=.*', '')]
init_script_1 = """"""
db_1 = db_factory(from_backup='tabloid-dbp-2146.fbk', init=init_script_1)
test_script_1 = """
set list on;
with
eset
as(
select tbi, count(distinct ari) as cnt
from pdata u
where (
select count(distinct ari)
from pdata where tbi=u.tbi
) > 2
group by tbi having sum(cv)=16*16-1
)
,wset
as(
select ari
from pdata
where tbi in (
select tbi from pdata group by tbi
having sum(cv)=16*16-1
)
group by ari having sum(cv)=1000-235
)
,q1 as(
select distinct pa.id ari, pt.id tbi, p.cnt
from pdata u
join eset p on p.tbi=u.tbi
join parea pa on pa.id=u.ari
join ptube pt on pt.id=u.tbi
join wset b on b.ari=u.ari
)
,q2 as (
select
a.ari
,a.tbi
,b.cnt
from
(
select distinct a.ari, b.tbi
from
(
select ari
from pdata
where tbi not in (
select tbi
from pdata
group by tbi
having sum(cv) <> 16*16-1
)
group by ari
having 1000 - sum(cv) = 235
) a
, pdata b
where a.ari = b.ari
) a,
(
select tbi, count(distinct ari) cnt
from pdata group by tbi
having count(distinct ari) > 2
) b
where a.tbi = b.tbi
)
select ari,tbi,cnt
from q1 natural join q2
order by 1,2,3
;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ARI 6
TBI 10
CNT 3
"""
| 23.4 | 73 | 0.49654 | #coding:utf-8
#
# id: functional.tabloid.dbp_2146_distinct_not_in
# title: Common SQL. Check correctness of the results
# description:
# tracker_id:
# min_versions: ['2.5']
# versions: 2.5
# qmid: None
import pytest
from firebird.qa import db_factory, isql_act, Action
# version: 2.5
# resources: None
substitutions_1 = [('=.*', '')]
init_script_1 = """"""
db_1 = db_factory(from_backup='tabloid-dbp-2146.fbk', init=init_script_1)
test_script_1 = """
set list on;
with
eset
as(
select tbi, count(distinct ari) as cnt
from pdata u
where (
select count(distinct ari)
from pdata where tbi=u.tbi
) > 2
group by tbi having sum(cv)=16*16-1
)
,wset
as(
select ari
from pdata
where tbi in (
select tbi from pdata group by tbi
having sum(cv)=16*16-1
)
group by ari having sum(cv)=1000-235
)
,q1 as(
select distinct pa.id ari, pt.id tbi, p.cnt
from pdata u
join eset p on p.tbi=u.tbi
join parea pa on pa.id=u.ari
join ptube pt on pt.id=u.tbi
join wset b on b.ari=u.ari
)
,q2 as (
select
a.ari
,a.tbi
,b.cnt
from
(
select distinct a.ari, b.tbi
from
(
select ari
from pdata
where tbi not in (
select tbi
from pdata
group by tbi
having sum(cv) <> 16*16-1
)
group by ari
having 1000 - sum(cv) = 235
) a
, pdata b
where a.ari = b.ari
) a,
(
select tbi, count(distinct ari) cnt
from pdata group by tbi
having count(distinct ari) > 2
) b
where a.tbi = b.tbi
)
select ari,tbi,cnt
from q1 natural join q2
order by 1,2,3
;
"""
act_1 = isql_act('db_1', test_script_1, substitutions=substitutions_1)
expected_stdout_1 = """
ARI 6
TBI 10
CNT 3
"""
@pytest.mark.version('>=2.5')
def test_1(act_1: Action):
act_1.expected_stdout = expected_stdout_1
act_1.execute()
assert act_1.clean_stdout == act_1.clean_expected_stdout
| 132 | 0 | 22 |
c5cc06f128837cada07ecbefad07ffa87dff0290 | 240 | py | Python | exemplos/connect.py | Fr4ncisTaylor/HubSale-api | 30a2c0900b2c37503e9ba1df7b93381552eef746 | [
"Apache-2.0"
] | 1 | 2021-02-26T16:25:26.000Z | 2021-02-26T16:25:26.000Z | exemplos/connect.py | Fr4ncisTaylor/hubsaleapi | 30a2c0900b2c37503e9ba1df7b93381552eef746 | [
"Apache-2.0"
] | null | null | null | exemplos/connect.py | Fr4ncisTaylor/hubsaleapi | 30a2c0900b2c37503e9ba1df7b93381552eef746 | [
"Apache-2.0"
] | null | null | null | from HubSale import HubSale
from pprint import pprint
client_id = ""
client_secret = ""
hubsale = HubSale(client_id, client_secret)
code, message = hubsale.connect()
if code:
print("Success!!!")
else:
print('Oops,', message) | 18.461538 | 49 | 0.695833 | from HubSale import HubSale
from pprint import pprint
client_id = ""
client_secret = ""
hubsale = HubSale(client_id, client_secret)
code, message = hubsale.connect()
if code:
print("Success!!!")
else:
print('Oops,', message) | 0 | 0 | 0 |
39199a0dac7cb4544d8a1d3056e6f89535893cd2 | 1,816 | py | Python | p22.py | nymoral/euler | 9dcc1bb6c733e3164e06e97f2363993fb932f5fc | [
"MIT"
] | null | null | null | p22.py | nymoral/euler | 9dcc1bb6c733e3164e06e97f2363993fb932f5fc | [
"MIT"
] | null | null | null | p22.py | nymoral/euler | 9dcc1bb6c733e3164e06e97f2363993fb932f5fc | [
"MIT"
] | null | null | null | """"""
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a name score.
For example, when the list is sorted into alphabetical order,
COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list.
So, COLIN would obtain a score of 938 × 53 = 49714.
What is the total of all the name scores in the file?
"""
def total_score(filename):
with open(filename, mode='r') as f:
names = [n[1:-1] for n in f.readline().split(',')]
names.sort()
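    # name score = (1-based alphabetical position) * (sum of letter values, A=1 .. Z=26)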
return sum((1 + i) * (sum(ord(letter) - ord('A') + 1 for letter in name)) for i, name in enumerate(names))
if __name__ == "__main__":
filename = "22.txt"
print(total_score(filename))
| 46.564103 | 118 | 0.686123 | """
Using names.txt (right click and 'Save Link/Target As...'), a 46K text file containing over five-thousand first names,
begin by sorting it into alphabetical order. Then working out the alphabetical value for each name,
multiply this value by its alphabetical position in the list to obtain a name score.
For example, when the list is sorted into alphabetical order,
COLIN, which is worth 3 + 15 + 12 + 9 + 14 = 53, is the 938th name in the list.
So, COLIN would obtain a score of 938 × 53 = 49714.
What is the total of all the name scores in the file?
"""
def total_score(filename):
with open(filename, mode='r') as f:
names = [n[1:-1] for n in f.readline().split(',')]
names.sort()
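    # name score = (1-based alphabetical position) * (sum of letter values, A=1 .. Z=26)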
return sum((1 + i) * (sum(ord(letter) - ord('A') + 1 for letter in name)) for i, name in enumerate(names))
if __name__ == "__main__":
filename = "22.txt"
print(total_score(filename))
| 0 | 0 | 0 |
7cc22316f1d22ad2cc918f1334b3c3e2ab028733 | 5,950 | py | Python | src/hcrystalball/compose/_ts_column_transformer.py | pavelg087/hcrystalball | 25f186dc72d4e273c6696a5c822f601d54bab734 | [
"MIT"
] | 1 | 2021-04-12T17:08:17.000Z | 2021-04-12T17:08:17.000Z | src/hcrystalball/compose/_ts_column_transformer.py | pavelg087/hcrystalball | 25f186dc72d4e273c6696a5c822f601d54bab734 | [
"MIT"
] | null | null | null | src/hcrystalball/compose/_ts_column_transformer.py | pavelg087/hcrystalball | 25f186dc72d4e273c6696a5c822f601d54bab734 | [
"MIT"
] | 1 | 2022-01-03T16:02:35.000Z | 2022-01-03T16:02:35.000Z | from sklearn.compose import ColumnTransformer
from sklearn.utils.validation import check_array, check_is_fitted
from scipy import sparse
from collections import namedtuple
import numpy as np
import pandas as pd
class TSColumnTransformer(ColumnTransformer):
"""Time Series compatible ColumnTransformer.
Allow usage of hcrystalball wrappers and index based transformers.
See also: `sklearn.compose.ColumnTransformer`
Returns
-------
pandas.DataFrame
Data transformed on given column
Raises
------
ValueError
If `remainder=='passthrough'` is set. Use `passthrough` as an identity estimator
If sparse output is requested, but not all columns are numeric
"""
@property
def remainder(self):
"""Access to original remainder"""
return self._remainder_original
@remainder.setter
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
check_is_fitted(self, "transformers_")
# gather column names generated by transformers to defined structure
# and solve name duplicities in more sophisticated way
Columns = namedtuple("Columns", "col_name trans_name trans_index passthrough")
feature_tuples = []
for index, (name, trans, apply_cols, _) in enumerate(self._iter(fitted=True)):
if trans == "passthrough":
col_tuple = Columns(
col_name=apply_cols,
trans_name=name,
trans_index=index,
passthrough=True,
)
elif trans == "drop":
continue
elif hasattr(trans, "get_feature_names"):
col_tuple = Columns(
col_name=trans.get_feature_names(),
trans_name=name,
trans_index=index,
passthrough=False,
)
else:
# TODO: for transformers that reduce/inflate dimensions,
# this might cause unwanted behavior
# Temporary fix for PCA
if hasattr(trans, "n_components"):
if trans.n_components != len(apply_cols):
apply_cols = [name + "_" + str(i) for i in range(trans.n_components)]
col_tuple = Columns(
col_name=apply_cols,
trans_name=name,
trans_index=index,
passthrough=False,
)
feature_tuples.append(col_tuple)
# make sure passthrough column names have precendece over other transformers
# when duplicate colum names occur
df = (
pd.DataFrame(feature_tuples)
.explode("col_name")
.reset_index(drop=True)
.sort_values("passthrough", ascending=False)
)
duplicates = df.duplicated(subset=["col_name"])
df.loc[duplicates, "col_name"] += "_" + df.loc[duplicates, "trans_name"]
feature_names = df.sort_index()["col_name"].tolist()
return feature_names
def _hstack(self, Xs):
"""Stack Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer and returning pandas.DataFrame
version of data at the end.
Parameters
----------
Xs : List
List of numpy arrays, sparse arrays, or DataFrames
Returns
-------
pandas.DataFrame
Stacked data with correct column names
Raises
------
ValueError
Raises ValueError when columns are not numeric for sparse output
"""
if self.sparse_output_:
try:
# since all columns should be numeric before stacking them
# in a sparse matrix, `check_array` is used for the
# dtype conversion if necessary.
converted_Xs = [check_array(X, accept_sparse=True, force_all_finite=False) for X in Xs]
except ValueError:
raise ValueError(
"For a sparse output, all columns should" " be a numeric or convertible to a numeric."
)
return pd.DataFrame(sparse.hstack(converted_Xs).tocsr(), columns=self.get_feature_names())
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
# addition, that turns nparray to dataframe with correct column names
return pd.DataFrame(np.hstack(Xs), columns=self.get_feature_names())
def transform(self, X):
"""Run index aware transform
Parameters
----------
X : pandas.DataFrame
Input features.
Returns
-------
pandas.DataFrame
Transformed data by given transformer on given column
"""
df = super().transform(X)
df.index = X.index
return df
def fit_transform(self, X, y=None):
"""Run index aware fit_transform
Parameters
----------
X : pandas.DataFrame
Input features.
y : pandas.Series or numpy.array
Target values
Returns
-------
pandas.DataFrame
Transformed data by given transformer on given column
"""
df = super().fit_transform(X, y)
df.index = X.index
return df
| 34 | 106 | 0.572941 | from sklearn.compose import ColumnTransformer
from sklearn.utils.validation import check_array, check_is_fitted
from scipy import sparse
from collections import namedtuple
import numpy as np
import pandas as pd
class TSColumnTransformer(ColumnTransformer):
"""Time Series compatible ColumnTransformer.
Allow usage of hcrystalball wrappers and index based transformers.
See also: `sklearn.compose.ColumnTransformer`
Returns
-------
pandas.DataFrame
Data transformed on given column
Raises
------
ValueError
If `remainder=='passthrough'` is set. Use `passthrough` as an identity estimator
If sparse output is requested, but not all columns are numeric
"""
@property
def remainder(self):
"""Access to original remainder"""
return self._remainder_original
@remainder.setter
def remainder(self, value):
if value == "passthrough":
raise ValueError(
"TSColumnTransformer.remainder=='passthrough' is not supported."
"Please use 'passthrough' as an identity estimator"
)
else:
self._remainder_original = value
def get_feature_names(self):
"""Get feature names from all transformers.
Returns
-------
feature_names : list of strings
Names of the features produced by transform.
"""
check_is_fitted(self, "transformers_")
# gather column names generated by transformers to defined structure
# and solve name duplicities in more sophisticated way
Columns = namedtuple("Columns", "col_name trans_name trans_index passthrough")
feature_tuples = []
for index, (name, trans, apply_cols, _) in enumerate(self._iter(fitted=True)):
if trans == "passthrough":
col_tuple = Columns(
col_name=apply_cols,
trans_name=name,
trans_index=index,
passthrough=True,
)
elif trans == "drop":
continue
elif hasattr(trans, "get_feature_names"):
col_tuple = Columns(
col_name=trans.get_feature_names(),
trans_name=name,
trans_index=index,
passthrough=False,
)
else:
# TODO: for transformers that reduce/inflate dimensions,
# this might cause unwanted behavior
# Temporary fix for PCA
if hasattr(trans, "n_components"):
if trans.n_components != len(apply_cols):
apply_cols = [name + "_" + str(i) for i in range(trans.n_components)]
col_tuple = Columns(
col_name=apply_cols,
trans_name=name,
trans_index=index,
passthrough=False,
)
feature_tuples.append(col_tuple)
# make sure passthrough column names have precendece over other transformers
# when duplicate colum names occur
df = (
pd.DataFrame(feature_tuples)
.explode("col_name")
.reset_index(drop=True)
.sort_values("passthrough", ascending=False)
)
duplicates = df.duplicated(subset=["col_name"])
df.loc[duplicates, "col_name"] += "_" + df.loc[duplicates, "trans_name"]
feature_names = df.sort_index()["col_name"].tolist()
return feature_names
def _hstack(self, Xs):
"""Stack Xs horizontally.
This allows subclasses to control the stacking behavior, while reusing
everything else from ColumnTransformer and returning pandas.DataFrame
version of data at the end.
Parameters
----------
Xs : List
List of numpy arrays, sparse arrays, or DataFrames
Returns
-------
pandas.DataFrame
Stacked data with correct column names
Raises
------
ValueError
Raises ValueError when columns are not numeric for sparse output
"""
if self.sparse_output_:
try:
# since all columns should be numeric before stacking them
# in a sparse matrix, `check_array` is used for the
# dtype conversion if necessary.
converted_Xs = [check_array(X, accept_sparse=True, force_all_finite=False) for X in Xs]
except ValueError:
raise ValueError(
"For a sparse output, all columns should" " be a numeric or convertible to a numeric."
)
return pd.DataFrame(sparse.hstack(converted_Xs).tocsr(), columns=self.get_feature_names())
else:
Xs = [f.toarray() if sparse.issparse(f) else f for f in Xs]
# addition, that turns nparray to dataframe with correct column names
return pd.DataFrame(np.hstack(Xs), columns=self.get_feature_names())
def transform(self, X):
"""Run index aware transform
Parameters
----------
X : pandas.DataFrame
Input features.
Returns
-------
pandas.DataFrame
Transformed data by given transformer on given column
"""
df = super().transform(X)
df.index = X.index
return df
def fit_transform(self, X, y=None):
"""Run index aware fit_transform
Parameters
----------
X : pandas.DataFrame
Input features.
y : pandas.Series or numpy.array
Target values
Returns
-------
pandas.DataFrame
Transformed data by given transformer on given column
"""
df = super().fit_transform(X, y)
df.index = X.index
return df
| 293 | 0 | 26 |
1dee287b2d62edc7d71ebd34ed6960379afab04b | 1,489 | py | Python | seapy/subsystems/subsystemstructural.py | FRidh/seapy | de63acdb3722c2558fc1ad1e1eca92abdd027932 | [
"BSD-3-Clause"
] | 8 | 2015-07-02T13:34:06.000Z | 2021-05-17T21:34:07.000Z | seapy/subsystems/subsystemstructural.py | FRidh/seapy | de63acdb3722c2558fc1ad1e1eca92abdd027932 | [
"BSD-3-Clause"
] | 2 | 2015-11-09T17:16:07.000Z | 2020-02-19T14:00:20.000Z | seapy/subsystems/subsystemstructural.py | FRidh/seapy | de63acdb3722c2558fc1ad1e1eca92abdd027932 | [
"BSD-3-Clause"
] | 2 | 2021-02-03T08:56:10.000Z | 2022-01-22T02:21:43.000Z | """
Structural subsystem
--------------------
"""
import numpy as np
from .subsystem import Subsystem
class SubsystemStructural(Subsystem):
"""
Abstract base class for all structural subsystems.
"""
@property
def conductance_point_average(self):
"""
Average point conductance of a structural component.
.. math:: \\overline{G} = \\frac{1}{4} M \overline{\\delta f}
See Lyon, page 149, equation 8.5.2 as well as page 200.
"""
return 0.25 * self.component.mass * self.average_frequency_spacing
@property
def resistance_point_average(self):
"""
Average point resistance.
"""
return 1.0 / self.conductance_point_average
@property
def velocity(self):
"""
Vibrational velocity :math:`v`.
.. math:: v = \\sqrt{\\frac{E}{m}}
Craik, equation 3.11, page 55.
"""
return np.sqrt(self.energy / self.component.mass)
@property
def velocity_level(self):
"""
Velocity level :math:`L_v`.
:rtype: :class:`numpy.ndarray`
The structural velocity level is calculated as
.. math:: L_v = 20 \\log_{10}{\\left( \\frac{v}{v_0} \\right) }
.. seealso:: :attr:`seapy.system.System.reference_velocity`
"""
return 20.0 * np.log10(self.velocity / self.system.reference_velocity)
| 24.409836 | 78 | 0.549362 | """
Structural subsystem
--------------------
"""
import numpy as np
from .subsystem import Subsystem
class SubsystemStructural(Subsystem):
"""
Abstract base class for all structural subsystems.
"""
@property
def conductance_point_average(self):
"""
Average point conductance of a structural component.
.. math:: \\overline{G} = \\frac{1}{4} M \overline{\\delta f}
See Lyon, page 149, equation 8.5.2 as well as page 200.
"""
return 0.25 * self.component.mass * self.average_frequency_spacing
@property
def resistance_point_average(self):
"""
Average point resistance.
"""
return 1.0 / self.conductance_point_average
@property
def velocity(self):
"""
Vibrational velocity :math:`v`.
.. math:: v = \\sqrt{\\frac{E}{m}}
Craik, equation 3.11, page 55.
"""
return np.sqrt(self.energy / self.component.mass)
@property
def velocity_level(self):
"""
Velocity level :math:`L_v`.
:rtype: :class:`numpy.ndarray`
The structural velocity level is calculated as
.. math:: L_v = 20 \\log_{10}{\\left( \\frac{v}{v_0} \\right) }
.. seealso:: :attr:`seapy.system.System.reference_velocity`
"""
return 20.0 * np.log10(self.velocity / self.system.reference_velocity)
| 0 | 0 | 0 |
2de3988f5f3f55418bcf982e177c0e4958dcc16d | 658 | py | Python | src/bpp/migrations/0209_auto_20200421_0013.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/bpp/migrations/0209_auto_20200421_0013.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/bpp/migrations/0209_auto_20200421_0013.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.0.4 on 2020-04-20 22:13
from django.db import migrations, models
| 24.37037 | 57 | 0.541033 | # Generated by Django 3.0.4 on 2020-04-20 22:13
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
("bpp", "0208_auto_20200329_1719"),
]
operations = [
migrations.AddField(
model_name="jednostka",
name="kolejnosc",
field=models.PositiveIntegerField(default=0),
),
migrations.AlterModelOptions(
name="jednostka",
options={
"ordering": ["kolejnosc", "nazwa"],
"verbose_name": "jednostka",
"verbose_name_plural": "jednostki",
},
),
]
| 0 | 544 | 23 |
4442a9eecfc51a9e00395d578a891bed115d0a95 | 3,159 | py | Python | setup.py | aerophile/python-irtrans | 20eee51b91d58d2b8dc18612003e67f9282f542f | [
"MIT"
] | null | null | null | setup.py | aerophile/python-irtrans | 20eee51b91d58d2b8dc18612003e67f9282f542f | [
"MIT"
] | null | null | null | setup.py | aerophile/python-irtrans | 20eee51b91d58d2b8dc18612003e67f9282f542f | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import warnings
from setuptools import setup, Extension
from Cython.Distutils import build_ext
dist_dir = os.path.dirname(os.path.abspath(__file__))
os.system("gunzip -kf %s/irtrans/models/* 2> /dev/null" %dist_dir)
try:
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print("Cannot import py2exe", file=sys.stderr)
exit(1)
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe'],
}
py2exe_console = [{
"script": "./irtrans/__main__.py",
"dest_base": "irtrans",
}]
py2exe_params = {
'console': py2exe_console,
'options': {"py2exe": py2exe_options},
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
files_spec = [
('share/doc/irtrans', ['README.rst'])
]
root = os.path.dirname(os.path.abspath(__file__))
data_files = []
for dirname, files in files_spec:
resfiles = []
for fn in files:
if not os.path.exists(fn):
warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
else:
resfiles.append(fn)
data_files.append((dirname, resfiles))
params = {
'data_files': data_files,
}
params['entry_points'] = {'console_scripts': ['irtrans = irtrans:main']}
# Get the package version
exec(compile(open('irtrans/version.py').read(),
'irtrans/version.py', 'exec'))
setup(
name = "irtrans",
version = __version__,
description="Transliteration Tool: Hindi to Urdu transliterator and vice-versa",
long_description = open('README.rst', 'rb').read().decode('utf8'),
keywords = ['Language Transliteration', 'Computational Linguistics',
'Indic', 'Roman'],
    author='Riyaz Ahmad, Irshad Ahmad',  # setuptools expects a string here, not a list
author_email='irshad.bhat@research.iiit.ac.in',
maintainer='Irshad Ahmad',
maintainer_email='irshad.bhat@research.iiit.ac.in',
license = "MIT",
url="https://github.com/irshadbhat/irtrans",
package_dir={"irtrans":"irtrans"},
packages=['irtrans', 'irtrans._utils', 'irtrans._decode'],
package_data={'irtrans': ['models/*.npy']},
classifiers=[
"Topic :: Text Processing :: Linguistic",
"Topic :: Software Development :: Libraries :: Python Modules",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Operating System :: Unix"
],
cmdclass={'build_ext': build_ext},
ext_modules=[
Extension("irtrans._decode.viterbi", ["irtrans/_decode/viterbi.pyx"]),
],
install_requires=["cython", "numpy", "scipy"],
#requires=["cython", "numpy", "scipy"],
**params
)
| 30.375 | 135 | 0.623615 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
import os
import sys
import warnings
from setuptools import setup, Extension
from Cython.Distutils import build_ext
dist_dir = os.path.dirname(os.path.abspath(__file__))
os.system("gunzip -kf %s/irtrans/models/* 2> /dev/null" %dist_dir)
try:
import py2exe
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print("Cannot import py2exe", file=sys.stderr)
exit(1)
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe'],
}
py2exe_console = [{
"script": "./irtrans/__main__.py",
"dest_base": "irtrans",
}]
py2exe_params = {
'console': py2exe_console,
'options': {"py2exe": py2exe_options},
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
files_spec = [
('share/doc/irtrans', ['README.rst'])
]
root = os.path.dirname(os.path.abspath(__file__))
data_files = []
for dirname, files in files_spec:
resfiles = []
for fn in files:
if not os.path.exists(fn):
warnings.warn('Skipping file %s since it is not present. Type make to build all automatically generated files.' % fn)
else:
resfiles.append(fn)
data_files.append((dirname, resfiles))
params = {
'data_files': data_files,
}
params['entry_points'] = {'console_scripts': ['irtrans = irtrans:main']}
# Get the package version
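# (this executes irtrans/version.py in the current namespace, defining __version__ for the setup() call below)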
exec(compile(open('irtrans/version.py').read(),
'irtrans/version.py', 'exec'))
setup(
name = "irtrans",
version = __version__,
description="Transliteration Tool: Hindi to Urdu transliterator and vice-versa",
long_description = open('README.rst', 'rb').read().decode('utf8'),
keywords = ['Language Transliteration', 'Computational Linguistics',
'Indic', 'Roman'],
    author='Riyaz Ahmad, Irshad Ahmad',  # setuptools expects a string here, not a list
author_email='irshad.bhat@research.iiit.ac.in',
maintainer='Irshad Ahmad',
maintainer_email='irshad.bhat@research.iiit.ac.in',
license = "MIT",
url="https://github.com/irshadbhat/irtrans",
package_dir={"irtrans":"irtrans"},
packages=['irtrans', 'irtrans._utils', 'irtrans._decode'],
package_data={'irtrans': ['models/*.npy']},
classifiers=[
"Topic :: Text Processing :: Linguistic",
"Topic :: Software Development :: Libraries :: Python Modules",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Intended Audience :: Science/Research",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3.3",
"Operating System :: Unix"
],
cmdclass={'build_ext': build_ext},
ext_modules=[
Extension("irtrans._decode.viterbi", ["irtrans/_decode/viterbi.pyx"]),
],
install_requires=["cython", "numpy", "scipy"],
#requires=["cython", "numpy", "scipy"],
**params
)
| 0 | 0 | 0 |
ff4ccc3079c014b4b5c7ee90d4078134c821d5d6 | 425 | py | Python | src/Research Sims/Hydra.py | wrp94/YAFS | 8f2151a3b53dbe33d5f9b12bad6a304784504485 | [
"MIT"
] | null | null | null | src/Research Sims/Hydra.py | wrp94/YAFS | 8f2151a3b53dbe33d5f9b12bad6a304784504485 | [
"MIT"
] | null | null | null | src/Research Sims/Hydra.py | wrp94/YAFS | 8f2151a3b53dbe33d5f9b12bad6a304784504485 | [
"MIT"
] | null | null | null | import os
import time
import json
import random
import logging.config
import networkx as nx
from pathlib import Path
import matplotlib.pyplot as plt
from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution
| 22.368421 | 58 | 0.863529 | import os
import time
import json
import random
import logging.config
import networkx as nx
from pathlib import Path
import matplotlib.pyplot as plt
from yafs.core import Sim
from yafs.application import create_applications_from_json
from yafs.topology import Topology
from yafs.placement import JSONPlacement
from yafs.path_routing import DeviceSpeedAwareRouting
from yafs.distribution import deterministic_distribution
| 0 | 0 | 0 |
19e75cc217ce5dc119f9366c8c80e75e7a2d1ef1 | 1,286 | py | Python | ycsb/data.py | basicthinker/scripts | ba4057e37234754f689205dee7b63577e0856854 | [
"Apache-2.0"
] | null | null | null | ycsb/data.py | basicthinker/scripts | ba4057e37234754f689205dee7b63577e0856854 | [
"Apache-2.0"
] | null | null | null | ycsb/data.py | basicthinker/scripts | ba4057e37234754f689205dee7b63577e0856854 | [
"Apache-2.0"
] | null | null | null | # Created on Jan 12 2015
import statistics
import math
import os
__author__ = "Jinglei Ren"
__copyright__ = "Copyright (c) 2015 Jinglei Ren"
__email__ = "jinglei@ren.systems"
RESULTS_FILE = 'ycsb-redis.results'
| 27.956522 | 75 | 0.598756 | # Created on Jan 12 2015
import statistics
import math
import os
__author__ = "Jinglei Ren"
__copyright__ = "Copyright (c) 2015 Jinglei Ren"
__email__ = "jinglei@ren.systems"
RESULTS_FILE = 'ycsb-redis.results'
def parse(output):
run = {}
for line in output.split('\n'):
segs = [seg.strip() for seg in line.split(',')]
if len(segs) < 3:
continue
if segs[1] == 'RunTime(ms)' or segs[1] == 'Throughput(ops/sec)':
assert segs[0] == '[OVERALL]'
run[segs[1]] = float(segs[2])
elif segs[1] == 'AverageLatency(us)':
run[segs[0].strip('[]') + segs[1]] = float(segs[2])
assert len(run) >= 3
return run
def append(results, param, aof, run):
workload = param['command'] + '-' + os.path.basename(param['workload'])
if not workload in results:
results[workload] = {}
if not aof in results[workload]:
results[workload][aof] = []
results[workload][aof].append(run)
def calculate(runs):
throughputs = []
for run in runs:
throughputs.append(run['Throughput(ops/sec)'])
mean = statistics.mean(throughputs)
stderr = 0
if len(throughputs) >= 2:
stderr = statistics.stdev(throughputs) / math.sqrt(len(runs))
return [mean, stderr]
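if __name__ == "__main__":
    # Minimal demo of the parse/append/calculate pipeline above (an added
    # sketch; the sample line format is inferred from what parse() expects,
    # i.e. "[SECTION], metric, value" rows from YCSB's summary output).
    sample = ("[OVERALL], RunTime(ms), 1200\n"
              "[OVERALL], Throughput(ops/sec), 833.3\n"
              "[READ], AverageLatency(us), 450.2\n")
    run = parse(sample)
    results = {}
    append(results, {'command': 'run', 'workload': 'workloada'}, 'aof-everysec', run)
    print(calculate(results['run-workloada']['aof-everysec']))  # -> [833.3, 0]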
| 1,001 | 0 | 69 |
94fdd0fca38bc277dfec464921116db4b10e633c | 11,123 | py | Python | shell/export_candidates.py | malks/carreiras | 014b95c985c36186fabe46b1f238b8fe871e724e | [
"MIT"
] | null | null | null | shell/export_candidates.py | malks/carreiras | 014b95c985c36186fabe46b1f238b8fe871e724e | [
"MIT"
] | null | null | null | shell/export_candidates.py | malks/carreiras | 014b95c985c36186fabe46b1f238b8fe871e724e | [
"MIT"
] | null | null | null | from oracle_conn import run_select as oc_select, run_insert as oc_insert, run_sql as oc_sql, new_conn as oc_conn
from mysql_connection import run_select as sql_select,run_sql,new_conn as sql_conn
from datetime import datetime
import re
#Get country code from senior based on country name
#Get city code from senior based on city name
#Get state code from senior based on state name
#Get district code from senior based on district name
#Get deficiency id from senior based on carreiras's code, using desc as value
#Map candidate data from Carreiras into the Senior record layout
#Clean up a value, stripping characters that are invalid for database insertion
#Build the ON DUPLICATE KEY update clause for the database
#Update the candidate's data in Carreiras
#Add a synced state in the Carreiras database for this candidate's subscription to this job
#Mark exported candidates in the exporter status table
#Senior has no autoincrement on the PK, so fetch its current maximum here and add 1 before inserting
#Insert/update the candidate in Senior
if __name__ == "__main__":
    #Instantiate the main connections
main_sql_conn=sql_conn()
main_oc_conn=oc_conn()
    #Get the timestamp of the last active export sync
last_sync=sql_select("SELECT last_sync FROM senior_sync WHERE type='export' AND active=1",main_sql_conn)
if last_sync[0]['last_sync']==None:
last_sync[0]['last_sync']=datetime.strptime('2021-05-01','%Y-%m-%d')
    #Candidates that were added to the export list
candidates_carreiras_avulsos=sql_select("SELECT candidates.* FROM candidates JOIN exportables ON candidates.id=exportables.candidate_id WHERE exportables.status=0 AND candidates.senior_num_can IS NULL",main_sql_conn)
candidates_senior_avulsos=carreiras_to_senior_candidate(candidates_carreiras_avulsos)
export_candidates_to_senior(candidates_senior_avulsos,main_oc_conn)
    update_exportable(candidates_senior_avulsos, main_sql_conn)
    #Carreiras candidates subscribed to active jobs whose last Senior sync predates the candidate's latest update/subscription
candidates_carreiras=sql_select("SELECT DISTINCT candidates.*,group_concat(subscribed_has_states.subscribed_id) as subscriptions FROM candidates JOIN subscribed ON subscribed.candidate_id=candidates.id JOIN subscribed_has_states ON subscribed_has_states.subscribed_id=subscribed.id LEFT JOIN subscribed_has_states AS denied_states ON denied_states.subscribed_id=subscribed.id AND denied_states.state_id IN (5,2) LEFT JOIN states ON states.id=subscribed_has_states.state_id WHERE candidates.senior_num_can IS NULL AND states.sync_to_senior=1 AND denied_states.id IS NULL AND (candidates.last_senior_synced<=candidates.updated_at OR candidates.last_senior_synced<=subscribed.updated_at OR candidates.last_senior_synced<=subscribed_has_states.updated_at OR candidates.last_senior_synced IS NULL) GROUP BY candidates.id",main_sql_conn)
candidates_senior=carreiras_to_senior_candidate(candidates_carreiras)
export_candidates_to_senior(candidates_senior,main_oc_conn)
add_carreiras_subscribed_state(candidates_carreiras,main_sql_conn)
#test=oc_select("SELECT NOMCAN,COUNT(NUMCAN) as CONNTA from R122CEX GROUP BY NOMCAN ORDER BY CONNTA DESC FETCH NEXT 3 ROWS ONLY ",main_oc_conn)
#test=oc_select("select * from R122CEX WHERE NOMCAN LIKE '%KARL%'",main_oc_conn)
#candidates_senior=oc_select("select * from R122CEX ORDER BY NUMCAN DESC FETCH NEXT 1 ROWS ONLY",main_oc_conn)
#print(test)
    #Candidates
#candidate_senior=oc_select("SELECT * from R122CEX WHERE DATINC >= '"+last_sync[0]['last_sync'].strftime('%Y-%m-%d')+"' ",main_oc_conn)
#candidates_carreiras=senior_to_carreiras_candidate(candidate_senior)
#import_candidates(candidates_carreiras,main_sql_conn)
    #Jobs
#job_senior=oc_select("SELECT * FROM R126RQU WHERE DATRQU>='"+last_sync[0]['last_sync'].strftime('%Y-%m-%d')+"' AND SITRQU IN('0','1')",main_oc_conn)
#full_job=senior_fill_job_data(job_senior,main_oc_conn)
#jobs_carreiras=senior_to_carreiras_job(full_job)
#import_jobs(jobs_carreiras,main_sql_conn)
    ##Update the sync-controller record
#update_senior_sync(main_sql_conn)
    #Close the main connections
main_oc_conn.close()
main_sql_conn.close()
| 45.773663 | 838 | 0.711229 | from oracle_conn import run_select as oc_select, run_insert as oc_insert, run_sql as oc_sql, new_conn as oc_conn
from mysql_connection import run_select as sql_select,run_sql,new_conn as sql_conn
from datetime import datetime
import re
#Get country code from senior based on country name
def get_country_code_from_name(country_name):
ret='0'
if country_name!=None:
country_conn=oc_conn()
country_code=oc_select("SELECT CODPAI FROM R074PAI WHERE NOMPAI LIKE '"+str(country_name)+"'",country_conn)
if len(country_code)>0 and country_code[0]['CODPAI']!=None:
ret=country_code[0]['CODPAI']
country_conn.close()
return ret
#Get city code from senior based on city name
def get_city_code_from_name(city_name):
ret='0'
if city_name!=None:
city_conn=oc_conn()
city_code=oc_select("SELECT CODCID FROM R074CID WHERE NOMCID LIKE '"+str(city_name)+"'",city_conn)
if len(city_code)>0 and city_code[0]['CODCID']!=None:
ret=city_code[0]['CODCID']
city_conn.close()
return ret
#Get state code from senior based on state name
def get_state_code_from_name(state_name):
ret='0'
if state_name!=None:
state_conn=oc_conn()
state_code=oc_select("SELECT CODEST FROM R074EST WHERE DESEST LIKE '"+str(state_name)+"'",state_conn)
if len(state_code)>0 and state_code[0]['CODEST']!=None:
ret=state_code[0]['CODEST']
state_conn.close()
return ret
#Get district code from senior based on district name
def get_district_code_from_name(district_name,city_code):
ret='0'
if district_name!=None:
district_conn=oc_conn()
if city_code!=None and city_code!=0:
district_code=oc_select("SELECT CODBAI FROM R074BAI WHERE CODCID='"+city_code+"' AND NOMBAI LIKE '"+str(district_name)+"'",district_conn)
else:
district_code=oc_select("SELECT CODBAI FROM R074BAI WHERE NOMBAI LIKE '"+str(district_name)+"'",district_conn)
if len(district_code)>0 and district_code[0]['CODBAI']!=None:
ret=district_code[0]['CODBAI']
district_conn.close()
return ret
#Get deficiency id from senior based on carreiras's code, using desc as value
def get_deficiency_senior_id_from_carreiras_code(deficiency_id):
ret='0'
if deficiency_id!=None:
carreiras_conn=sql_conn()
senior_conn=oc_conn()
deficiency_desc=sql_select("SELECT name FROM lunellicarreiras.deficiencies WHERE id='"+deficiency_id+"'",carreiras_conn)
        senior_code=oc_select("SELECT CODDEF FROM R022DEF WHERE DESDEF LIKE '"+deficiency_desc[0]['name']+"'",senior_conn)
if len(senior_code)>0 and (senior_code[0]['CODDEF']!=None):
ret=senior_code[0]['CODDEF']
carreiras_conn.close()
senior_conn.close()
return ret
#Map candidate data from Carreiras into the Senior record layout
def carreiras_to_senior_candidate(data_carreiras):
ret=[]
for data in data_carreiras:
helper={}
num_can=""
helper['PAINAS']=get_country_code_from_name(data['natural_country'])
helper['ESTNAS']=get_state_code_from_name(data['natural_state'])
helper['CIDNAS']=get_city_code_from_name(data['natural_city'])
helper['CODPAI']=get_country_code_from_name(data['address_country'])
helper['CODEST']=get_state_code_from_name(data['address_state'])
helper['CODCID']=get_city_code_from_name(data['address_city'])
helper['CODBAI']=get_district_code_from_name(data['address_district'],str(helper['CODCID']))
helper['ENDCAN']=data['address_street']
helper['ENDNUM']=data['address_number']
helper['CODCEP']=data['zip']
helper['NOMCAN']=data['name']
helper['DATNAS']=data['dob']
helper['TIPSEX']=data['gender']
helper['ALTCAN']=data['height']
helper['PESCAN']=data['weight']
helper['ESTCIV']=data['civil_state']
helper['SITCEX']=1
helper['DDDCO1']=data['ddd_mobile']
helper['TELCO1']=data['mobile']
helper['DDDCO2']=data['ddd_phone']
helper['TELCO2']=data['phone']
helper['EMACAN']=data['email']
helper['CPFCAN']=data['cpf']
helper['PISCAN']=data['pis']
helper['IDECAN']=data['rg']
helper['EMICID']=data['rg_emitter']
helper['NUMCTP']=data['work_card']
helper['SERCTP']=data['work_card_series']
helper['DIGCAR']=data['work_card_digit']
helper['NUMELE']=data['elector_card']
helper['NUMRES']=data['veteran_card']
helper['DATCHE']=data['arrival_date']
helper['DVLEST']=data['visa_expiration']
helper['VISEST']=data['foreign_register']
helper['NOMPAI']=data['father_name']
helper['NOMMAE']=data['mother_name']
helper['DATNPA']=data['father_dob']
helper['DATNMA']=data['mother_dob']
helper['NOMCJG']=data['spouse_name']
helper['CARCON']=data['spouse_job']
helper['DATINC']=datetime.now().strftime('%Y-%m-%d')
if (data['deficiency']==1):
helper['CODDEF']=get_deficiency_senior_id_from_carreiras_code(data['deficiency_id'])
#if "senior_num_can" in data:
# num_can=data['senior_num_can']
helper['carreiras_id']=data['id']
#helper['NUMCAN']=num_can
#helper['ULTATU']=data['updated_at']
ret.append(helper)
return ret
#Clean up a value, stripping characters that are invalid for database insertion
def string_and_strip(what):
if what==None:
return ''
return re.sub("'|\"|-|`","",str(what))
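# Illustrative sanity check for string_and_strip (an added sketch): it strips
# quotes, hyphens and backticks before values are concatenated into SQL below.
# Note this is not real injection protection; parameterized queries would be
# the safer design choice.
assert string_and_strip("O'Neil-`x`") == "ONeilx"
assert string_and_strip(None) == ''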
#Build the ON DUPLICATE KEY update clause for the database
def mount_updates(key,value):
return string_and_strip(key)+"='"+string_and_strip(value)+"'"
#Update the candidate's data in Carreiras
def update_candidate_carreiras(data):
carreiras_conn=sql_conn()
run_sql("UPDATE lunellicarreiras.candidates SET last_senior_synced=NOW(),senior_num_can='"+str(data['NUMCAN'])+"' WHERE id='"+str(data['carreiras_id'])+"'",carreiras_conn)
    carreiras_conn.close()
    return
#Add a synced state in the Carreiras database for this candidate's subscription to this job
def add_carreiras_subscribed_state(candidates,conn):
for candidate in candidates:
subscriptions=str(candidate['subscriptions']).split(",")
for subscription in subscriptions:
run_sql("INSERT INTO subscribed_has_states (subscribed_id,state_id,created_at,updated_at) VALUES('"+subscription+"',5,now(),now())",conn)
return
#Mark exported candidates in the exporter status table
def update_exportable(candidates,conn):
for candidate in candidates:
run_sql("UPDATE exportables SET status=1 WHERE candidate_id="+candidate["id"]+",updated_at=now()",conn)
return
#Senior has no autoincrement on the PK, so fetch its current maximum here and add 1 before inserting
def get_senior_candidate_next_key(conn):
last_id=oc_select("SELECT MAX(NUMCAN) FROM R122CEX",conn)
if len(last_id)>0 and last_id[0]['MAX(NUMCAN)']!=None:
return last_id[0]['MAX(NUMCAN)']+1
#Insert/update the candidate in Senior
def export_candidates_to_senior(candidates,conn):
for candidate in candidates:
candidate['NUMCAN']=get_senior_candidate_next_key(conn)
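        # note: MAX(NUMCAN)+1 computed outside a transaction can race under
        # concurrent exporters; a database sequence would be the safer design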
carreiras_id=candidate.pop('carreiras_id', '0')
keys = ",".join(map(string_and_strip,list(candidate.keys())))
values = "','".join(map(string_and_strip,list(candidate.values())))
#updates = ",".join(map(mount_updates,list(candidate.keys()),list(candidate.values())))
sql="INSERT INTO R122CEX ("+keys+") VALUES ('"+values+"') "
oc_sql(sql,conn)
if carreiras_id!='0':
candidate['carreiras_id']=carreiras_id
update_candidate_carreiras(candidate)
#candidate['NUMCAN']=oc_insert(sql,conn,'NUMCAN')
return
if __name__ == "__main__":
    #Instantiate the main connections
main_sql_conn=sql_conn()
main_oc_conn=oc_conn()
    #Get the timestamp of the last active export sync
last_sync=sql_select("SELECT last_sync FROM senior_sync WHERE type='export' AND active=1",main_sql_conn)
if last_sync[0]['last_sync']==None:
last_sync[0]['last_sync']=datetime.strptime('2021-05-01','%Y-%m-%d')
    #Candidates that were added to the export list
candidates_carreiras_avulsos=sql_select("SELECT candidates.* FROM candidates JOIN exportables ON candidates.id=exportables.candidate_id WHERE exportables.status=0 AND candidates.senior_num_can IS NULL",main_sql_conn)
candidates_senior_avulsos=carreiras_to_senior_candidate(candidates_carreiras_avulsos)
export_candidates_to_senior(candidates_senior_avulsos,main_oc_conn)
    update_exportable(candidates_senior_avulsos, main_sql_conn)
    #Carreiras candidates subscribed to active jobs whose last Senior sync predates the candidate's latest update/subscription
candidates_carreiras=sql_select("SELECT DISTINCT candidates.*,group_concat(subscribed_has_states.subscribed_id) as subscriptions FROM candidates JOIN subscribed ON subscribed.candidate_id=candidates.id JOIN subscribed_has_states ON subscribed_has_states.subscribed_id=subscribed.id LEFT JOIN subscribed_has_states AS denied_states ON denied_states.subscribed_id=subscribed.id AND denied_states.state_id IN (5,2) LEFT JOIN states ON states.id=subscribed_has_states.state_id WHERE candidates.senior_num_can IS NULL AND states.sync_to_senior=1 AND denied_states.id IS NULL AND (candidates.last_senior_synced<=candidates.updated_at OR candidates.last_senior_synced<=subscribed.updated_at OR candidates.last_senior_synced<=subscribed_has_states.updated_at OR candidates.last_senior_synced IS NULL) GROUP BY candidates.id",main_sql_conn)
candidates_senior=carreiras_to_senior_candidate(candidates_carreiras)
export_candidates_to_senior(candidates_senior,main_oc_conn)
add_carreiras_subscribed_state(candidates_carreiras,main_sql_conn)
#test=oc_select("SELECT NOMCAN,COUNT(NUMCAN) as CONNTA from R122CEX GROUP BY NOMCAN ORDER BY CONNTA DESC FETCH NEXT 3 ROWS ONLY ",main_oc_conn)
#test=oc_select("select * from R122CEX WHERE NOMCAN LIKE '%KARL%'",main_oc_conn)
#candidates_senior=oc_select("select * from R122CEX ORDER BY NUMCAN DESC FETCH NEXT 1 ROWS ONLY",main_oc_conn)
#print(test)
    #Candidates
#candidate_senior=oc_select("SELECT * from R122CEX WHERE DATINC >= '"+last_sync[0]['last_sync'].strftime('%Y-%m-%d')+"' ",main_oc_conn)
#candidates_carreiras=senior_to_carreiras_candidate(candidate_senior)
#import_candidates(candidates_carreiras,main_sql_conn)
    #Jobs
#job_senior=oc_select("SELECT * FROM R126RQU WHERE DATRQU>='"+last_sync[0]['last_sync'].strftime('%Y-%m-%d')+"' AND SITRQU IN('0','1')",main_oc_conn)
#full_job=senior_fill_job_data(job_senior,main_oc_conn)
#jobs_carreiras=senior_to_carreiras_job(full_job)
#import_jobs(jobs_carreiras,main_sql_conn)
    ##Update the sync-controller record
#update_senior_sync(main_sql_conn)
    #Close the main connections
main_oc_conn.close()
main_sql_conn.close()
| 6,538 | 0 | 286 |
e7a4b5fb09634abd1ca90bf016b92845b12b05de | 9,803 | py | Python | dbservice/settings/base.py | aagaard/dbservice | 47daadab307e6744ef151dd4e0aacff27dcda881 | [
"MIT"
] | 1 | 2020-04-27T16:30:50.000Z | 2020-04-27T16:30:50.000Z | dbservice/settings/base.py | aagaard/dbservice | 47daadab307e6744ef151dd4e0aacff27dcda881 | [
"MIT"
] | null | null | null | dbservice/settings/base.py | aagaard/dbservice | 47daadab307e6744ef151dd4e0aacff27dcda881 | [
"MIT"
] | 1 | 2021-01-13T02:16:56.000Z | 2021-01-13T02:16:56.000Z | import json
import os
import sys
from django.conf.global_settings import * # noqa
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
import dbservice as project_module
"""Base settings shared by all environments"""
# =============================================================================
# Generic Django project settings
# =============================================================================
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = False
USE_I18N = False
USE_L10N = False
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
# based on Django REST Framework interpretation of ISO 8601
DATE_INPUT_FORMATS = (
'%Y', # '2006'
'%Y-%m', # '2006-10'
'%Y-%m-%d', # '2006-10-25'
)
TIME_INPUT_FORMATS = (
'%H:%M', # '14:30'
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
)
DATETIME_INPUT_FORMATS = (
'%Y%m%dT%H:%M:%S.%f', # '20061025T14:30:59.000200'
'%Y%m%dT%H:%M:%S', # '20061025T14:30:59'
'%Y%m%dT%H:%M', # '20061025T14:30'
'%Y%m%dT%H', # '20061025T14'
'%Y%m%d', # '20061025'
'%Y-%m-%dT%H:%M:%S.%f', # '2006-10-25T14:30:59.00200'
'%Y-%m-%dT%H:%M:%S', # '2006-10-25T14:30:59'
'%Y-%m-%dT%H:%M', # '2006-10-25T14:30'
'%Y-%m-%dT%H', # '2006-10-25T14'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%YT%H:%M:%S.%f', # '10/25/2006T14:30:59.000200'
'%m/%d/%YT%H:%M:%S', # '10/25/2006T14:30:59'
'%m/%d/%YT%H:%M', # '10/25/2006T14:30'
'%m/%d/%YT%H', # '10/25/2006T14'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%yT%H:%M:%S.%f', # '10/25/06T14:30:59.000200'
'%m/%d/%yT%H:%M:%S', # '10/25/06T14:30:59'
'%m/%d/%yT%H:%M', # '10/25/06T14:30'
'%m/%d/%yT%H', # '10/25/06T14'
'%m/%d/%y', # '10/25/06'
)
INSTALLED_APPS = (
'dbservice.apps.users',
'dbservice.apps.private',
'dbservice.apps.homes',
'rest_framework',
'rest_framework_jwt',
'django_filters',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# =============================================================================
# Calculation of directories relative to the project module location
# =============================================================================
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
LOGS_DIR = os.path.join(PROJECT_DIR, os.pardir, 'logs')
PYTHON_BIN = os.path.dirname(sys.executable)
VE_PATH = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
if os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
# We're running with a virtualenv python executable.
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif VE_PATH and os.path.exists(os.path.join(VE_PATH, 'bin',
'activate_this.py')):
# We're running in [virtualenv_root]/src/[project_name].
VAR_ROOT = os.path.join(VE_PATH, 'var')
else:
# Set the variable root to a path in the project which is
# ignored by the repository.
VAR_ROOT = os.path.join(PROJECT_DIR, 'var')
if not os.path.exists(VAR_ROOT):
os.mkdir(VAR_ROOT)
if not os.path.exists(LOGS_DIR):
os.mkdir(LOGS_DIR)
# =============================================================================
# Logging
# =============================================================================
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
'simple_sql': {
'format': '[%(asctime)s] duration(sec): %(duration).6f|sql: %(sql)s|params: %(params)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(LOGS_DIR, 'general_debug.log'),
'formatter': 'verbose'
},
'file_database': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'simple_sql',
'filename': os.path.join(LOGS_DIR, 'debug_database.log'),
'level': 'DEBUG',
'maxBytes': 1024 * 1000 * 10,
'backupCount': 3
},
'email_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
'formatter': 'verbose'
},
},
'loggers': {
'django.request': {
'handlers': ['email_admins'],
'level': 'ERROR',
            'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'email_admins', 'file'],
'propagate': True,
},
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['file_database'],
'propagate': False,
}
}
}
import logging.config
logging.config.dictConfig(LOGGING)
# =============================================================================
# Project URLS and media settings
# =============================================================================
ROOT_URLCONF = 'dbservice.urls'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
# =============================================================================
# Templates
# =============================================================================
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
# =============================================================================
# Middleware
# =============================================================================
MIDDLEWARE_CLASSES += (
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# =============================================================================
# Auth / security
# =============================================================================
AUTHENTICATION_BACKENDS += (
)
# =============================================================================
# Miscellaneous project settings
# =============================================================================
AUTH_USER_MODEL = 'users.User'
# =============================================================================
# Third party app settings
# =============================================================================
REST_FRAMEWORK = {
'FILTER_BACKEND': 'rest_framework.filters.DjangoFilterBackend',
'PAGINATE_BY': 20,
'PAGINATE_BY_PARAM': 'page_size',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'rest_framework.throttling.ScopedRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'measurements': '100/minute',
'user': '10000/minute',
}
}
JWT_AUTH = {
# To simplify things we turn off token expiration. We can turn this on and
# write token refresh mechanisms later.
'JWT_VERIFY_EXPIRATION': False,
}
# SECRETS ##############################
_secrets = None
_secrets_filename = Path('~', 'dbservice.json').expand()
| 31.931596 | 99 | 0.504437 | import json
import os
import sys
from django.conf.global_settings import * # noqa
from django.core.exceptions import ImproperlyConfigured
from unipath import Path
import dbservice as project_module
"""Base settings shared by all environments"""
# =============================================================================
# Generic Django project settings
# =============================================================================
DEBUG = True
TEMPLATE_DEBUG = DEBUG
# SITE_ID = 1
# Local time zone for this installation. Choices can be found here:
# http://en.wikipedia.org/wiki/List_of_tz_zones_by_name
TIME_ZONE = 'UTC'
USE_TZ = False
USE_I18N = False
USE_L10N = False
LANGUAGE_CODE = 'en'
LANGUAGES = (
('en', 'English'),
)
# based on Django REST Framework interpretation of ISO 8601
DATE_INPUT_FORMATS = (
'%Y', # '2006'
'%Y-%m', # '2006-10'
'%Y-%m-%d', # '2006-10-25'
)
TIME_INPUT_FORMATS = (
'%H:%M', # '14:30'
'%H:%M:%S', # '14:30:59'
'%H:%M:%S.%f', # '14:30:59.000200'
)
DATETIME_INPUT_FORMATS = (
'%Y%m%dT%H:%M:%S.%f', # '20061025T14:30:59.000200'
'%Y%m%dT%H:%M:%S', # '20061025T14:30:59'
'%Y%m%dT%H:%M', # '20061025T14:30'
'%Y%m%dT%H', # '20061025T14'
'%Y%m%d', # '20061025'
'%Y-%m-%dT%H:%M:%S.%f', # '2006-10-25T14:30:59.00200'
'%Y-%m-%dT%H:%M:%S', # '2006-10-25T14:30:59'
'%Y-%m-%dT%H:%M', # '2006-10-25T14:30'
'%Y-%m-%dT%H', # '2006-10-25T14'
'%Y-%m-%d', # '2006-10-25'
'%m/%d/%YT%H:%M:%S.%f', # '10/25/2006T14:30:59.000200'
'%m/%d/%YT%H:%M:%S', # '10/25/2006T14:30:59'
'%m/%d/%YT%H:%M', # '10/25/2006T14:30'
'%m/%d/%YT%H', # '10/25/2006T14'
'%m/%d/%Y', # '10/25/2006'
'%m/%d/%yT%H:%M:%S.%f', # '10/25/06T14:30:59.000200'
'%m/%d/%yT%H:%M:%S', # '10/25/06T14:30:59'
'%m/%d/%yT%H:%M', # '10/25/06T14:30'
'%m/%d/%yT%H', # '10/25/06T14'
'%m/%d/%y', # '10/25/06'
)
INSTALLED_APPS = (
'dbservice.apps.users',
'dbservice.apps.private',
'dbservice.apps.homes',
'rest_framework',
'rest_framework_jwt',
'django_filters',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
# 'django.contrib.sites',
'django.contrib.messages',
'django.contrib.staticfiles',
'django.contrib.admin',
# 'django.contrib.admindocs',
)
# =============================================================================
# Calculation of directories relative to the project module location
# =============================================================================
PROJECT_DIR = os.path.dirname(os.path.realpath(project_module.__file__))
LOGS_DIR = os.path.join(PROJECT_DIR, os.pardir, 'logs')
PYTHON_BIN = os.path.dirname(sys.executable)
VE_PATH = os.path.dirname(os.path.dirname(os.path.dirname(PROJECT_DIR)))
# Assume that the presence of 'activate_this.py' in the python bin/
# directory means that we're running in a virtual environment.
if os.path.exists(os.path.join(PYTHON_BIN, 'activate_this.py')):
# We're running with a virtualenv python executable.
VAR_ROOT = os.path.join(os.path.dirname(PYTHON_BIN), 'var')
elif VE_PATH and os.path.exists(os.path.join(VE_PATH, 'bin',
'activate_this.py')):
# We're running in [virtualenv_root]/src/[project_name].
VAR_ROOT = os.path.join(VE_PATH, 'var')
else:
# Set the variable root to a path in the project which is
# ignored by the repository.
VAR_ROOT = os.path.join(PROJECT_DIR, 'var')
if not os.path.exists(VAR_ROOT):
os.mkdir(VAR_ROOT)
if not os.path.exists(LOGS_DIR):
os.mkdir(LOGS_DIR)
# =============================================================================
# Logging
# =============================================================================
LOGGING_CONFIG = None
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'filters': {
'require_debug_false': {
'()': 'django.utils.log.RequireDebugFalse'
}
},
'formatters': {
'verbose': {
'format': "[%(asctime)s] %(levelname)s [%(name)s:%(lineno)s] %(message)s",
'datefmt': "%Y-%m-%d %H:%M:%S"
},
'simple': {
'format': '%(levelname)s %(message)s'
},
'simple_sql': {
'format': '[%(asctime)s] duration(sec): %(duration).6f|sql: %(sql)s|params: %(params)s'
},
},
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
'formatter': 'verbose',
},
'file': {
'level': 'DEBUG',
'class': 'logging.FileHandler',
'filename': os.path.join(LOGS_DIR, 'general_debug.log'),
'formatter': 'verbose'
},
'file_database': {
'class': 'logging.handlers.RotatingFileHandler',
'formatter': 'simple_sql',
'filename': os.path.join(LOGS_DIR, 'debug_database.log'),
'level': 'DEBUG',
'maxBytes': 1024 * 1000 * 10,
'backupCount': 3
},
'email_admins': {
'level': 'ERROR',
'filters': ['require_debug_false'],
'class': 'django.utils.log.AdminEmailHandler',
'include_html': True,
'formatter': 'verbose'
},
},
'loggers': {
'django.request': {
'handlers': ['email_admins'],
'level': 'ERROR',
            'propagate': True
},
'django.security.DisallowedHost': {
'level': 'ERROR',
'handlers': ['console', 'email_admins', 'file'],
'propagate': True,
},
'django.db.backends': {
'level': 'DEBUG',
'handlers': ['file_database'],
'propagate': False,
}
}
}
import logging.config
logging.config.dictConfig(LOGGING)
# =============================================================================
# Project URLS and media settings
# =============================================================================
ROOT_URLCONF = 'dbservice.urls'
LOGIN_URL = '/login/'
LOGOUT_URL = '/logout/'
LOGIN_REDIRECT_URL = '/'
STATIC_URL = '/static/'
MEDIA_URL = '/uploads/'
STATIC_ROOT = os.path.join(VAR_ROOT, 'static')
MEDIA_ROOT = os.path.join(VAR_ROOT, 'uploads')
STATICFILES_DIRS = (
os.path.join(PROJECT_DIR, 'static'),
)
# =============================================================================
# Templates
# =============================================================================
TEMPLATE_DIRS = (
os.path.join(PROJECT_DIR, 'templates'),
)
TEMPLATE_CONTEXT_PROCESSORS += (
'django.core.context_processors.request',
)
# =============================================================================
# Middleware
# =============================================================================
MIDDLEWARE_CLASSES += (
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
)
# =============================================================================
# Auth / security
# =============================================================================
AUTHENTICATION_BACKENDS += (
)
# =============================================================================
# Miscellaneous project settings
# =============================================================================
AUTH_USER_MODEL = 'users.User'
# =============================================================================
# Third party app settings
# =============================================================================
REST_FRAMEWORK = {
'FILTER_BACKEND': 'rest_framework.filters.DjangoFilterBackend',
'PAGINATE_BY': 20,
'PAGINATE_BY_PARAM': 'page_size',
'DEFAULT_AUTHENTICATION_CLASSES': (
'rest_framework.authentication.SessionAuthentication',
'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
),
'DEFAULT_PERMISSION_CLASSES': (
'rest_framework.permissions.IsAuthenticated',
),
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.UserRateThrottle',
'rest_framework.throttling.ScopedRateThrottle',
),
'DEFAULT_THROTTLE_RATES': {
'measurements': '100/minute',
'user': '10000/minute',
}
}
JWT_AUTH = {
# To simplify things we turn off token expiration. We can turn this on and
# write token refresh mechanisms later.
'JWT_VERIFY_EXPIRATION': False,
}
# SECRETS ##############################
_secrets = None
_secrets_filename = Path('~', 'dbservice.json').expand()
def _load_secrets():
global _secrets
if os.path.exists(_secrets_filename):
try:
with open(_secrets_filename) as f:
_secrets = json.load(f)
except ValueError:
error_msg = 'Failed to parse JSON in {0}.'.format(
_secrets_filename)
raise ImproperlyConfigured(error_msg)
else:
error_msg = 'Missing secrets configuration file {0}.'.format(
_secrets_filename)
raise ImproperlyConfigured(error_msg)
def get_secret(setting):
if _secrets is None:
_load_secrets()
try:
return _secrets[setting]
except KeyError:
error_msg = 'Set {0} in secrets configuration file {0}.'.format(
setting, _secrets_filename)
raise ImproperlyConfigured(error_msg)
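# Example usage (an illustrative sketch): environment-specific settings
# modules that import this base can read individual secrets from
# ~/dbservice.json, e.g.
#
#   SECRET_KEY = get_secret('SECRET_KEY')
#
# assuming the JSON file contains a top-level "SECRET_KEY" entry.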
| 767 | 0 | 46 |
5fd11a23880c15d1d662e913d958ca72bd0a6c2f | 5,568 | py | Python | Chapter04/Activity4.02/Activity4.02_Unit_test.py | PacktWorkshops/Applied-Deep-Learning-with-Keras | d1372a6109e2ee9434ae47df59440577566badaa | [
"MIT"
] | 1 | 2021-02-16T13:28:41.000Z | 2021-02-16T13:28:41.000Z | Chapter04/Activity4.02/Activity4.02_Unit_test.py | olavomendes/The-Deep-Learning-with-Keras-Workshop | d1372a6109e2ee9434ae47df59440577566badaa | [
"MIT"
] | 6 | 2021-04-30T21:35:53.000Z | 2022-02-10T02:55:31.000Z | Chapter04/Activity4.02/Activity4.02_Unit_test.py | olavomendes/The-Deep-Learning-with-Keras-Workshop | d1372a6109e2ee9434ae47df59440577566badaa | [
"MIT"
] | 2 | 2020-01-03T10:22:19.000Z | 2020-01-13T06:30:33.000Z | import unittest
import numpy as np
import pandas as pd
import numpy.testing as np_testing
import pandas.testing as pd_testing
import os
import import_ipynb
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Activation
from tensorflow import random
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.preprocessing import StandardScaler
if __name__ == '__main__':
unittest.main()
| 40.642336 | 135 | 0.668463 | import unittest
import numpy as np
import pandas as pd
import numpy.testing as np_testing
import pandas.testing as pd_testing
import os
import import_ipynb
from sklearn.model_selection import train_test_split
from keras.models import Sequential
from keras.layers import Dense, Activation
from tensorflow import random
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.preprocessing import StandardScaler
def build_model_1(activation='relu', optimizer='adam'):
model = Sequential()
model.add(Dense(4, input_dim=28, activation=activation))
model.add(Dense(4, activation=activation))
model.add(Dense(4, activation=activation))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
def build_model_2(activation='relu', optimizer='adam'):
model = Sequential()
model.add(Dense(4, input_dim=28, activation=activation))
model.add(Dense(2, activation=activation))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
def build_model_3(activation='relu', optimizer='adam'):
model = Sequential()
model.add(Dense(8, input_dim=28, activation=activation))
model.add(Dense(8, activation=activation))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])
return model
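# Standalone usage sketch for the builders above (illustrative only, not part
# of the test run; X and y stand for a 28-feature frame and a binary label
# vector, as in setUp below):
#
#   clf = KerasClassifier(build_fn=build_model_2, epochs=10, batch_size=20)
#   kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=2)
#   scores = cross_val_score(clf, X, y, cv=kfold)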
class Test(unittest.TestCase):
def _dirname_if_file(self, filename):
if os.path.isdir(filename):
return filename
else:
return os.path.dirname(os.path.abspath(filename))
def setUp(self):
import Activity4_02
self.activity = Activity4_02
dirname = self._dirname_if_file('../data/HCV_feats.csv')
self.X_loc = os.path.join(dirname, 'HCV_feats.csv')
self.y_loc = os.path.join(dirname, 'HCV_target.csv')
self.X = pd.read_csv(self.X_loc)
self.y = pd.read_csv(self.y_loc)
sc = StandardScaler()
self.X = pd.DataFrame(sc.fit_transform(self.X), columns=self.X.columns)
self.seed = 2
self.n_folds = 5
def test_input_frames(self):
pd_testing.assert_frame_equal(self.activity.X, self.X)
pd_testing.assert_frame_equal(self.activity.y, self.y)
def test_model_iter(self):
np.random.seed(self.seed)
random.set_seed(self.seed)
batch_size=20
epochs=50
self.results_1 =[]
models = [build_model_1, build_model_2, build_model_3]
for m in range(len(models)):
classifier = KerasClassifier(build_fn=models[m], epochs=epochs, batch_size=batch_size, verbose=0, shuffle=False)
kfold = StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=self.seed)
result = cross_val_score(classifier, self.X, self.y, cv=kfold)
self.results_1.append(result)
np_testing.assert_array_almost_equal(
np.array(self.results_1).mean(axis=1), np.array(self.activity.results_1).mean(axis=1), decimal=1)
np_testing.assert_array_almost_equal(
np.array(self.results_1).std(axis=1), np.array(self.activity.results_1).std(axis=1), decimal=1)
def test_batch_epoch_iter(self):
np.random.seed(self.seed)
random.set_seed(self.seed)
n_folds = 5
epochs = [100, 200]
batches = [10, 20]
self.results_2 =[]
for e in range(len(epochs)):
for b in range(len(batches)):
classifier = KerasClassifier(build_fn=build_model_2, epochs=epochs[e], batch_size=batches[b], verbose=0, shuffle=False)
kfold = StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=self.seed)
result = cross_val_score(classifier, self.X, self.y, cv=kfold)
self.results_2.append(result)
np_testing.assert_array_almost_equal(
np.array(self.results_2).mean(axis=1), np.array(self.activity.results_2).mean(axis=1), decimal=1)
np_testing.assert_array_almost_equal(
np.array(self.results_2).std(axis=1), np.array(self.activity.results_2).std(axis=1), decimal=1)
def test_opt_act_iter(self):
np.random.seed(self.seed)
random.set_seed(self.seed)
n_folds = 5
batch_size=20
epochs=100
self.results_3 =[]
optimizers = ['rmsprop', 'adam','sgd']
activations = ['relu', 'tanh']
for o in range(len(optimizers)):
for a in range(len(activations)):
optimizer = optimizers[o]
activation = activations[a]
classifier = KerasClassifier(build_fn=build_model_2, epochs=epochs, batch_size=batch_size, verbose=0, shuffle=False)
kfold = StratifiedKFold(n_splits=self.n_folds, shuffle=True, random_state=self.seed)
result = cross_val_score(classifier, self.X, self.y, cv=kfold)
self.results_3.append(result)
np_testing.assert_array_almost_equal(
np.array(self.results_3).mean(axis=1), np.array(self.activity.results_3).mean(axis=1), decimal=1)
np_testing.assert_array_almost_equal(
np.array(self.results_3).std(axis=1), np.array(self.activity.results_3).std(axis=1), decimal=1)
if __name__ == '__main__':
unittest.main()
| 4,750 | 9 | 266 |
71beae10da7209eb415ac2446f2c563e76e25076 | 3,832 | py | Python | templates/sentry.conf.py | jasonrogena/ansible-sentry | 7248a19f66109bcdb6a7c693ae0c781771894ffd | [
"MIT"
] | 1 | 2019-11-14T15:18:45.000Z | 2019-11-14T15:18:45.000Z | templates/sentry.conf.py | jasonrogena/ansible-sentry | 7248a19f66109bcdb6a7c693ae0c781771894ffd | [
"MIT"
] | 2 | 2019-09-27T08:49:39.000Z | 2021-03-23T17:09:13.000Z | templates/sentry.conf.py | jasonrogena/ansible-sentry | 7248a19f66109bcdb6a7c693ae0c781771894ffd | [
"MIT"
] | 3 | 2019-10-04T12:12:40.000Z | 2021-01-28T16:33:32.000Z | # This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': '{{ sentry_db_name }}',
'USER': '{{ sentry_db_user }}',
'PASSWORD': '{{ sentry_db_password }}',
'HOST': '{{ sentry_db_host }}',
'PORT': '{{ sentry_db_port }}',
'AUTOCOMMIT': True,
'ATOMIC_REQUESTS': False,
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
DEBUG = False
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.sentry.io/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
BROKER_URL = '{{ sentry_broker_url }}'
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = 'sentry.digests.backends.redis.RedisBackend'
##############
# Web Server #
##############
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and uncomment the following settings
{% if sentry_behind_ssl_proxy %}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
{% endif %}
# If you're not hosting at the root of your web server,
# you need to uncomment and set it to the path where Sentry is hosted.
# FORCE_SCRIPT_NAME = '/sentry'
SENTRY_WEB_HOST = '{{ sentry_web_host }}'
SENTRY_WEB_PORT = {{ sentry_web_port }}
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of web workers
# 'protocol': 'uwsgi', # Enable uwsgi protocol instead of http
}
SENTRY_FEATURES["auth:register"] = {{ sentry_auth_register }}
{{ sentry_extra_conf_py }}
| 27.177305 | 80 | 0.683455 | # This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
from sentry.conf.server import *
import os.path
CONF_ROOT = os.path.dirname(__file__)
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': '{{ sentry_db_name }}',
'USER': '{{ sentry_db_user }}',
'PASSWORD': '{{ sentry_db_password }}',
'HOST': '{{ sentry_db_host }}',
'PORT': '{{ sentry_db_port }}',
'AUTOCOMMIT': True,
'ATOMIC_REQUESTS': False,
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
DEBUG = False
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
# If you wish to use memcached, install the dependencies and adjust the config
# as shown:
#
# pip install python-memcached
#
# CACHES = {
# 'default': {
# 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
# 'LOCATION': ['127.0.0.1:11211'],
# }
# }
# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.sentry.io/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
BROKER_URL = '{{ sentry_broker_url }}'
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = 'sentry.digests.backends.redis.RedisBackend'
##############
# Web Server #
##############
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and uncomment the following settings
{% if sentry_behind_ssl_proxy %}
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
{% endif %}
# If you're not hosting at the root of your web server,
# you need to uncomment and set it to the path where Sentry is hosted.
# FORCE_SCRIPT_NAME = '/sentry'
SENTRY_WEB_HOST = '{{ sentry_web_host }}'
SENTRY_WEB_PORT = {{ sentry_web_port }}
SENTRY_WEB_OPTIONS = {
# 'workers': 3, # the number of web workers
# 'protocol': 'uwsgi', # Enable uwsgi protocol instead of http
}
SENTRY_FEATURES["auth:register"] = {{ sentry_auth_register }}
{{ sentry_extra_conf_py }}
| 0 | 0 | 0 |
1719680479b4b558551123effe8d23c6a8b5ce10 | 3,402 | py | Python | markov.py | mbichoffe/markov_chains_twitter | 1a206e20061ec71505e4a28f1a2a96d999d99a2e | [
"MIT",
"Unlicense"
] | null | null | null | markov.py | mbichoffe/markov_chains_twitter | 1a206e20061ec71505e4a28f1a2a96d999d99a2e | [
"MIT",
"Unlicense"
] | 1 | 2021-02-08T20:21:10.000Z | 2021-02-08T20:21:10.000Z | markov.py | mbichoffe/markov_chains_twitter | 1a206e20061ec71505e4a28f1a2a96d999d99a2e | [
"MIT",
"Unlicense"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import sys
from text_stoppers import *
from text_validators import *
from random import choice
class MarkovGenerator(object):
"""A Markov chain text generator"""
def open_and_read_file(self, filename):
"""Take file(s) as tuples; return text as string.
Takes a string that is a file path, opens the file,
and turns the files' contents as one string of text.
"""
f = codecs.open(filename, encoding='utf-8')
text = f.read()
f.close()
self.make_chains(text)
def make_chains(self, text_string, n=2):
"""Take input text as string; return dictionary of Markov chains.
A chain will be a key that consists of a tuple of (word1, word2)
and the value would be a list of the word(s) that follow those two
words in the input text.
n is an integer indicating the number of items used to generate the n-grams.
It is usually 2 or 3. If no number is specified, a bigram will be generated.
For example:
chains = make_chains("hi there mary hi there juanita")
Each bigram (except the last) will be a key in chains:
sorted(chains.keys())
[('hi', 'there'), ('mary', 'hi'), ('there', 'mary')]
Each item in chains is a list of all possible following words:
chains[('hi', 'there')]
['mary', 'juanita']
chains[('there','juanita')]
[None]
"""
self.chains = {}
words = text_string.split()
for i in range(len(words)-n):
ngram = (words[i], words[i+1])
next_word = words[i+n]
self.chains.setdefault(ngram, [])
self.chains[ngram].append(next_word)
def make_text(self):
"""Take dictionary of markov chains; returns random text."""
words = []
are_valid_words = False
char_limit = 280
while not are_valid_words:
link = choice(self.chains.keys())
word1 = link[0]
word2 = link[1]
print 'Checking words: ', word1, word2
# Is the first word an acceptable POS? Are the words valid?
are_valid_words = all([is_valid_p_o_s(word1), is_valid_word(word2),
is_valid_word(word1)])
words += word1.capitalize(), word2
while link in self.chains:
# Keep looping until we have a key that isn't in the chains
# Or until we reach one of the text stopper conditions
# Or we reach the 280 chars limit
# If picked word is invalid, choose a new one
next_word = choice(self.chains[link])
if is_valid_word(next_word):
words.append(next_word)
# Should we stop here?
if stop_text(next_word, words):
break
link = (link[1], next_word)#create new ngram
return " ".join(words)
if __name__ == "__main__":
main()
| 29.842105 | 84 | 0.574956 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import codecs
import sys
from text_stoppers import *
from text_validators import *
from random import choice
class MarkovGenerator(object):
"""A Markov chain text generator"""
def open_and_read_file(self, filename):
"""Take file(s) as tuples; return text as string.
Takes a string that is a file path, opens the file,
and turns the files' contents as one string of text.
"""
f = codecs.open(filename, encoding='utf-8')
text = f.read()
f.close()
self.make_chains(text)
def make_chains(self, text_string, n=2):
"""Take input text as string; return dictionary of Markov chains.
A chain will be a key that consists of a tuple of (word1, word2)
and the value would be a list of the word(s) that follow those two
words in the input text.
n is an integer indicating the number of items used to generate the n-grams.
It is usually 2 or 3. If no number is specified, a bigram will be generated.
For example:
chains = make_chains("hi there mary hi there juanita")
Each bigram (except the last) will be a key in chains:
sorted(chains.keys())
[('hi', 'there'), ('mary', 'hi'), ('there', 'mary')]
Each item in chains is a list of all possible following words:
chains[('hi', 'there')]
['mary', 'juanita']
chains[('there','juanita')]
[None]
"""
self.chains = {}
words = text_string.split()
for i in range(len(words)-n):
ngram = (words[i], words[i+1])
next_word = words[i+n]
self.chains.setdefault(ngram, [])
self.chains[ngram].append(next_word)
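    # Note: values accumulate every observed follower, including repeats, so
    # more frequent followers are proportionally more likely to be drawn by
    # choice() in make_text below.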
def make_text(self):
"""Take dictionary of markov chains; returns random text."""
words = []
are_valid_words = False
char_limit = 280
while not are_valid_words:
link = choice(self.chains.keys())
word1 = link[0]
word2 = link[1]
print 'Checking words: ', word1, word2
# Is the first word an acceptable POS? Are the words valid?
are_valid_words = all([is_valid_p_o_s(word1), is_valid_word(word2),
is_valid_word(word1)])
words += word1.capitalize(), word2
while link in self.chains:
# Keep looping until we have a key that isn't in the chains
# Or until we reach one of the text stopper conditions
# Or we reach the 280 chars limit
# If picked word is invalid, choose a new one
next_word = choice(self.chains[link])
if is_valid_word(next_word):
words.append(next_word)
# Should we stop here?
if stop_text(next_word, words):
break
link = (link[1], next_word)#create new ngram
return " ".join(words)
def main(): # debugging
args = sys.argv[1:]
if not args:
print "usage: textfile.txt [textfile2.txt...]"
sys.exit(1)
print "\n\n\nMarkov Generator"
generator = MarkovGenerator()
generator.open_and_read_file(args[0])
for i in range(5):
print generator.make_text()
print
if __name__ == "__main__":
main()
| 305 | 0 | 23 |
cde00c76f75eb8efb107ab70115d81a15e0bb8b3 | 3,238 | py | Python | tor_ocr/core/config.py | GrafeasGroup/tor_ocr | e20631ab69a7977d6f470112ff719833f80d0f58 | [
"MIT"
] | 9 | 2017-11-25T15:18:12.000Z | 2022-01-04T09:49:20.000Z | tor_ocr/core/config.py | GrafeasGroup/tor_ocr | e20631ab69a7977d6f470112ff719833f80d0f58 | [
"MIT"
] | 22 | 2018-01-16T11:31:20.000Z | 2022-03-15T12:19:13.000Z | tor_ocr/core/config.py | GrafeasGroup/tor_ocr | e20631ab69a7977d6f470112ff719833f80d0f58 | [
"MIT"
] | 4 | 2017-12-01T13:39:14.000Z | 2022-02-04T23:45:03.000Z | from typing import Any, Callable, Dict
import datetime
import os
from blossom_wrapper import BlossomAPI
from praw import Reddit
# Load configuration regardless of whether bugsnag is set up correctly
try:
import bugsnag
except ImportError:
# If loading from setup.py or bugsnag isn't installed, we
# don't want to bomb out completely
bugsnag = None
import pkg_resources
_missing = object()
# @see https://stackoverflow.com/a/17487613/1236035
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is no
# entry with the same name in the instance's __dict__. this allows
# us to completely get rid of the access function call overhead. If
    # one chooses to invoke __get__ by hand the property will still work
# as expected because the lookup logic is replicated in __get__ for
    # manual invocation.
    def __init__(self, func: Callable, name: str=None, doc: str=None) -> None:
        self.__name__ = name or func.__name__
        self.__module__ = func.__module__
        self.__doc__ = doc or func.__doc__
        self.func = func
    def __get__(self, obj: Any, _type: Any=None) -> Any:
        if obj is None:
            return self
        value = obj.__dict__.get(self.__name__, _missing)
        if value is _missing:
            value = self.func(obj)
            obj.__dict__[self.__name__] = value
        return value
class Config(object):
"""
A singleton object for checking global configuration from
anywhere in the application
"""
# API key for later overwriting based on contents of filesystem
bugsnag_api_key: str = None
debug_mode: bool = False
# Global flag to enable/disable placing the triggers
# for the OCR bot
OCR: bool = True
ocr_delay: int = None
# Name of the bot
name: str = None
bot_version: str = '0.0.0' # this should get overwritten by the bot process
blossom: BlossomAPI = None
r: Reddit = None
me: Dict = None # blossom object of transcribot
last_post_scan_time: datetime.datetime = datetime.datetime(1970, 1, 1, 1, 1, 1)
    @cached_property
    def tor(self):
        if self.debug_mode:
            return self.r.subreddit('ModsOfTor')
        else:
            return self.r.subreddit('transcribersofreddit')
try:
Config.bugsnag_api_key = open('bugsnag.key').readline().strip()
except OSError:
Config.bugsnag_api_key = os.environ.get('BUGSNAG_API_KEY', None)
if bugsnag and Config.bugsnag_api_key:
bugsnag.configure(
api_key=Config.bugsnag_api_key,
app_version=pkg_resources.get_distribution('tor_ocr').version
)
# ----- Compatibility -----
config = Config()
| 29.706422 | 83 | 0.675108 |
from typing import Any, Callable, Dict
import datetime
import os
from blossom_wrapper import BlossomAPI
from praw import Reddit
# Load configuration regardless of whether bugsnag is set up correctly
try:
import bugsnag
except ImportError:
# If loading from setup.py or bugsnag isn't installed, we
# don't want to bomb out completely
bugsnag = None
import pkg_resources
_missing = object()
# @see https://stackoverflow.com/a/17487613/1236035
class cached_property(object):
"""A decorator that converts a function into a lazy property. The
function wrapped is called the first time to retrieve the result
and then that calculated result is used the next time you access
the value::
class Foo(object):
@cached_property
def foo(self):
# calculate something important here
return 42
The class has to have a `__dict__` in order for this property to
work.
"""
# implementation detail: this property is implemented as non-data
# descriptor. non-data descriptors are only invoked if there is no
# entry with the same name in the instance's __dict__. this allows
# us to completely get rid of the access function call overhead. If
    # one chooses to invoke __get__ by hand the property will still work
# as expected because the lookup logic is replicated in __get__ for
# manual invocation.
def __init__(self, func: Callable, name: str=None, doc: str=None) -> None:
self.__name__ = name or func.__name__
self.__module__ = func.__module__
self.__doc__ = doc or func.__doc__
self.func = func
def __get__(self, obj: Any, _type: Any=None) -> Any:
if obj is None:
return self
value = obj.__dict__.get(self.__name__, _missing)
if value is _missing:
value = self.func(obj)
obj.__dict__[self.__name__] = value
return value
class Config(object):
"""
A singleton object for checking global configuration from
anywhere in the application
"""
# API key for later overwriting based on contents of filesystem
bugsnag_api_key: str = None
debug_mode: bool = False
# Global flag to enable/disable placing the triggers
# for the OCR bot
OCR: bool = True
ocr_delay: int = None
# Name of the bot
name: str = None
bot_version: str = '0.0.0' # this should get overwritten by the bot process
blossom: BlossomAPI = None
r: Reddit = None
me: Dict = None # blossom object of transcribot
last_post_scan_time: datetime.datetime = datetime.datetime(1970, 1, 1, 1, 1, 1)
@cached_property
def tor(self):
if self.debug_mode:
return self.r.subreddit('ModsOfTor')
else:
return self.r.subreddit('transcribersofreddit')
try:
Config.bugsnag_api_key = open('bugsnag.key').readline().strip()
except OSError:
Config.bugsnag_api_key = os.environ.get('BUGSNAG_API_KEY', None)
if bugsnag and Config.bugsnag_api_key:
bugsnag.configure(
api_key=Config.bugsnag_api_key,
app_version=pkg_resources.get_distribution('tor_ocr').version
)
# ----- Compatibility -----
config = Config()
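# --- Added illustration (not part of tor_ocr): cached_property runs the
# wrapped function once per instance, then serves later reads straight from
# the instance __dict__ because it is a non-data descriptor.
def _cached_property_demo() -> None:
    class Example(object):
        calls = 0
        @cached_property
        def answer(self):
            Example.calls += 1
            return 42
    e = Example()
    assert e.answer == 42
    assert e.answer == 42
    assert Example.calls == 1  # the second access never re-invoked the function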
| 624 | 0 | 80 |
d97c3c948b0111a84042fd8c4b136d61148ac89c | 1,260 | py | Python | setup.py | tpyo/pipsy | 2b9abaa5e53f12421f6e890380552551f582b5dc | ["MIT"] | null | null | null | setup.py | tpyo/pipsy | 2b9abaa5e53f12421f6e890380552551f582b5dc | ["MIT"] | null | null | null | setup.py | tpyo/pipsy | 2b9abaa5e53f12421f6e890380552551f582b5dc | ["MIT"] | 1 | 2019-12-12T15:26:31.000Z | 2019-12-12T15:26:31.000Z |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='pipsy',
version="0.1.2",
description='Shows updates for installed packages',
long_description='Shows available updates and security notices for installed packages',
author='Donovan Schönknecht',
author_email='don@tpyo.net',
url='https://github.com/tpyo/pipsy',
packages=['pipsy'],
license='MIT',
include_package_data=True,
install_requires=[
'pip>=9.0.1',
'changelogs>=0.9.0',
'packaging>=16.8',
],
entry_points={
"console_scripts": [
"pipsy=pipsy:main",
],
},
extras_require={
'testing': ['pytest', 'mock'],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: PyPy"
],
)
| 30 | 91 | 0.585714 |
# -*- coding: utf-8 -*-
from setuptools import setup, find_packages
setup(
name='pipsy',
version="0.1.2",
description='Shows updates for installed packages',
long_description='Shows available updates and security notices for installed packages',
author='Donovan Schönknecht',
author_email='don@tpyo.net',
url='https://github.com/tpyo/pipsy',
packages=['pipsy'],
license='MIT',
include_package_data=True,
install_requires=[
'pip>=9.0.1',
'changelogs>=0.9.0',
'packaging>=16.8',
],
entry_points={
"console_scripts": [
"pipsy=pipsy:main",
],
},
extras_require={
'testing': ['pytest', 'mock'],
},
classifiers=[
"Development Status :: 5 - Production/Stable",
"Intended Audience :: Developers",
"License :: OSI Approved :: MIT License",
"Topic :: Software Development :: Build Tools",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: Implementation :: PyPy"
],
)
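# Usage sketch (added note, based on the extras_require above):
#   pip install pipsy              # core dependencies only
#   pip install "pipsy[testing]"   # also pulls in pytest and mock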
| 0 | 0 | 0 |
358d4ccfa2bc5618f4f6f68e382750cc2bc16f77 | 939 | py | Python | vad/acoustics/transforms/mel_spectrogram.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | ["MIT"] | 74 | 2021-02-22T17:35:52.000Z | 2022-03-29T03:08:12.000Z | vad/acoustics/transforms/mel_spectrogram.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | ["MIT"] | 1 | 2021-08-15T07:56:39.000Z | 2021-08-15T07:56:39.000Z | vad/acoustics/transforms/mel_spectrogram.py | zsl24/voice-activity-detection | a034be23c6283121c6b72e778c6ff6711045cbe3 | ["MIT"] | 9 | 2021-07-22T16:46:11.000Z | 2022-03-27T13:19:24.000Z |
import librosa
import numpy as np
from vad.acoustics.transforms.transform import Transform
from vad.data_models.audio_data import AudioData
| 29.34375 | 77 | 0.660277 |
import librosa
import numpy as np
from vad.acoustics.transforms.transform import Transform
from vad.data_models.audio_data import AudioData
class MelSpectrogramTransform(Transform):
feature_size: int
def __init__(self, n_fft: int, hop_ms: int, window_ms: int, n_mels: int):
self.n_fft = n_fft
self.hop_ms = hop_ms
self.window_ms = window_ms
self.n_mels = n_mels
self.feature_size = n_mels
def apply(self, audio_data: AudioData) -> np.array:
hop_samples = int(self.hop_ms / 1000 * audio_data.sample_rate)
window_samples = int(self.window_ms / 1000 * audio_data.sample_rate)
feature = librosa.feature.melspectrogram(
y=audio_data.audio,
sr=audio_data.sample_rate,
n_mels=self.n_mels,
n_fft=self.n_fft,
hop_length=hop_samples,
win_length=window_samples,
)
return feature
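# --- Illustrative usage sketch (added; assumes AudioData can be built from
# the `audio` and `sample_rate` fields that apply() reads above):
#
# import numpy as np
# one_second = AudioData(audio=np.zeros(16000, dtype=np.float32), sample_rate=16000)
# transform = MelSpectrogramTransform(n_fft=512, hop_ms=10, window_ms=25, n_mels=40)
# mel = transform.apply(one_second)  # array of shape (n_mels, n_frames)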
| 678 | 96 | 23 |
811a5527f91208edd02db398831978cdf699e4ae | 3,167 | py | Python | machine_learner/machine_learner/models/plLaRegression.py | FedericoQuin/2dv50e | 4db80de187776ca2303bf14a233057447e6f94f1 | ["MIT"] | null | null | null | machine_learner/machine_learner/models/plLaRegression.py | FedericoQuin/2dv50e | 4db80de187776ca2303bf14a233057447e6f94f1 | ["MIT"] | null | null | null | machine_learner/machine_learner/models/plLaRegression.py | FedericoQuin/2dv50e | 4db80de187776ca2303bf14a233057447e6f94f1 | ["MIT"] | 3 | 2019-03-03T16:48:21.000Z | 2021-11-18T09:35:47.000Z |
import traceback
import os
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import SGDRegressor
from machine_learner.utils import repository
DIR_PATH = os.path.join('machine_learner', 'trained_models', 'pllaregression')
| 36.825581 | 117 | 0.668772 |
import traceback
import os
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from sklearn.linear_model import SGDRegressor
from machine_learner.utils import repository
DIR_PATH = os.path.join('machine_learner', 'trained_models', 'pllaregression')
def training(features, target_pl, target_la, cycle):
if cycle != 1:
model_pl = repository.get(SGDRegressor.__name__ + '_pl', DIR_PATH)
scaler_pl = repository.get(MinMaxScaler.__name__ + '_pl', DIR_PATH)
model_la = repository.get(SGDRegressor.__name__ + '_la', DIR_PATH)
scaler_la = repository.get(MinMaxScaler.__name__ + '_la', DIR_PATH)
else:
model_pl = SGDRegressor()
scaler_pl = MinMaxScaler()
model_la = SGDRegressor()
scaler_la = MinMaxScaler()
scaler_pl.partial_fit(features)
scaler_la.partial_fit(features)
features_pl = scaler_pl.transform(features)
features_la = scaler_la.transform(features)
model_pl.partial_fit(features_pl, target_pl)
model_la.partial_fit(features_la, target_la)
repository.create(model_pl, SGDRegressor.__name__ + '_pl', DIR_PATH)
repository.create(model_la, SGDRegressor.__name__ + '_la', DIR_PATH)
repository.create(scaler_pl, MinMaxScaler.__name__ + '_pl', DIR_PATH)
repository.create(scaler_la, MinMaxScaler.__name__ + '_la', DIR_PATH)
return {'message': 'training successful'}
def testing(features):
try:
model_pl = repository.get(SGDRegressor.__name__ + '_pl', DIR_PATH)
scaler_pl = repository.get(MinMaxScaler.__name__ + '_pl', DIR_PATH)
model_la = repository.get(SGDRegressor.__name__ + '_la', DIR_PATH)
scaler_la = repository.get(MinMaxScaler.__name__ + '_la', DIR_PATH)
features_pl = scaler_pl.transform(features)
features_la = scaler_la.transform(features)
predictions_pl = model_pl.predict(features_pl)
predictions_la = model_la.predict(features_la)
response = {'predictions_pl': [], 'predictions_la': [], 'adaptation_space': 0, 'adaptation_class': 'noGoals'}
bothGoals = 0
oneGoal = 0
for prediction_pl, prediction_la in zip(predictions_pl, predictions_la):
response['predictions_pl'].append(float(prediction_pl))
response['predictions_la'].append(float(prediction_la))
# TODO centralize goals for the python module as well
# (currently only centralized in the SMC config properties file in the java module)
prediction = (1 if prediction_pl < 10 else 0) + (2 if prediction_la < 5 else 0)
if prediction == 3:
bothGoals += 1
elif prediction == 2 or prediction == 1:
oneGoal += 1
if bothGoals == 0:
response['adaptation_space'] = oneGoal
response['adaptation_class'] = 'oneGoal'
else:
response['adaptation_space'] = bothGoals
response['adaptation_class'] = 'twoGoals'
return response
except Exception as e:
traceback.print_tb(e.__traceback__)
return {'message': 'testing failed'}
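# --- Illustrative call pattern (added sketch; the arrays below are made up,
# and both functions also read/write pickled models through `repository`):
#
# import numpy as np
# X = np.random.rand(8, 3)            # 8 samples, 3 features
# pl = np.random.rand(8) * 20         # packet-loss style targets
# la = np.random.rand(8) * 10         # latency style targets
# training(X, pl, la, cycle=1)        # cycle 1 creates fresh models/scalers
# training(X, pl, la, cycle=2)        # later cycles continue via partial_fit
# result = testing(X)                 # predictions plus adaptation space/class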
| 2,851 | 0 | 46 |
08f19e444351c614538acac185696342a390748d | 3,676 | py | Python | migrations/versions/143f81c3bba3_submission_table.py | nycrecords/GPP | 7b7d1a26f6b1cbde051a0a0642407f9aa36e5b2e | [
"Apache-2.0"
] | null | null | null | migrations/versions/143f81c3bba3_submission_table.py | nycrecords/GPP | 7b7d1a26f6b1cbde051a0a0642407f9aa36e5b2e | [
"Apache-2.0"
] | 1 | 2021-03-20T00:32:17.000Z | 2021-03-20T00:32:17.000Z | migrations/versions/143f81c3bba3_submission_table.py | nycrecords/GPP | 7b7d1a26f6b1cbde051a0a0642407f9aa36e5b2e | [
"Apache-2.0"
] | null | null | null | """submission table
Revision ID: 143f81c3bba3
Revises:
Create Date: 2019-02-25 21:25:17.891423
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '143f81c3bba3'
down_revision = None
branch_labels = None
depends_on = None
| 81.688889 | 929 | 0.726333 | """submission table
Revision ID: 143f81c3bba3
Revises:
Create Date: 2019-02-25 21:25:17.891423
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
# revision identifiers, used by Alembic.
revision = '143f81c3bba3'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('submission',
sa.Column('id', sa.Integer(), nullable=False),
sa.Column('title', sa.String(length=150), nullable=False),
sa.Column('subtitles', postgresql.ARRAY(sa.String(length=150)), nullable=True),
sa.Column('additional_creators', postgresql.ARRAY(sa.String(length=150)), nullable=True),
sa.Column('subjects', postgresql.ARRAY(sa.String()), nullable=False),
sa.Column('description', sa.String(length=300), nullable=False),
sa.Column('date_published', sa.Date(), nullable=False),
sa.Column('report_type', sa.Enum('ADJUDICATIONS_DECISIONS', 'AUDIT_REPORT', 'BROCHURES', 'BUDGET_FINANCE', 'BULLETINS', 'CALENDARS', 'DATA_STATISTICS', 'DIRECTIVES', 'ENVIRONMENTAL_IMPACT_STATEMENTS_DRAFT', 'ENVIRONMENTAL_IMPACT_STATEMENTS_FINAL', 'EXECUTIVE_ORDERS', 'GUIDES', 'LAWS_LEGISLATION', 'MANUALS_DIRECTORIES', 'MINUTES', 'NEWSLETTERS_OTHER_SERIAL_PUBLICATIONS', 'OTHER', 'PLANS', 'PRESS_RELEASES', 'PROCEEDINGS', 'REPORTS_ANNUAL', 'REPORTS_BIENNIAL', 'REPORTS_MONTHLY', 'REPORTS_WEEKLY', 'REPORTS_OTHER_CONSULTANT_STAFF', 'STUDIES', name='reporttypeenum'), nullable=False),
sa.Column('languages', postgresql.ARRAY(sa.String()), nullable=False),
sa.Column('fiscal_years', postgresql.ARRAY(sa.SmallInteger()), nullable=True),
sa.Column('calendar_years', postgresql.ARRAY(sa.SmallInteger()), nullable=True),
sa.Column('boroughs', postgresql.ARRAY(sa.Enum('BRONX', 'BROOKLYN', 'MANHATTAN', 'QUEENS', 'STATEN_ISLAND', name='boroughenum')), nullable=True),
sa.Column('school_districts', postgresql.ARRAY(sa.Enum('DISTRICT_1', 'DISTRICT_2', 'DISTRICT_3', 'DISTRICT_4', 'DISTRICT_5', 'DISTRICT_6', 'DISTRICT_7', 'DISTRICT_8', 'DISTRICT_9', 'DISTRICT_10', 'DISTRICT_11', 'DISTRICT_12', 'DISTRICT_13', 'DISTRICT_14', 'DISTRICT_15', 'DISTRICT_16', 'DISTRICT_17', 'DISTRICT_18', 'DISTRICT_19', 'DISTRICT_20', 'DISTRICT_21', 'DISTRICT_22', 'DISTRICT_23', 'DISTRICT_24', 'DISTRICT_25', 'DISTRICT_26', 'DISTRICT_27', 'DISTRICT_28', 'DISTRICT_29', 'DISTRICT_30', 'DISTRICT_31', 'DISTRICT_32', 'DISTRICT_75', 'DISTRICT_79', name='schooldistrictenum')), nullable=True),
sa.Column('community_board_districts', postgresql.ARRAY(sa.Enum('BRONX_1', 'BRONX_2', 'BRONX_3', 'BRONX_4', 'BRONX_5', 'BRONX_6', 'BRONX_7', 'BRONX_8', 'BRONX_9', 'BRONX_10', 'BRONX_11', 'BRONX_12', 'BROOKLYN_1', 'BROOKLYN_2', 'BROOKLYN_3', 'BROOKLYN_4', 'BROOKLYN_5', 'BROOKLYN_6', 'BROOKLYN_7', 'BROOKLYN_8', 'BROOKLYN_9', 'BROOKLYN_10', 'BROOKLYN_11', 'BROOKLYN_12', 'BROOKLYN_13', 'BROOKLYN_14', 'BROOKLYN_15', 'BROOKLYN_16', 'BROOKLYN_17', 'BROOKLYN_18', 'MANHATTAN_1', 'MANHATTAN_2', 'MANHATTAN_3', 'MANHATTAN_4', 'MANHATTAN_5', 'MANHATTAN_6', 'MANHATTAN_7', 'MANHATTAN_8', 'MANHATTAN_9', 'MANHATTAN_10', 'MANHATTAN_11', 'MANHATTAN_12', 'QUEENS_1', 'QUEENS_2', 'QUEENS_3', 'QUEENS_4', 'QUEENS_5', 'QUEENS_6', 'QUEENS_7', 'QUEENS_8', 'QUEENS_9', 'QUEENS_10', 'QUEENS_11', 'QUEENS_12', 'QUEENS_13', 'QUEENS_14', 'STATEN_ISLAND_1', 'STATEN_ISLAND_2', 'STATEN_ISLAND_3', name='communityboarddistrictenum')), nullable=True),
sa.PrimaryKeyConstraint('id')
)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_table('submission')
# ### end Alembic commands ###
| 3,308 | 0 | 46 |
7c64a6b1ab3058e8ebef0ebd3cd11fb5ea4faca3 | 2,614 | py | Python | mongodb-practice/user_activity_db.py | amgad01/nosql-practice | cb188c4a615aa92d0f2022e2bfd84591474f5c38 | [
"MIT"
] | null | null | null | mongodb-practice/user_activity_db.py | amgad01/nosql-practice | cb188c4a615aa92d0f2022e2bfd84591474f5c38 | [
"MIT"
] | null | null | null | mongodb-practice/user_activity_db.py | amgad01/nosql-practice | cb188c4a615aa92d0f2022e2bfd84591474f5c38 | [
"MIT"
] | null | null | null | from pymongo import MongoClient
import os
USERNAME = os.environ.get('USERNAME')
PASSWORD = os.environ.get('PASSWORD')
CLUSTER_INFO = os.environ.get('CLUSTER_INFO')
DB_NAME = os.environ.get('DB_NAME')
COLLECTION_NAME = os.environ.get('COLLECTION_NAME')
cluster = MongoClient(f'mongodb+srv://{USERNAME}:{PASSWORD}{CLUSTER_INFO}')
database = cluster[DB_NAME]
collection = database[COLLECTION_NAME]
# add a document to the collection
try:
post = {
'_id': 0,
"username": 'user1',
'activity': 'using mongodb'
}
collection.insert_one(post)
except:
pass # document of the same id already exists
# adding many documents to the collection
try:
posts = [{'_id': 1, "username": 'user1', 'activity': 'using python'},
{'_id': 2, "username": 'user1', 'activity': 'using nosql-practice'}]
collection.insert_many([posts[0], posts[1]])
except:
pass # document of the same id already exists
# searching for a document in the collection that meets a certain criteria
results = collection.find({'username': 'user1'})
# print all the attributes of the results that holds information for the user_name: user1
for result in results:
print(f"user id: {result['_id']}")
print(f"username: {result['username']}")
print(f"activity: {result['activity']}")
# getting all the current entries in the db:
view_db()
# Delete document from the db :
# collection.delete_one({"_id": 1})
print("collection after deleting id:1")
# view_db()
# delete many documents from the db that match a given criteria:
# collection.delete_many({"username": 'user1'})
print("Collection all users with username: user1")
# view_db()
# Update documents in collection
collection.update_many({"_id": 0}, {'$set': {"username": 'User1'}})
view_db()
# Update many documents in collection
collection.update_many({"username": 'user1'}, {'$set': {"username": 'User1'}})
view_db()
collection.update_many({"username": 'User1'}, {'$set': {"location": 'Germany'}})
view_db()
# count documents in db
documents_count = collection.count_documents({})
print(documents_count)
#
collection.update_one({"_id": 0}, {'$set': {"age": 15}})
collection.update_one({"_id": 1}, {'$set': {"age": 22}})
collection.update_one({"_id": 2}, {'$set': {"age": 30}})
view_db()
# displays all users with an age greater than or equal to 21?
users = collection.find({'age': {'$gt': 21}})
print('users with an age greater than or equal to 21 are: ')
print(users)
for user in users:
print(user)
| 31.119048 | 89 | 0.686687 | from pymongo import MongoClient
import os
USERNAME = os.environ.get('USERNAME')
PASSWORD = os.environ.get('PASSWORD')
CLUSTER_INFO = os.environ.get('CLUSTER_INFO')
DB_NAME = os.environ.get('DB_NAME')
COLLECTION_NAME = os.environ.get('COLLECTION_NAME')
cluster = MongoClient(f'mongodb+srv://{USERNAME}:{PASSWORD}{CLUSTER_INFO}')
database = cluster[DB_NAME]
collection = database[COLLECTION_NAME]
# add a document to the collection
try:
post = {
'_id': 0,
"username": 'user1',
'activity': 'using mongodb'
}
collection.insert_one(post)
except:
pass # document of the same id already exists
# adding many documents to the collection
try:
posts = [{'_id': 1, "username": 'user1', 'activity': 'using python'},
{'_id': 2, "username": 'user1', 'activity': 'using nosql-practice'}]
collection.insert_many([posts[0], posts[1]])
except:
pass # document of the same id already exists
# searching for a document in the collection that meets a certain criteria
results = collection.find({'username': 'user1'})
# print all the attributes of the results that holds information for the user_name: user1
for result in results:
print(f"user id: {result['_id']}")
print(f"username: {result['username']}")
print(f"activity: {result['activity']}")
# getting all the current entries in the db:
def view_db():
db_documents = collection.find()
for db_document in db_documents:
print(db_document)
view_db()
# Delete document from the db :
# collection.delete_one({"_id": 1})
print("collection after deleting id:1")
# view_db()
# delete many documents from the db that match a given criteria:
# collection.delete_many({"username": 'user1'})
print("Collection all users with username: user1")
# view_db()
# Update documents in collection
collection.update_many({"_id": 0}, {'$set': {"username": 'User1'}})
view_db()
# Update many documents in collection
collection.update_many({"username": 'user1'}, {'$set': {"username": 'User1'}})
view_db()
collection.update_many({"username": 'User1'}, {'$set': {"location": 'Germany'}})
view_db()
# count documents in db
documents_count = collection.count_documents({})
print(documents_count)
#
collection.update_one({"_id": 0}, {'$set': {"age": 15}})
collection.update_one({"_id": 1}, {'$set': {"age": 22}})
collection.update_one({"_id": 2}, {'$set': {"age": 30}})
view_db()
# displays all users with an age greater than or equal to 21?
users = collection.find({'age': {'$gt': 21}})
print('users with an age greater than or equal to 21 are: ')
print(users)
for user in users:
print(user)
| 94 | 0 | 22 |
7dc90d10b2d08adc43f1f0611471659f468173a4 | 3,911 | py | Python | restclients/test/bridge/models.py | uw-it-cte/uw-restclients | 2b09348bf066e5508304401f93f281805e965af5 | [
"Apache-2.0"
] | null | null | null | restclients/test/bridge/models.py | uw-it-cte/uw-restclients | 2b09348bf066e5508304401f93f281805e965af5 | [
"Apache-2.0"
] | null | null | null | restclients/test/bridge/models.py | uw-it-cte/uw-restclients | 2b09348bf066e5508304401f93f281805e965af5 | [
"Apache-2.0"
] | null | null | null | from datetime import datetime
from django.test import TestCase
from django.utils.dateparse import parse_datetime
from restclients.exceptions import DataFailureException
from restclients.models.bridge import BridgeUser, BridgeCustomField,\
BridgeUserRole
from restclients.test import fdao_pws_override
| 39.505051 | 75 | 0.489645 | from datetime import datetime
from django.test import TestCase
from django.utils.dateparse import parse_datetime
from restclients.exceptions import DataFailureException
from restclients.models.bridge import BridgeUser, BridgeCustomField,\
BridgeUserRole
from restclients.test import fdao_pws_override
class TestBridgeModel(TestCase):
def test_bridge_user_role(self):
role = BridgeUserRole(role_id='user', name='user')
self.assertEqual(role.to_json(),
{"id": "user", "name": "user"})
def test_bridge_custom_field(self):
bcf = BridgeCustomField(value_id="1",
field_id="5",
name="REGID",
value="787")
self.assertEqual(bcf.to_json(),
{'id': '1',
'value': '787',
'name': 'REGID',
'custom_field_id': '5'})
self.assertTrue(bcf.is_regid())
bcf = BridgeCustomField(field_id="5",
name="REGID")
self.assertEqual(bcf.to_json(),
{'name': 'REGID',
'custom_field_id': '5',
'value': None})
self.assertIsNotNone(str(bcf))
def test_bridge_user(self):
bcf = BridgeCustomField(
field_id="5",
name="REGID",
value="12345678901234567890123456789012")
user = BridgeUser()
user.netid = "iamstudent"
user.full_name = "Iam Student"
user.first_name = "Iam A"
user.last_name = "Student"
user.email = "iamstudent@uw.edu"
user.custom_fields.append(bcf)
user.updated_at = parse_datetime("2016-08-08T13:58:20.635-07:00")
self.assertEqual(
user.to_json_post(),
{'users': [
{'custom_fields': [
{'custom_field_id': '5',
'name': 'REGID',
'value': '12345678901234567890123456789012'}],
'uid': 'iamstudent@uw.edu',
'email': 'iamstudent@uw.edu',
'first_name': 'Iam A',
'full_name': 'Iam Student',
'last_name': 'Student'
}]})
self.assertIsNotNone(str(user))
self.assertFalse(user.has_course_summary())
self.assertFalse(user.no_learning_history())
self.assertEqual(user.get_uid(), "iamstudent@uw.edu")
user = BridgeUser()
user.netid = "iamstudent"
user.full_name = "Iam Student"
user.email = "iamstudent@uw.edu"
user.custom_fields.append(bcf)
self.assertEqual(
user.to_json_post(),
{'users': [
{'custom_fields': [
{'custom_field_id': '5',
'name': 'REGID',
'value': '12345678901234567890123456789012'}],
'email': 'iamstudent@uw.edu',
'full_name': 'Iam Student',
'uid': 'iamstudent@uw.edu'}]})
user.bridge_id = 123
self.assertEqual(
user.to_json_post(),
{'users': [
{'custom_fields': [
{'custom_field_id': '5',
'name': 'REGID',
'value': '12345678901234567890123456789012'}],
'id': 123,
'email': 'iamstudent@uw.edu',
'full_name': 'Iam Student',
'uid': 'iamstudent@uw.edu'}]})
user.completed_courses_count = 3
self.assertTrue(user.has_course_summary())
self.assertFalse(user.no_learning_history())
self.assertIsNotNone(str(user))
| 3,490 | 11 | 104 |
40980b6b967b533d1cc5c012af6e07cf8d2c9df3 | 519 | py | Python | python3.4Smartforest/lib/python3.4/site-packages/django/utils/duration.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | ["MIT"] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/utils/duration.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | ["MIT"] | null | null | null | python3.4Smartforest/lib/python3.4/site-packages/django/utils/duration.py | letouriste001/SmartForest_2.0 | 109b78bf1e8c8404800f377ab969395ccbb617be | ["MIT"] | null | null | null |
"""Version of str(timedelta) which is not English specific."""
def duration_string(duration):
days = duration.days
seconds = duration.seconds
microseconds = duration.microseconds
minutes = seconds // 60
seconds = seconds % 60
hours = minutes // 60
minutes = minutes % 60
string = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
if days:
string = '{} '.format(days) + string
if microseconds:
string += '.{:06d}'.format(microseconds)
return string
| 519 | 519 | 0.626204 |
"""Version of str(timedelta) which is not English specific."""
def duration_string(duration):
days = duration.days
seconds = duration.seconds
microseconds = duration.microseconds
minutes = seconds // 60
seconds = seconds % 60
hours = minutes // 60
minutes = minutes % 60
string = '{:02d}:{:02d}:{:02d}'.format(hours, minutes, seconds)
if days:
string = '{} '.format(days) + string
if microseconds:
string += '.{:06d}'.format(microseconds)
return string
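# Worked examples (added; the values follow from the logic above):
#   from datetime import timedelta
#   duration_string(timedelta(hours=1, minutes=3, seconds=5))  -> '01:03:05'
#   duration_string(timedelta(days=2, microseconds=7))         -> '2 00:00:00.000007'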
| 0 | 0 | 0 |
fc87b40add6ed0b9a6c53461503078e30928e2cd | 8,957 | py | Python | dit/pid/measures/idep.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | ["BSD-3-Clause"] | 1 | 2020-03-13T10:30:11.000Z | 2020-03-13T10:30:11.000Z | dit/pid/measures/idep.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | ["BSD-3-Clause"] | null | null | null | dit/pid/measures/idep.py | Ejjaffe/dit | c9d206f03d1de5a0a298b1d0ea9d79ea5e789ee1 | ["BSD-3-Clause"] | null | null | null |
"""
The dependency-decomposition based unique measure partial information decomposition.
"""
from ...multivariate import coinformation
from ..pid import BaseUniquePID
from ...profiles import DependencyDecomposition
__all__ = (
'PID_dep',
'PID_RA',
)
class PID_dep(BaseUniquePID):
"""
    The dependency partial information decomposition, as defined by James et al.
"""
_name = "I_dep"
@staticmethod
def _measure(d, sources, target, maxiter=None):
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over the dependency decomposition.
Parameters
----------
d : Distribution
The distribution to compute i_dep for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idep : dict
The value of I_dep for each individual source.
"""
uniques = {}
measure = {'I': lambda d: coinformation(d, [[0, 1], [2]])}
source_0_target = frozenset((frozenset((0, 2)),))
source_1_target = frozenset((frozenset((1, 2)),))
if len(sources) == 2:
dm = d.coalesce(sources + (target,)) # put it into [0, 1], [2] order
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u_0 = min(dd.delta(edge, 'I') for edge in dd.edges(source_0_target))
u_1 = min(dd.delta(edge, 'I') for edge in dd.edges(source_1_target))
uniques[sources[0]] = u_0
uniques[sources[1]] = u_1
else:
for source in sources:
others = sum((i for i in sources if i != source), ())
dm = d.coalesce([source, others, target])
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u = min(dd.delta(edge, 'I') for edge in dd.edges(source_0_target))
uniques[source] = u
return uniques
class PID_RA(BaseUniquePID):
"""
The "reproducibility analysis" partial information decomposition, derived
from the work of Zwick.
"""
_name = "I_RA"
@staticmethod
def _measure(d, sources, target, maxiter=None):
"""
This computes unique information as the change in I[sources : target]
when adding the source-target constraint.
Parameters
----------
d : Distribution
The distribution to compute i_RA for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
ira : dict
The value of I_RA for each individual source.
"""
uniques = {}
measure = {'I': lambda d: coinformation(d, [[0, 1], [2]])}
source_0_target = frozenset([frozenset((0, 2))])
source_1_target = frozenset([frozenset((1, 2))])
all_pairs = frozenset([frozenset((0, 1))]) | source_0_target | source_1_target
if len(sources) == 2:
dm = d.coalesce(sources + (target,))
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u_0 = dd.delta((all_pairs, all_pairs - source_0_target), 'I')
u_1 = dd.delta((all_pairs, all_pairs - source_1_target), 'I')
uniques[sources[0]] = u_0
uniques[sources[1]] = u_1
else:
for source in sources:
others = sum((i for i in sources if i != source), ())
dm = d.coalesce([source, others, target])
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u = dd.delta((all_pairs, all_pairs - source_0_target), 'I')
uniques[source] = u
return uniques
class PID_dep_a(BaseUniquePID):
"""
    The dependency partial information decomposition, as defined by James et al.
Notes
-----
This alternative method behaves oddly with three or more sources.
"""
_name = "I_dep_a"
@staticmethod
def _measure(d, sources, target): # pragma: no cover
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over the dependency decomposition.
Parameters
----------
d : Distribution
The distribution to compute i_dep_a for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idepa : dict
The value of I_dep_a for each individual source.
"""
var_to_index = {var: i for i, var in enumerate(sources + (target,))}
d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
invars = [var_to_index[var] for var in sources]
outvar = [var_to_index[(var,)] for var in target]
measure = {'I': lambda d: coinformation(d, [invars, outvar])}
dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
uniques = {}
for source in sources:
constraint = frozenset((frozenset((var_to_index[source], var_to_index[target])),))
u = min(dd.delta(edge, 'I') for edge in dd.edges(constraint))
uniques[source] = u
return uniques
class PID_dep_b(BaseUniquePID):
"""
    The reduced dependency partial information decomposition, as defined by James et al.
Notes
-----
This decomposition is known to be inconsistent.
"""
_name = "I_dep_b"
@staticmethod
def _measure(d, sources, target): # pragma: no cover
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over a restricted dependency decomposition which never constrains dependencies
among the sources.
Parameters
----------
d : Distribution
The distribution to compute i_dep_b for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idepb : dict
The value of I_dep_b for each individual source.
"""
var_to_index = {var: i for i, var in enumerate(sources + (target,))}
target_index = var_to_index[target]
d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
invars = [var_to_index[var] for var in sources]
outvar = [var_to_index[(var,)] for var in target]
measure = {'I': lambda d: coinformation(d, [invars, outvar])}
dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
uniques = {}
for source in sources:
constraint = frozenset((frozenset((var_to_index[source], target_index)),))
broja_style = lambda edge: all({target_index} < set(_) for _ in edge[0] if len(_) > 1)
edge_set = (edge for edge in dd.edges(constraint) if broja_style(edge))
u = min(dd.delta(edge, 'I') for edge in edge_set)
uniques[source] = u
return uniques
class PID_dep_c(BaseUniquePID):
"""
    The reduced dependency partial information decomposition, as defined by James et al.
Notes
-----
This decomposition can result in subadditive redundancy.
"""
_name = "I_dep_c"
@staticmethod
def _measure(d, sources, target): # pragma: no cover
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over a restricted dependency decomposition which never constrains dependencies
among the sources.
Parameters
----------
d : Distribution
The distribution to compute i_dep_c for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idepc : dict
The value of I_dep_c for each individual source.
"""
var_to_index = {var: i for i, var in enumerate(sources + (target,))}
d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
invars = [var_to_index[var] for var in sources]
outvar = [var_to_index[(var,)] for var in target]
measure = {'I': lambda d: coinformation(d, [invars, outvar])}
dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
uniques = {}
for source in sources:
constraint = frozenset((frozenset((var_to_index[source], var_to_index[target])),))
edge_set = (edge for edge in dd.edges(constraint) if tuple(invars) in edge[0])
u = min(dd.delta(edge, 'I') for edge in edge_set)
uniques[source] = u
return uniques
| 35.12549 | 98 | 0.592609 |
"""
The dependency-decomposition based unique measure partial information decomposition.
"""
from ...multivariate import coinformation
from ..pid import BaseUniquePID
from ...profiles import DependencyDecomposition
__all__ = (
'PID_dep',
'PID_RA',
)
class PID_dep(BaseUniquePID):
"""
    The dependency partial information decomposition, as defined by James et al.
"""
_name = "I_dep"
@staticmethod
def _measure(d, sources, target, maxiter=None):
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over the dependency decomposition.
Parameters
----------
d : Distribution
The distribution to compute i_dep for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idep : dict
The value of I_dep for each individual source.
"""
uniques = {}
measure = {'I': lambda d: coinformation(d, [[0, 1], [2]])}
source_0_target = frozenset((frozenset((0, 2)),))
source_1_target = frozenset((frozenset((1, 2)),))
if len(sources) == 2:
dm = d.coalesce(sources + (target,)) # put it into [0, 1], [2] order
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u_0 = min(dd.delta(edge, 'I') for edge in dd.edges(source_0_target))
u_1 = min(dd.delta(edge, 'I') for edge in dd.edges(source_1_target))
uniques[sources[0]] = u_0
uniques[sources[1]] = u_1
else:
for source in sources:
others = sum((i for i in sources if i != source), ())
dm = d.coalesce([source, others, target])
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u = min(dd.delta(edge, 'I') for edge in dd.edges(source_0_target))
uniques[source] = u
return uniques
class PID_RA(BaseUniquePID):
"""
The "reproducibility analysis" partial information decomposition, derived
from the work of Zwick.
"""
_name = "I_RA"
@staticmethod
def _measure(d, sources, target, maxiter=None):
"""
This computes unique information as the change in I[sources : target]
when adding the source-target constraint.
Parameters
----------
d : Distribution
The distribution to compute i_RA for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
ira : dict
The value of I_RA for each individual source.
"""
uniques = {}
measure = {'I': lambda d: coinformation(d, [[0, 1], [2]])}
source_0_target = frozenset([frozenset((0, 2))])
source_1_target = frozenset([frozenset((1, 2))])
all_pairs = frozenset([frozenset((0, 1))]) | source_0_target | source_1_target
if len(sources) == 2:
dm = d.coalesce(sources + (target,))
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u_0 = dd.delta((all_pairs, all_pairs - source_0_target), 'I')
u_1 = dd.delta((all_pairs, all_pairs - source_1_target), 'I')
uniques[sources[0]] = u_0
uniques[sources[1]] = u_1
else:
for source in sources:
others = sum((i for i in sources if i != source), ())
dm = d.coalesce([source, others, target])
dd = DependencyDecomposition(dm, measures=measure, maxiter=maxiter)
u = dd.delta((all_pairs, all_pairs - source_0_target), 'I')
uniques[source] = u
return uniques
class PID_dep_a(BaseUniquePID):
"""
    The dependency partial information decomposition, as defined by James et al.
Notes
-----
This alternative method behaves oddly with three or more sources.
"""
_name = "I_dep_a"
@staticmethod
def _measure(d, sources, target): # pragma: no cover
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over the dependency decomposition.
Parameters
----------
d : Distribution
The distribution to compute i_dep_a for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idepa : dict
The value of I_dep_a for each individual source.
"""
var_to_index = {var: i for i, var in enumerate(sources + (target,))}
d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
invars = [var_to_index[var] for var in sources]
outvar = [var_to_index[(var,)] for var in target]
measure = {'I': lambda d: coinformation(d, [invars, outvar])}
dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
uniques = {}
for source in sources:
constraint = frozenset((frozenset((var_to_index[source], var_to_index[target])),))
u = min(dd.delta(edge, 'I') for edge in dd.edges(constraint))
uniques[source] = u
return uniques
class PID_dep_b(BaseUniquePID):
"""
    The reduced dependency partial information decomposition, as defined by James et al.
Notes
-----
This decomposition is known to be inconsistent.
"""
_name = "I_dep_b"
@staticmethod
def _measure(d, sources, target): # pragma: no cover
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over a restricted dependency decomposition which never constrains dependencies
among the sources.
Parameters
----------
d : Distribution
The distribution to compute i_dep_b for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idepb : dict
The value of I_dep_b for each individual source.
"""
var_to_index = {var: i for i, var in enumerate(sources + (target,))}
target_index = var_to_index[target]
d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
invars = [var_to_index[var] for var in sources]
outvar = [var_to_index[(var,)] for var in target]
measure = {'I': lambda d: coinformation(d, [invars, outvar])}
dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
uniques = {}
for source in sources:
constraint = frozenset((frozenset((var_to_index[source], target_index)),))
broja_style = lambda edge: all({target_index} < set(_) for _ in edge[0] if len(_) > 1)
edge_set = (edge for edge in dd.edges(constraint) if broja_style(edge))
u = min(dd.delta(edge, 'I') for edge in edge_set)
uniques[source] = u
return uniques
class PID_dep_c(BaseUniquePID):
"""
    The reduced dependency partial information decomposition, as defined by James et al.
Notes
-----
This decomposition can result in subadditive redundancy.
"""
_name = "I_dep_c"
@staticmethod
def _measure(d, sources, target): # pragma: no cover
"""
This computes unique information as min(delta(I(sources : target))) where delta
is taken over a restricted dependency decomposition which never constrains dependencies
among the sources.
Parameters
----------
d : Distribution
The distribution to compute i_dep_c for.
sources : iterable of iterables
The source variables.
target : iterable
The target variable.
Returns
-------
idepc : dict
The value of I_dep_c for each individual source.
"""
var_to_index = {var: i for i, var in enumerate(sources + (target,))}
d = d.coalesce(sorted(var_to_index.keys(), key=lambda k: var_to_index[k]))
invars = [var_to_index[var] for var in sources]
outvar = [var_to_index[(var,)] for var in target]
measure = {'I': lambda d: coinformation(d, [invars, outvar])}
dd = DependencyDecomposition(d, list(var_to_index.values()), measures=measure)
uniques = {}
for source in sources:
constraint = frozenset((frozenset((var_to_index[source], var_to_index[target])),))
edge_set = (edge for edge in dd.edges(constraint) if tuple(invars) in edge[0])
u = min(dd.delta(edge, 'I') for edge in edge_set)
uniques[source] = u
return uniques
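# --- Illustrative usage sketch (added; the constructor signature is assumed
# from dit's BasePID, where the last variable defaults to the target --
# verify against the installed dit version):
#
# from dit import Distribution
# xor = Distribution(['000', '011', '101', '110'], [0.25] * 4)
# print(PID_dep(xor))  # prints the decomposition over the redundancy lattice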
| 0 | 0 | 0 |
ce9cd9ea62d77873530a9f44bd71aefdee85fa49 | 967 | py | Python | setup.py | dcdanko/capalyzer | 45f0283109542aaaecc5d28e800b819a0be62726 | [
"MIT"
] | 1 | 2019-11-22T16:37:34.000Z | 2019-11-22T16:37:34.000Z | setup.py | dcdanko/capalyzer | 45f0283109542aaaecc5d28e800b819a0be62726 | [
"MIT"
] | 9 | 2018-05-24T20:47:26.000Z | 2019-07-02T21:30:54.000Z | setup.py | dcdanko/capalyzer | 45f0283109542aaaecc5d28e800b819a0be62726 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
setuptools
setuptools.setup(
name='capalyzer',
version='2.16.0',
description="Parsing functionality for the metasub CAP",
author="David C. Danko",
author_email='dcdanko@gmail.com',
url='https://github.com/dcdanko/capalyzer',
packages=setuptools.find_packages(),
package_dir={'capalyzer': 'capalyzer'},
install_requires=[
'click',
'pandas',
'scipy',
'numpy',
'umap-learn',
],
entry_points={
'console_scripts': [
'capalyzer=capalyzer.cli:main'
]
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
package_data={'capalyzer': [
'packet_parser/ncbi_tree/*.dmp.gz',
'packet_parser/microbe-directory.csv',
]},
)
| 24.794872 | 60 | 0.584281 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import setuptools
setuptools
setuptools.setup(
name='capalyzer',
version='2.16.0',
description="Parsing functionality for the metasub CAP",
author="David C. Danko",
author_email='dcdanko@gmail.com',
url='https://github.com/dcdanko/capalyzer',
packages=setuptools.find_packages(),
package_dir={'capalyzer': 'capalyzer'},
install_requires=[
'click',
'pandas',
'scipy',
'numpy',
'umap-learn',
],
entry_points={
'console_scripts': [
'capalyzer=capalyzer.cli:main'
]
},
classifiers=[
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Programming Language :: Python :: 3',
],
package_data={'capalyzer': [
'packet_parser/ncbi_tree/*.dmp.gz',
'packet_parser/microbe-directory.csv',
]},
)
| 0 | 0 | 0 |
c525f1062309bd1fe185d1192e1da1b6b6556d8d | 1,180 | py | Python | terrain/config.py | cisaacstern/terrain-corrector | 6c82babb5ace3306d741760fb14db47b3ef0a7b8 | [
"Apache-2.0"
] | null | null | null | terrain/config.py | cisaacstern/terrain-corrector | 6c82babb5ace3306d741760fb14db47b3ef0a7b8 | [
"Apache-2.0"
] | null | null | null | terrain/config.py | cisaacstern/terrain-corrector | 6c82babb5ace3306d741760fb14db47b3ef0a7b8 | [
"Apache-2.0"
] | null | null | null | import toml
import os
import datetime
import itertools
#load config.toml
cwd = os.getcwd()
path = os.path.join(cwd,'terrain-corrector','terrain','settings','config.toml')
config = toml.load(path)
#add config to locals
locals().update(config)
#...and a few more expressive variations
BOUNDS = [*EAST_BOUNDS, *NORTH_BOUNDS, *ELEV_BOUNDS]
EAST_MIN, EAST_MAX, NORTH_MIN, NORTH_MAX, ELEV_MIN, ELEV_MAX = BOUNDS
#create a list of topo data files
TOPO_PATH = os.path.join(cwd, 'terrain-corrector', 'data', 'topo')
TOPO_LIST = [file for file in os.listdir(TOPO_PATH)]
TOPO_LIST.sort()
#...and a list of timeseries data files
TIME_PATH = os.path.join(cwd, 'terrain-corrector', 'data', 'time')
TIME_LIST = [file for file in os.listdir(TIME_PATH)]
TIME_LIST.sort()
#assert that lengths of these two lists are equal
assert (len(TOPO_LIST)==len(TIME_LIST)), 'Fileset lengths unequal.'
#create list of datetime objects
ENABLED_DATES = [datetime.datetime(
int(fn[0:4]), int(fn[4:6]), int(fn[6:8]), 0, 0, 0, 0,
tzinfo=datetime.timezone.utc) for fn in TOPO_LIST]
#...and iterable index of enabled datetimes
DATE_DICT={date.strftime("%Y-%m-%d"):i for i,date in enumerate(ENABLED_DATES)} | 36.875 | 79 | 0.734746 | import toml
import os
import datetime
import itertools
#load config.toml
cwd = os.getcwd()
path = os.path.join(cwd,'terrain-corrector','terrain','settings','config.toml')
config = toml.load(path)
#add config to locals
locals().update(config)
#...and a few more expressive variations
BOUNDS = [*EAST_BOUNDS, *NORTH_BOUNDS, *ELEV_BOUNDS]
EAST_MIN, EAST_MAX, NORTH_MIN, NORTH_MAX, ELEV_MIN, ELEV_MAX = BOUNDS
#create a list of topo data files
TOPO_PATH = os.path.join(cwd, 'terrain-corrector', 'data', 'topo')
TOPO_LIST = [file for file in os.listdir(TOPO_PATH)]
TOPO_LIST.sort()
#...and a list of timeseries data files
TIME_PATH = os.path.join(cwd, 'terrain-corrector', 'data', 'time')
TIME_LIST = [file for file in os.listdir(TIME_PATH)]
TIME_LIST.sort()
#assert that lengths of these two lists are equal
assert (len(TOPO_LIST)==len(TIME_LIST)), 'Fileset lengths unequal.'
#create list of datetime objects
ENABLED_DATES = [datetime.datetime(
int(fn[0:4]), int(fn[4:6]), int(fn[6:8]), 0, 0, 0, 0,
tzinfo=datetime.timezone.utc) for fn in TOPO_LIST]
#...and iterable index of enabled datetimes
DATE_DICT={date.strftime("%Y-%m-%d"):i for i,date in enumerate(ENABLED_DATES)} | 0 | 0 | 0 |
5329905446ed059c91fefa7dd21cbee32116b9ff | 1,992 | py | Python | pycozmo/CozmoAnim/BodyMotion.py | gimait/pycozmo | 601d9c09903b9300e8990723cae95974212afb09 | [
"MIT"
] | 123 | 2019-08-25T21:28:23.000Z | 2022-03-12T13:54:59.000Z | pycozmo/CozmoAnim/BodyMotion.py | solosito/pycozmo | 5d28118eb8f7a625ae4a66054dabf19b4fe27483 | [
"MIT"
] | 41 | 2019-08-25T21:21:37.000Z | 2022-02-09T14:20:54.000Z | pycozmo/CozmoAnim/BodyMotion.py | solosito/pycozmo | 5d28118eb8f7a625ae4a66054dabf19b4fe27483 | [
"MIT"
] | 51 | 2019-09-04T13:30:02.000Z | 2022-01-09T01:20:24.000Z | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: CozmoAnim
import flatbuffers
| 36.218182 | 145 | 0.697289 | # automatically generated by the FlatBuffers compiler, do not modify
# namespace: CozmoAnim
import flatbuffers
class BodyMotion(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsBodyMotion(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = BodyMotion()
x.Init(buf, n + offset)
return x
# BodyMotion
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# BodyMotion
def TriggerTimeMs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# BodyMotion
def DurationTimeMs(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint32Flags, o + self._tab.Pos)
return 0
# BodyMotion
def RadiusMm(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(8))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# BodyMotion
def Speed(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(10))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Int16Flags, o + self._tab.Pos)
return 0
def BodyMotionStart(builder): builder.StartObject(4)
def BodyMotionAddTriggerTimeMs(builder, triggerTimeMs): builder.PrependUint32Slot(0, triggerTimeMs, 0)
def BodyMotionAddDurationTimeMs(builder, durationTimeMs): builder.PrependUint32Slot(1, durationTimeMs, 0)
def BodyMotionAddRadiusMm(builder, radiusMm): builder.PrependUOffsetTRelativeSlot(2, flatbuffers.number_types.UOffsetTFlags.py_type(radiusMm), 0)
def BodyMotionAddSpeed(builder, speed): builder.PrependInt16Slot(3, speed, 0)
def BodyMotionEnd(builder): return builder.EndObject()
| 1,430 | 293 | 156 |
9fb83f694c47b019db04efa9f0f92de9216748d1 | 33,041 | py | Python | src/vesper/app.py | onecommons/vesper | 818c09350b8fe53ea484aaff24deb1002a67f471 | [
"Apache-2.0"
] | null | null | null | src/vesper/app.py | onecommons/vesper | 818c09350b8fe53ea484aaff24deb1002a67f471 | [
"Apache-2.0"
] | null | null | null | src/vesper/app.py | onecommons/vesper | 818c09350b8fe53ea484aaff24deb1002a67f471 | [
"Apache-2.0"
] | null | null | null | #:copyright: Copyright 2009-2010 by the Vesper team, see AUTHORS.
#:license: Dual licenced under the GPL or Apache2 licences, see LICENSE.
"""
vesper.app
==========
This module defines the framework used by Vesper to bootstrap a running server from
a given configuration.
"""
import os, os.path, sys, traceback, re
from optparse import OptionParser
import itertools
from vesper import utils
from vesper.utils import MRUCache
from vesper.utils.Uri import UriToOsPath
from vesper.data import base, DataStore, transactions, store
from vesper.data.transaction_processor import TransactionProcessor
from vesper.backports import *
from vesper import VesperError
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
import logging
DEFAULT_LOGLEVEL = logging.INFO
#logging.BASIC_FORMAT = "%(asctime)s %(levelname)s %(name)s:%(message)s"
#logging.root.setLevel(DEFAULT_LOGLEVEL)
#logging.basicConfig()
log = logging.getLogger("app")
_defexception = utils.DynaExceptionFactory(__name__)
_defexception('CmdArgError')
_defexception('unusable namespace error')
_defexception('not authorized')
class DoNotHandleException(Exception):
'''
RequestProcessor.doActions() will not invoke error handler actions on
exceptions derived from this class.
'''
############################################################
##Helper classes and functions
############################################################
class Requestor(object):
'''
Requestor is a helper class that allows python code to invoke a
    vesper request as if it were a function call
Usage:
response = __requestor__.requestname(**kw)
    where kw holds the optional request parameters
An AttributeError exception is raised if the server does not
recognize the request
'''
#the trailing __ so you can have requests named 'invoke' without conflicting
def defaultActionCacheKeyPredicateFactory(action, cacheKeyPredicate):
'''
Returns a predicate to calculate a key for the action
based on a given request.
This function gives an action a chance to
customize the cacheKeyPredicate for the particulars of the
action instance. At the very least it should bind the action
instance with the cacheKeyPredicate to disambiguate keys from
different actions.
'''
actionid = id(action) #do this to avoid memory leaks
return lambda kw, retVal: (actionid, cacheKeyPredicate(kw, retVal))
class Action(object):
'''
The Action class encapsulates a step in the request processing pipeline.
An Action has two parts, one or more match expressions and an action
function that is invoked if the request metadata matches one of the
match expressions. The action function returns a value which is passed
onto the next Action in the sequence.
'''
NotModified = ('notmodified',)
def __init__(self, action,
cachePredicate=notCacheableKeyPredicate,
sideEffectsPredicate=None, sideEffectsFunc=None,
isValueCacheableCalc=defaultActionValueCacheableCalc,
cachePredicateFactory=defaultActionCacheKeyPredicateFactory,
debug=False):
'''
action must be a function with this signature:
def action(kw, retVal) where:
kw is the dictionary of metadata associated with the request
        retVal is the return value of the last action invoked in the action sequence, or None
'''
self.action = action
self.cacheKeyPredicate = cachePredicateFactory(self, cachePredicate)
self.cachePredicateFactory = cachePredicateFactory
self.sideEffectsPredicate = sideEffectsPredicate
self.sideEffectsFunc = sideEffectsFunc
self.isValueCacheableCalc = isValueCacheableCalc
self.debug = debug
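# e.g. (added sketch): Action(lambda kw, retVal: kw) wraps a pass-through
# step; with the defaults above its result is treated as not cacheable.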
def assignAttrs(obj, configDict, varlist, default):
'''
Helper function for adding attributes to an object
given a dictionary of configuration properties
'''
import copy
for name in varlist:
try:
defaultValue = copy.copy(default)
except TypeError:
#probably ok, can't copy certain non-mutable objects like functions
defaultValue = default
value = configDict.get(name, defaultValue)
if default is not None and not isinstance(value, type(default)):
raise VesperError('config variable %s (of type %s)'
'must be compatible with type %s'
% (name, type(value), type(default)))
setattr(obj, name, value)
############################################################
## main class
############################################################
#################################################
##command line handling
#################################################
def argsToKw(argv):
    '''
    Convert a list of command-line style arguments (e.g. sys.argv[1:])
    into a dictionary of keyword arguments.
    '''
kw = {}
i = iter(argv)
try:
arg = i.next()
while 1:
if arg[0] != '-':
raise CmdArgError('missing arg')
longArg = arg[:2] == '--'
name = arg.lstrip('-')
if not longArg and len(name) > 1:
#e.g. -fname
kw[name[0]] = name[1:]
arg = i.next()
elif longArg and '=' in name:
name, val = name.split('=', 1)
kw[name] = val
arg = i.next()
else:
kw[name] = True
arg = i.next()
if arg[0] != '-':
kw[name] = arg
arg = i.next()
except StopIteration: pass
return kw
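# Worked example (added; traced from the loop above):
#   argsToKw(['--name=foo', '-p8000', '--debug'])
#   -> {'name': 'foo', 'p': '8000', 'debug': True}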
_initConfigState()
def _normpath(basedir, path):
"""
return an absolute path given a basedir and a path fragment. If `path` is already absolute
it will be returned unchanged.
"""
if os.path.isabs(path):
return path
else:
tmp = os.path.normpath(os.path.join(basedir, path))
#assert os.path.isabs(tmp), 'not abs path %s, from %s + %s' % (tmp,basedir,path)
return tmp
def _get_module_path(modulename):
"for a modulename like 'vesper.web.admin' return a tuple (absolute_module_path, is_directory)"
import sys, imp
if modulename == "__main__":
m = sys.modules[modulename]
assert hasattr(m, '__file__'), "__main__ module missing __file__ attribute"
path = _normpath(os.getcwd(), m.__file__)
return (path, False)
else:
parts = modulename.split('.')
parts.reverse()
path = None
while parts:
part = parts.pop()
f = None
try:
f, path, descr = imp.find_module(part, path and [path] or None)
finally:
if f: f.close()
return (path, descr[-1] == imp.PKG_DIRECTORY)
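# e.g. (added sketch): for a package such as 'email' this returns the package
# directory and True; for a plain module such as 'StringIO' it returns the
# module's file path and False (imp reports PKG_DIRECTORY only for packages).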
def _importApp(baseapp):
'''
Executes the given app config file. If `createApp()` was
called during execution of the config file, the `_current_config`
global will be set to the app configuration returned by `createApp()`.
'''
baseglobals = utils.attrdict(Action=Action, createApp=createApp)
#assuming the baseapp file calls createApp(), it will set _current_config
if os.path.exists(baseapp):
#set this global so we can resolve relative paths against the location
#of the config file they appear in
_current_configpath.append( os.path.dirname(os.path.abspath(baseapp)) )
execfile(baseapp, baseglobals)
else:
(path, isdir) = _get_module_path(baseapp)
# print "_get_module_path for:" + str(baseapp) + " --> path:" + str(path) + " isdir:" + str(isdir)
assert path
#set this global so we can resolve relative paths against the location
#of the config file they appear in
_current_configpath.append( os.path.abspath(path) )
basemod = sys.modules.get(baseapp)
if basemod:
reload(basemod)
else:
__import__(baseapp, baseglobals)
_current_configpath.pop()
def createApp(derivedapp=None, baseapp=None, static_path=(), template_path=(), actions=None, **config):
'''
Create a new `AppConfig`.
:param derivedapp: is the name of the module that is extending the app. (Usually just pass `__name__`)
:param baseapp: is either a module name or a file path to the Python script that defines an app.
This file should have a call to :func:`createApp` in it
:param static_path: list or string prepended to the static resource path of the app.
:param template_path: list or string prepended to the template resource path of the app.
    :param actions: actions map of the app; it updates the base app's `actions` dictionary.
:param config: Any other keyword arguments will override config values set by the base app
'''
global _current_config
if derivedapp:
(derived_path, isdir) = _get_module_path(derivedapp)
if not isdir:
derived_path = os.path.dirname(derived_path)
else:
derived_path = None
if baseapp:
assert isinstance(baseapp, (str, unicode))
#sets _current_config if the baseapp calls createApp
_importApp(baseapp)
else:
_current_config = AppConfig()
#config variables that shouldn't be simply overwritten should be specified
#as an explicit function argument so they're not overwritten by this line:
_current_config.update(config)
if 'actions' in _current_config:
if actions:
_current_config.actions.update(actions)
else:
_current_config.actions = actions or {}
basedir = _current_configpath[-1] or derived_path
if basedir is not None:
if not os.path.isdir(basedir):
basedir = os.path.dirname(basedir)
addToPath(static_path, 'static_path')
addToPath(template_path, 'template_path')
#storage_template_path should be relative to the app config
#that sets it, not the final (most derived) app
for configdict in itertools.chain([_current_config, config.get('storeDefaults')],
(config.get('stores') or {}).values()):
if not configdict:
continue
storage_template_path = configdict.get('storage_template_path')
if storage_template_path:
abspath = _normpath(basedir, storage_template_path)
configdict['storage_template_path'] = abspath
return _current_config
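# A minimal usage sketch (module, path and handler names are hypothetical):
#   app = createApp(__name__, baseapp='vesper.web.baseapp',
#                   static_path='static',
#                   actions={'http-request': [Action(my_handler)]})
# The returned AppConfig can then be loaded/run by the caller.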
| 40.244823 | 106 | 0.593293 | #:copyright: Copyright 2009-2010 by the Vesper team, see AUTHORS.
#:license: Dual licenced under the GPL or Apache2 licences, see LICENSE.
"""
vesper.app
==========
This module defines the framework used by Vesper to bootstrap a running server from
a given configuration.
"""
import os, os.path, sys, traceback, re
from optparse import OptionParser
import itertools
from vesper import utils
from vesper.utils import MRUCache
from vesper.utils.Uri import UriToOsPath
from vesper.data import base, DataStore, transactions, store
from vesper.data.transaction_processor import TransactionProcessor
from vesper.backports import *
from vesper import VesperError
try:
import cStringIO
StringIO = cStringIO
except ImportError:
import StringIO
import logging
DEFAULT_LOGLEVEL = logging.INFO
#logging.BASIC_FORMAT = "%(asctime)s %(levelname)s %(name)s:%(message)s"
#logging.root.setLevel(DEFAULT_LOGLEVEL)
#logging.basicConfig()
log = logging.getLogger("app")
_defexception = utils.DynaExceptionFactory(__name__)
_defexception('CmdArgError')
_defexception('unusable namespace error')
_defexception('not authorized')
class DoNotHandleException(Exception):
'''
RequestProcessor.doActions() will not invoke error handler actions on
exceptions derived from this class.
'''
class ActionWrapperException(utils.NestedException):
def __init__(self):
return utils.NestedException.__init__(self,useNested=True)
############################################################
##Helper classes and functions
############################################################
class Requestor(object):
'''
Requestor is a helper class that allows python code to invoke a
    vesper request as if it were a function call
    Usage:
        response = __requestor__.requestname(**kw)
    where kw holds the optional request parameters
An AttributeError exception is raised if the server does not
recognize the request
'''
def __init__(self, server, triggerName = None):
self.server = server
self.triggerName = triggerName
#the trailing __ so you can have requests named 'invoke' without conflicting
def invoke__(self, name, **kw):
return self.invokeEx__(name, kw)[0]
def invokeEx__(self, name, kwargs):
kw = self.server.requestContext[-1].copy()
kw.update(kwargs)#overrides request context kw
kw['_name']=name
if not kw.has_key('_path'):
kw['_path'] = name
#print 'invoke', kw
        #defaultTriggerName lets us have a different trigger type per thread
#allowing site:/// urls to rely on the defaultTriggerName
triggerName = self.triggerName or self.server.defaultRequestTrigger
result = self.server.runActions(triggerName, kw, newTransaction=False)
if result is not None: #'cause '' is OK
return (result, kw)
else:
raise AttributeError, name
def __getattr__(self, name):
if name in ['__hash__','__nonzero__', '__cmp__', '__del__']:
#undefined but reserved attribute names
raise AttributeError("'Requestor' object has no attribute '%s'" %name)
return lambda **k: self.invoke__(name, **k)
#else:raise AttributeError, name #we can't do this yet since
#we may need the parameters to figure out what to invoke (like a multimethod)
def defaultActionCacheKeyPredicateFactory(action, cacheKeyPredicate):
'''
Returns a predicate to calculate a key for the action
based on a given request.
This function gives an action a chance to
customize the cacheKeyPredicate for the particulars of the
action instance. At the very least it should bind the action
instance with the cacheKeyPredicate to disambiguate keys from
different actions.
'''
actionid = id(action) #do this to avoid memory leaks
return lambda kw, retVal: (actionid, cacheKeyPredicate(kw, retVal))
def notCacheableKeyPredicate(*args, **kw):
raise MRUCache.NotCacheable
def defaultActionValueCacheableCalc(hkey, value, kw, retResult):
if value is retResult:
#when the result hasn't changed, store NotModified in the cache
#instead of the result. This way the retVal won't need to be part
#of the cache key
return Action.NotModified
else:
return value
class Action(object):
'''
The Action class encapsulates a step in the request processing pipeline.
An Action has two parts, one or more match expressions and an action
function that is invoked if the request metadata matches one of the
match expressions. The action function returns a value which is passed
onto the next Action in the sequence.
'''
NotModified = ('notmodified',)
def __init__(self, action,
cachePredicate=notCacheableKeyPredicate,
sideEffectsPredicate=None, sideEffectsFunc=None,
isValueCacheableCalc=defaultActionValueCacheableCalc,
cachePredicateFactory=defaultActionCacheKeyPredicateFactory,
debug=False):
'''
action must be a function with this signature:
def action(kw, retVal) where:
kw is the dictionary of metadata associated with the request
        retVal was the return value of the last action invoked in the action sequence or None
'''
self.action = action
self.cacheKeyPredicate = cachePredicateFactory(self, cachePredicate)
self.cachePredicateFactory = cachePredicateFactory
self.sideEffectsPredicate = sideEffectsPredicate
self.sideEffectsFunc = sideEffectsFunc
self.isValueCacheableCalc = isValueCacheableCalc
self.debug = debug
def __call__(self, kw, retVal):
return self.action(kw, retVal)
class Result(object):
def __init__(self, retVal):
self.value = retVal
@property
def asBytes(self):
value = self.value
if isinstance(value, unicode):
            return value.encode('utf8')
elif hasattr(value, 'read'):
self.value = value.read()
return str(self.value)
@property
def asUnicode(self):
if hasattr(self.value, 'read'):
            self.value = self.value.read()
        if isinstance(self.value, str):
            return self.value.decode('utf8')
elif isinstance(self.value, unicode):
return self.value
else:
return unicode(self.value)
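# A short sketch of the conversions above (assuming py2 str/unicode):
#   Result(u'caf\xe9').asBytes        => 'caf\xc3\xa9' (utf-8 encoded str)
#   Result('caf\xc3\xa9').asUnicode   => u'caf\xe9'
#   file-like values are read() once and cached on the instance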
def assignAttrs(obj, configDict, varlist, default):
'''
Helper function for adding attributes to an object
given a dictionary of configuration properties
'''
import copy
for name in varlist:
try:
defaultValue = copy.copy(default)
except TypeError:
#probably ok, can't copy certain non-mutable objects like functions
defaultValue = default
value = configDict.get(name, defaultValue)
if default is not None and not isinstance(value, type(default)):
            raise VesperError('config variable %s (of type %s) '
                              'must be compatible with type %s'
                              % (name, type(value), type(default)))
setattr(obj, name, value)
############################################################
## main class
############################################################
class RequestProcessor(TransactionProcessor):
DEFAULT_CONFIG_PATH = ''
requestsRecord = None
defaultGlobalVars = ['_name', '_noErrorHandling',
                         '__current-transaction', '__readOnly',
'__requestor__', '__server__',
'_prevkw', '__argv__', '_errorInfo'
]
nonMergableConfigDicts = ['nameMap']
def __init__(self, appVars):
super(RequestProcessor, self).__init__()
self.baseDir = os.getcwd() #XXX make configurable
self.loadConfig(appVars)
if self.template_path:
from mako.lookup import TemplateLookup
templateArgs = dict(directories=self.template_path,
default_filters=['decode.utf8'],
module_directory =self.mako_module_dir,
output_encoding='utf-8', encoding_errors='replace')
templateArgs.update(self.template_options)
self.template_loader = TemplateLookup(**templateArgs)
self.requestDispatcher = Requestor(self)
self.loadModel()
self.handleCommandLine(self.argsForConfig)
def handleCommandLine(self, argv):
''' the command line is translated into the `_params`
request variable as follows:
* arguments beginning with a '-' are treated as a variable
name with its value being the next argument unless that
argument also starts with a '-'
* the entire command line is assigned to the variable 'cmdline'
'''
kw = utils.attrdict()
kw._params = utils.defaultattrdict(argsToKw(argv))
kw['cmdline'] = '"' + '" "'.join(argv) + '"'
self.runActions('run-cmds', kw)
def loadConfig(self, appVars):
if appVars.get('beforeConfigHook'):
appVars['beforeConfigHook'](appVars)
self.config = utils.defaultattrdict(appVars)
def initConstants(varlist, default):
#add the given list of config properties as attributes
#on this RequestProcessor
return assignAttrs(self, appVars, varlist, default)
initConstants( [ 'actions'], {})
initConstants( ['default_mime_type'], '')
self.initLock(appVars)
self.txnSvc = transactions.ProcessorTransactionService(self)
initConstants( [ 'stores', 'storeDefaults'], {})
addNewResourceHook = self.actions.get('before-new')
self.defaultStore = None
if 'stores' in appVars:
stores = utils.attrdict()
for name, storeConfig in appVars['stores'].items():
stores[name] = self.loadDataStore(storeConfig,
self.storeDefaults, addNewResourceHook)
if storeConfig.get('default_store') or name == 'default':
self.defaultStore = stores[name]
if stores and not self.defaultStore:
if len(stores) > 1:
raise VesperError('default store not set')
else:
self.defaultStore = stores.values()[0]
#XXX in order to allow cmdline and config file storage settings to be useful
#merge appVars into the default store's config before loadDataStore
self.stores = stores
else:
self.defaultStore = self.loadDataStore(appVars,self.storeDefaults,addNewResourceHook)
self.stores = utils.attrdict(default=self.defaultStore)
#app_name is a unique name for this request processor instance
initConstants( ['app_name'], 'root')
self.log = logging.getLogger("app." + self.app_name)
self.defaultRequestTrigger = appVars.get('default_trigger','http-request')
initConstants( ['global_request_vars', 'static_path', 'template_path'], [])
self.work_dir = appVars.get('work_dir', 'vesper_work')
self.mako_module_dir = appVars.get('mako_module_dir', os.path.join(self.work_dir,'mako_modules'))
initConstants( ['template_options'], {})
self.global_request_vars.extend( self.defaultGlobalVars )
self.default_page_name = appVars.get('default_page_name', 'index')
#cache settings:
initConstants( ['secure_file_access', 'use_etags'], True)
self.default_expires_in = appVars.get('default_expires_in', 0)
initConstants( ['action_cache_size'], 0)
self.validate_external_request = appVars.get('validate_external_request',
lambda *args: True)
self.get_principal_func = appVars.get('get_principal_func', lambda kw: '')
self.argsForConfig = appVars.get('argsForConfig', [])
if appVars.get('configHook'):
appVars['configHook'](appVars)
def loadModel(self):
self.actionCache = MRUCache.MRUCache(self.action_cache_size,
digestKey=True)
super(RequestProcessor, self).loadModel()
self.runActions('load-model')
###########################################
## request handling engine
###########################################
def runActions(self, triggerName, kw = None, initVal=None, newTransaction=True):
'''
Retrieve the action sequences associated with the triggerName.
Each Action has a list of RxPath expressions that are evaluated after
mapping runActions keyword parameters to RxPath variables. If an
expression returns a non-empty nodeset the Action is invoked and the
value it returns is passed to the next invoked Action until the end of
the sequence, upon which the final return value is return by this function.
'''
kw = utils.attrdict(kw or {})
sequence = self.actions.get(triggerName)
if sequence:
errorSequence = self.actions.get(triggerName+'-error')
return self.doActions(sequence, kw, retVal=initVal,
errorSequence=errorSequence, newTransaction=newTransaction)
def _doActionsBare(self, sequence, kw, retVal):
try:
if not isinstance(sequence, (list, tuple)):
sequence = sequence(kw)
for action in sequence:
retResult = Result(retVal)
#try to retrieve action result from cache
#when an action is not cachable (the default)
#just calls the action
newRetVal = self.actionCache.getOrCalcValue(
action, kw, retResult,
hashCalc=action.cacheKeyPredicate,
sideEffectsCalc=action.sideEffectsPredicate,
sideEffectsFunc=action.sideEffectsFunc,
isValueCacheableCalc=action.isValueCacheableCalc)
if (newRetVal is not retResult
and newRetVal is not Action.NotModified):
retVal = newRetVal
except:
exc = ActionWrapperException()
exc.state = retVal
raise exc
return retVal
def _doActionsTxn(self, sequence, kw, retVal):
func = lambda: self._doActionsBare(sequence, kw, retVal)
return self.executeTransaction(func, kw, retVal)
def doActions(self, sequence, kw=None, retVal=None,
errorSequence=None, newTransaction=False):
if kw is None:
kw = utils.attrdict()
kw['__requestor__'] = self.requestDispatcher
kw['__server__'] = self
try:
if newTransaction:
retVal = self._doActionsTxn(sequence, kw, retVal)
else:
retVal = self._doActionsBare(sequence, kw, retVal)
except (KeyboardInterrupt, SystemExit):
raise
except:
#print newTransaction, self.txnSvc.state.timestamp
exc_info = sys.exc_info()
if isinstance(exc_info[1], ActionWrapperException):
retVal = exc_info[1].state
exc_info = exc_info[1].nested_exc_info
if self.inErrorHandler or kw.get('_noErrorHandling'):
#avoid endless loops
raise exc_info[1] or exc_info[0], None, exc_info[2]
else:
self.inErrorHandler += 1
try:
if isinstance(exc_info[1], DoNotHandleException):
raise exc_info[1] or exc_info[0], None, exc_info[2]
if errorSequence and sequence is not errorSequence:
import traceback as traceback_module
def extractErrorInfo(type, value):
#value may be either the nested exception
#or the wrapper exception
message = str(value)
module = '.'.join( str(type).split('.')[:-1] )
name = str(type).split('.')[-1].strip("'>")
errorCode = getattr(value, 'errorCode', '')
return message, module, name, errorCode
def getErrorKWs():
type, value, traceback = exc_info
if (isinstance(value, utils.NestedException)
and value.useNested):
message, module, name, errorCode=extractErrorInfo(
value.nested_exc_info[0],
value.nested_exc_info[1])
else:
message, module, name, errorCode=extractErrorInfo(
type, value)
#these should always be the wrapper exception:
(fileName, lineNumber, functionName,
text) = traceback_module.extract_tb(
traceback, 1)[0]
details = ''.join(
traceback_module.format_exception(
type, value, traceback) )
return utils.attrdict(locals())
kw['_errorInfo'] = getErrorKWs()
self.log.warning("invoking error handler on exception:\n"+
kw['_errorInfo']['details'])
try:
#if we're creating a new transaction,
#it has been aborted by now, so start a new one
#however if the error was thrown during commit we're in the midst
#of a bad transaction and its not safe to create a new one
newTransaction = newTransaction and not self.txnSvc.isActive()
return self.callActions(errorSequence, kw, retVal,
newTransaction=newTransaction)
finally:
del kw['_errorInfo']
else:
#traceback.print_exception(*exc_info)
raise exc_info[1] or exc_info[0], None, exc_info[2]
finally:
self.inErrorHandler -= 1
return retVal
def callActions(self, actions, kw, retVal, errorSequence=None, globalVars=None, newTransaction=False):
'''
process another set of actions using the current context as input,
        but without modifying the current context.
Particularly useful for template processing.
'''
globalVars = self.global_request_vars + (globalVars or [])
#merge previous prevkw, overriding vars as necessary
prevkw = kw.get('_prevkw', {}).copy()
templatekw = utils.attrdict()
for k, v in kw.items():
            #initialize the template's variable map by copying the
            #core request kws and copying the rest (the application
            #specific kws) to _prevkw. this way the template
            #processing doesn't mix with the original request but
            #the values are made available in the 'previous' namespace
            #(think of them as template parameters)
if k in globalVars:
templatekw[k] = v
elif k != '_metadatachanges':
prevkw[k] = v
templatekw['_prevkw'] = prevkw
templatekw['_contents'] = Result(retVal)
return self.doActions(actions, templatekw,
errorSequence=errorSequence, newTransaction=newTransaction)
#################################################
##command line handling
#################################################
def argsToKw(argv):
    '''
    Translate a command line argument vector into a keyword dictionary:
    '--name=value' sets kw['name'] = 'value'; '-xvalue' sets
    kw['x'] = 'value'; a flag followed by a non-flag argument sets
    kw[flag] = argument; a bare flag sets kw[flag] = True. A
    CmdArgError is raised if a non-flag argument appears first.
    '''
kw = {}
i = iter(argv)
try:
arg = i.next()
while 1:
if arg[0] != '-':
raise CmdArgError('missing arg')
longArg = arg[:2] == '--'
name = arg.lstrip('-')
if not longArg and len(name) > 1:
#e.g. -fname
kw[name[0]] = name[1:]
arg = i.next()
elif longArg and '=' in name:
name, val = name.split('=', 1)
kw[name] = val
arg = i.next()
else:
kw[name] = True
arg = i.next()
if arg[0] != '-':
kw[name] = arg
arg = i.next()
except StopIteration: pass
return kw
def initLogConfig(logConfig):
import logging.config as log_config
if isinstance(logConfig,(str,unicode)) and logConfig.lstrip()[:1] in ';#[':
#looks like a logging configuration
import textwrap
logConfig = StringIO.StringIO(textwrap.dedent(logConfig))
log_config.fileConfig(logConfig)
#any logger already created and not explicitly
    #specified in the log config file is disabled. this
    #seems like a bad design -- certainly took me a while
    #to understand why things weren't getting logged -- so
    #re-enable the loggers
for logger in logging.Logger.manager.loggerDict.itervalues():
logger.disabled = 0
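# A minimal fileConfig-style snippet accepted above (illustrative only):
#   logconfig = '''
#   [loggers]
#   keys=root
#   [handlers]
#   keys=console
#   [formatters]
#   keys=plain
#   [logger_root]
#   level=INFO
#   handlers=console
#   [handler_console]
#   class=StreamHandler
#   formatter=plain
#   args=(sys.stdout,)
#   [formatter_plain]
#   format=%(asctime)s %(levelname)s %(name)s %(message)s
#   '''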
class AppConfig(utils.attrdict):
_server = None
cmd_usage = "%prog [options] [settings]"
cmd_help = '''Settings:\n--name=VALUE Add [name] to config settings'''
#XXX add to docs
parser = OptionParser()
parser.add_option("-s", "--storage", dest="storage", help="storage path or url")
parser.add_option("-c", "--config", dest="config", help="path to configuration file")
parser.add_option("-p", "--port", dest="port", type="int", help="http server listener port")
parser.add_option("-v", "--verbose", action="store_true", dest="verbose",
default=False, help="set logging level to DEBUG")
parser.add_option("-x", "--exit", action="store_true", dest="exit",
help="exit without running HTTP server")
def updateFromConfigFile(self, filepath):
config = {}
execfile(filepath, config, config)
utils.recursiveUpdate(self, config, RequestProcessor.nonMergableConfigDicts)
def isBuiltinCmdOption(self, arg):
return self.parser.has_option(arg)
def handleCmdOptions(self, args):
(options, args) = self.parser.parse_args(args)
if options.config:
self.updateFromConfigFile(options.config)
if options.verbose:
self.loglevel = logging.DEBUG
if options.storage:
self.storage_url = options.storage
if options.port:
self.port = int(options.port)
if options.exit:
self.exec_cmd_and_exit = True
def _parseCmdLine(self, cmdline):
appargs = []
mainargs = []
want = False
while cmdline:
arg = cmdline.pop(0)
if arg.startswith('-'):
want = self.isBuiltinCmdOption(arg)
if want:
mainargs.append(arg)
else:
appargs.append(arg)
elif want:
mainargs.append(arg)
else:
appargs.append(arg)
self.parser.set_usage(self.get('cmd_usage', self.cmd_usage))
self.parser.epilog = self.get('cmd_help', self.cmd_help)
if mainargs:
self.handleCmdOptions(mainargs)
handler = self.get('cmdline_handler', lambda app, appargs: appargs)
appLeftOver = handler(self, appargs)
if appLeftOver:
#if cmd_args not set, set it to appLeftOver
if 'cmd_args' not in self:
self.cmd_args = appLeftOver
#also treat appLeftOver as config settings
try:
moreConfig = argsToKw(appLeftOver)
self.update( moreConfig )
except CmdArgError, e:
print "Error:", e.msg
self.parser.print_help()
sys.exit()
def load(self, cmdline=False):
'''
`cmdline` is a boolean or a list of command line arguments
If `cmdline` is True, the system command line is used.
If False command line processing is disabled.
'''
if isinstance(cmdline, bool):
if cmdline:
cmdline = sys.argv[1:]
else:
cmdline = []
else:
cmdline = cmdline[:]
if cmdline:
self._parseCmdLine(cmdline)
if self.get('logconfig'):
initLogConfig(self['logconfig'])
else:
log = logging.getLogger()
log.setLevel(self.get('loglevel', logging.INFO))
# format="%(asctime)s %(levelname)s %(name)s %(message)s"
# datefmt="%d %b %H:%M:%S"
# stream = logging.StreamHandler(sys.stdout)
# stream.setFormatter(logging.Formatter(format, datefmt))
# log.addHandler(stream)
if self.get('storage_url'):
try:
(proto, path) = re.split(r':(?://)?', self['storage_url'],1)
except ValueError: # split didn't work, assume its file path
proto = 'file'
path = self['storage_url']
if 'model_factory' not in self:
self['model_factory'] = store.get_factory(proto)
if proto == 'file':
path = UriToOsPath(path)
self['storage_path'] = path
from web import HTTPRequestProcessor
root = HTTPRequestProcessor(appVars=self.copy())
dict.__setattr__(self, '_server', root)
#configuration complete, clear global configuration state:
_initConfigState()
return self._server
def run(self, startserver=True, cmdline=True, out=sys.stdout):
'''
`cmdline` is a boolean or a list of command line arguments
If `cmdline` is True, the system command line is used.
If False command line processing is disabled.
'''
root = self._server
if not root:
root = self.load(cmdline)
if 'debug_filename' in self:
self.playbackRequestHistory(self['debug_filename'], out)
if self.get('record_requests'):
root.requestsRecord = []
root.requestRecordPath = 'debug-vesper-requests.pkl'
if not self.get('exec_cmd_and_exit', not startserver):
port = self.get('port', 8000)
middleware = self.get('wsgi_middleware')
httpserver = self.get('httpserver')
log.info("Starting HTTP on port %d..." % port)
#runs forever:
root.runWsgiServer(port, httpserver, middleware)
return root
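# Typical entry point for an app config script (sketch; action map elided):
#   app = createApp(__name__, actions={...})
#   if __name__ == '__main__':
#       app.run()   # parses sys.argv and starts the HTTP server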
def createStore(json='', **kw):
for name, default in [('storage_url', 'mem://'), ('storage_template', json),
('storage_template_options', dict(toplevelBnodes=False))]:
if name not in kw:
kw[name] = default
root = createApp(**kw).run(False, False)
return root.defaultStore
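# e.g. an in-memory store bootstrapped from sample JSON (hypothetical data):
#   store = createStore('[{"id": "1", "label": "example"}]')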
def _initConfigState():
global _current_config, _current_configpath
_current_config = AppConfig()
_current_configpath = [None]
_initConfigState()
def _normpath(basedir, path):
"""
return an absolute path given a basedir and a path fragment. If `path` is already absolute
it will be returned unchanged.
"""
if os.path.isabs(path):
return path
else:
tmp = os.path.normpath(os.path.join(basedir, path))
#assert os.path.isabs(tmp), 'not abs path %s, from %s + %s' % (tmp,basedir,path)
return tmp
def _get_module_path(modulename):
"for a modulename like 'vesper.web.admin' return a tuple (absolute_module_path, is_directory)"
import sys, imp
if modulename == "__main__":
m = sys.modules[modulename]
assert hasattr(m, '__file__'), "__main__ module missing __file__ attribute"
path = _normpath(os.getcwd(), m.__file__)
return (path, False)
else:
parts = modulename.split('.')
parts.reverse()
path = None
while parts:
part = parts.pop()
f = None
try:
f, path, descr = imp.find_module(part, path and [path] or None)
finally:
if f: f.close()
return (path, descr[-1] == imp.PKG_DIRECTORY)
def _importApp(baseapp):
'''
Executes the given app config file. If `createApp()` was
called during execution of the config file, the `_current_config`
global will be set to the app configuration returned by `createApp()`.
'''
baseglobals = utils.attrdict(Action=Action, createApp=createApp)
#assuming the baseapp file calls createApp(), it will set _current_config
if os.path.exists(baseapp):
#set this global so we can resolve relative paths against the location
#of the config file they appear in
_current_configpath.append( os.path.dirname(os.path.abspath(baseapp)) )
execfile(baseapp, baseglobals)
else:
(path, isdir) = _get_module_path(baseapp)
# print "_get_module_path for:" + str(baseapp) + " --> path:" + str(path) + " isdir:" + str(isdir)
assert path
#set this global so we can resolve relative paths against the location
#of the config file they appear in
_current_configpath.append( os.path.abspath(path) )
basemod = sys.modules.get(baseapp)
if basemod:
reload(basemod)
else:
__import__(baseapp, baseglobals)
_current_configpath.pop()
def getCurrentApp():
return _current_config
def createApp(derivedapp=None, baseapp=None, static_path=(), template_path=(), actions=None, **config):
'''
Create a new `AppConfig`.
:param derivedapp: is the name of the module that is extending the app. (Usually just pass `__name__`)
:param baseapp: is either a module name or a file path to the Python script that defines an app.
This file should have a call to :func:`createApp` in it
:param static_path: list or string prepended to the static resource path of the app.
:param template_path: list or string prepended to the template resource path of the app.
    :param actions: actions map of the app; it updates the base app's `actions` dictionary.
:param config: Any other keyword arguments will override config values set by the base app
'''
global _current_config
if derivedapp:
(derived_path, isdir) = _get_module_path(derivedapp)
if not isdir:
derived_path = os.path.dirname(derived_path)
else:
derived_path = None
if baseapp:
assert isinstance(baseapp, (str, unicode))
#sets _current_config if the baseapp calls createApp
_importApp(baseapp)
else:
_current_config = AppConfig()
#config variables that shouldn't be simply overwritten should be specified
#as an explicit function argument so they're not overwritten by this line:
_current_config.update(config)
if 'actions' in _current_config:
if actions:
_current_config.actions.update(actions)
else:
_current_config.actions = actions or {}
basedir = _current_configpath[-1] or derived_path
if basedir is not None:
if not os.path.isdir(basedir):
basedir = os.path.dirname(basedir)
def addToPath(path, configvar):
if isinstance(path, (str, unicode)):
path = [path]
path = list(path) + _current_config.get(configvar,[])
path = [_normpath(basedir, x) for x in path]
_current_config[configvar] = path
for p in path:
if not os.path.isdir(p):
raise VesperError('bad config variable "%s": '
'%s is not a valid directory' % (configvar, p))
addToPath(static_path, 'static_path')
addToPath(template_path, 'template_path')
#storage_template_path should be relative to the app config
#that sets it, not the final (most derived) app
for configdict in itertools.chain([_current_config, config.get('storeDefaults')],
(config.get('stores') or {}).values()):
if not configdict:
continue
storage_template_path = configdict.get('storage_template_path')
if storage_template_path:
abspath = _normpath(basedir, storage_template_path)
configdict['storage_template_path'] = abspath
return _current_config
| 14,859 | 7,178 | 431 |
bcd6579d525079638df83054d35c397e48a72c3d | 735 | py | Python | DataMining/Demo_1/step_10_dec_dimension.py | AlexYu-beta/Python_Study | 6d90b605e47d9341b20a41e7384be269243b14ba | [
"Unlicense"
] | null | null | null | DataMining/Demo_1/step_10_dec_dimension.py | AlexYu-beta/Python_Study | 6d90b605e47d9341b20a41e7384be269243b14ba | [
"Unlicense"
] | null | null | null | DataMining/Demo_1/step_10_dec_dimension.py | AlexYu-beta/Python_Study | 6d90b605e47d9341b20a41e7384be269243b14ba | [
"Unlicense"
] | null | null | null | from step_02_data_analyzer import data, target
from step_05_classify_data import t
from sklearn.decomposition import PCA
from pylab import plot, show, figure, subplot
pca = PCA(n_components=2)
pcad = pca.fit_transform(data)
figure()
subplot(211)
plot(pcad[target=='setosa',0],pcad[target=='setosa',1],'bo')
plot(pcad[target=='versicolor',0],pcad[target=='versicolor',1],'ro')
plot(pcad[target=='virginica',0],pcad[target=='virginica',1],'go')
print pca.explained_variance_ratio_
print 1-sum(pca.explained_variance_ratio_)
data_inv = pca.inverse_transform(pcad)
subplot(212)
plot(data[t==1,0],data[t==1,2],'bo')
plot(data[t==2,0],data[t==2,2],'ro')
plot(data[t==3,0],data[t==3,2],'go')
show()
print abs(sum(sum(data - data_inv)))
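# Note: the figure compares the 2-component PCA projection (top) with two of
# the original iris dimensions (bottom); the final print is the total
# reconstruction error left after inverse-transforming the projection.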
| 27.222222 | 68 | 0.730612 | from step_02_data_analyzer import data, target
from step_05_classify_data import t
from sklearn.decomposition import PCA
from pylab import plot, show, figure, subplot
pca = PCA(n_components=2)
pcad = pca.fit_transform(data)
figure()
subplot(211)
plot(pcad[target=='setosa',0],pcad[target=='setosa',1],'bo')
plot(pcad[target=='versicolor',0],pcad[target=='versicolor',1],'ro')
plot(pcad[target=='virginica',0],pcad[target=='virginica',1],'go')
print pca.explained_variance_ratio_
print 1-sum(pca.explained_variance_ratio_)
data_inv = pca.inverse_transform(pcad)
subplot(212)
plot(data[t==1,0],data[t==1,2],'bo')
plot(data[t==2,0],data[t==2,2],'ro')
plot(data[t==3,0],data[t==3,2],'go')
show()
print abs(sum(sum(data - data_inv)))
| 0 | 0 | 0 |
304600ad47ed3e4d99b4c1352f494e355e985269 | 22,113 | py | Python | notifier/access.py | slideinc/notifier | 7d370cc910d0133eaae50da24496948a353fa666 | [
"BSD-3-Clause"
] | 1 | 2016-09-23T07:22:46.000Z | 2016-09-23T07:22:46.000Z | notifier/access.py | slideinc/notifier | 7d370cc910d0133eaae50da24496948a353fa666 | [
"BSD-3-Clause"
] | null | null | null | notifier/access.py | slideinc/notifier | 7d370cc910d0133eaae50da24496948a353fa666 | [
"BSD-3-Clause"
] | null | null | null | # -*- Mode: Python; tab-width: 4 -*-
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""access
Basic access to object services.
"""
import exceptions
import random as _random
import time
import sys
import os
import socket
from gogreen import coro
import pyinfo
import error
BLANK_TOKEN = None
CORO_LOCAL_TDATA = 'access-trace-data'
CORO_LOCAL_TCTRL = 'access-trace-ctrl'
CORO_LOCAL_SOURCE = 'access-call-source'
DEFAULT_RETRY = 2
ACCESS_TRACE_LIMITS = {'obj' : None, 'cmd' : None, 'vids' : None }
ACCESS_TRACE_OFF = 0
ACCESS_TRACE_TERSE = 10
ACCESS_TRACE_INFO = 20
ACCESS_TRACE_DEBUG = 30
ACCESS_TRACE_VERBOSE = 40
#
# set local request parameter to use service DB slaves if possible.
#
#
# Public trace data/info function(s)
#
def trace_dump(clear = True):
'''trace_dump
Dump to stdout current trace data at the current trace level. An optional
clear parameter to reset or preserve the trace data. (default: True)
See Also: enable_trace()
'''
if clear:
tdata = coro.pop_local(CORO_LOCAL_TDATA, {})
tlevl = _clr_trace_local()
else:
tdata = coro.get_local(CORO_LOCAL_TDATA, {})
tlevl = _get_trace_local()
if not tlevl:
return None
total, idcnt, count = 0, 0, 0
for obj, data in tdata.items():
for cmd, (elapse, ids, cnt) in data.items():
total += elapse
count += cnt
idcnt += ids
if tlevl > ACCESS_TRACE_TERSE:
print 'Access | %0.4f | %4d | %4d | Summary (%s.%s)' % (
elapse, cnt, ids, obj, cmd)
if not total:
return None
lmt = has_trace_limits()
if lmt is None:
lmt = ''
else:
lmt = 'limit: %s' % lmt
print 'Access | %.4f | %4d | %4d | Summary (TOTAL) %s' % (
total, count, idcnt, lmt)
def enable_trace(value, clear = True):
'''enable_trace
Set trace level.
0 - No debug info.
10 - Info about each access call is saved. A call to trace_dump() will
produce a single line summary (number of access calls, number of
IDs, and total time spent in access calls) of the saved data.
20 - A call to trace_dump() will dump a line for each object/command
with the same information as the 'total' result above.
30 - For each call to access echo the call parameters and elapse time
to stdout.
40 - When echoing access calls to stdout include the call stack.
Note: Each level will produce all the data that the previous level
produces, plus the additional documented data.
constants:
ACCESS_TRACE_OFF = 0
ACCESS_TRACE_TERSE = 10
ACCESS_TRACE_INFO = 20
ACCESS_TRACE_DEBUG = 30
ACCESS_TRACE_VERBOSE = 40
Optional parameters:
clear - Reset/Clear the local override when trace_dump is called with
the clear parameter set to true. (default: True)
See Also: trace_dump()
'''
_set_trace_local(value, clear)
return _get_trace_local(), value
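# A typical tracing session (sketch):
#   enable_trace(ACCESS_TRACE_INFO)
#   ... issue access calls ...
#   trace_dump()   # prints per-object/command timing summaries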
#
# Private/Local trace data/info support function(s)
#
sum_tuple = lambda *a: tuple(map(sum, zip(*a)))
def _execute_trace_decorator():
'''_execute_trace_decorator
trace decorator explicitly for the execute function.
'''
return function
def _complete_trace_decorator():
'''_complete_trace_decorator
trace decorator explicitly for *Access complete(s) methods.
'''
return function
_trace_limits = ACCESS_TRACE_LIMITS
def trace_limit(obj = None, cmd = None, vids = None):
'''trace_limit
Limit access tracing to calls only with the supplied signature. None
values (the default) implies ignore that component; i.e., match a call
with any value for that component.
obj: Only trace calls for this access object.
cmd: Only trace calls against this command. Note that the cmd value needs
to make sense with the obj value. If the cmd limit isn't a command
associated with the obj then it effectively will not trace anything.
vids: Only trace for this vid(s). Can be a list. Note since calls can
be made with lists of vids this limit will match if ANY of the vids
in the call match those in the limit.
'''
_trace_limits['obj'] = obj
_trace_limits['cmd'] = cmd
_trace_limits['vids'] = vids
def has_trace_limits():
'''returns a string flagging which trace limits have been enabled. the
string will have the following:
o: if obj limits enabled
c: if cmd limits enabled
v: if vids limits enabled
    so the string 'oc' would mean obj and cmd limits, while 'ov' would mean
obj and vids limits.
None means no limits enabled.
'''
r = ''.join([str(x[0])[0] for x in _trace_limits.items() if x[1] is not None]) or None
return r
#
# exception propagation
#
def _unwrap(result):
'''_unwrap
Given a result fetched over RPC from service.base, check for an
envelope, and if present processes the result. When the result
is in an envelope; on success the result is removed and returned
to the called, on failure an appropriate exception is raised.
'''
if not isinstance(result, dict):
return result
if not result.get('envl', False):
return result
if not result.get('rc', 0):
return result.get('result', result)
raise error.lookup(result.get('rc'), *result.get('args', ()))
@_execute_trace_decorator()
class SplitAccess(object):
'''SplitAccess
Provide a notifier/publisher wrapper which splits access calls into
two component parts; 1) an initiation component which will send the
    request and return tracking sequence number(s) for the request(s),
2) a completion component which will wait for and return responses
for the request sequence numbers which are provided.
Examples:
split = SplitAccess(notifier)
s1 = access.test.ping(split, 0, data1)
s2 = access.test.ping(split, 0, data2)
[r1, r2] = split.complete([s1, s2])
s1 = access.test.ping(split, 0, data1)
s2 = access.test.ping(split, 0, data2)
r1, r2 = split.completes(s1, s2)
s1 = access.test.ping(split, 0, data1)
s2 = access.test.ping(split, 0, data2)
[s3, s4] = access.test.ping(split, [0, 1], data)
r1, r2, [r3, r4] = split.completes(s1, s2, [s3, s4])
'''
#
# straight wrap for publish
#
#
# RPC start
#
#
# RPC reap
#
@_complete_trace_decorator()
def complete(self, seq_list, **kwargs):
'''complete
Given a list of one or more sequence numbers, produced by
access requests for RPC(s), wait for the request completion
and return the result(s) as a list in the same order as the
requested sequence numbers.
Examples:
[r1, r2, r3, r4] = self.complete([s1, s2, s3, s4])
Optional arguments are passed to the underlying publisher RPC
completion function:
            timeout - No single socket receive should take longer than
timeout seconds. (float or int are valid as well
as None which denotes infinite/no timeout)
See Also: rpc(), rpcs()
'''
if not seq_list:
return None
retry = self._retry + 1
results = map(lambda i: None, xrange(len(seq_list)))
pending = dict(zip(seq_list, xrange(len(seq_list))))
errored = {}
if len(results) != len(pending):
            raise ValueError(
                'length mismatch. duplicates? <%d:%d>' % (
                    len(results),
                    len(pending)))
while retry and pending:
retry -= 1
result = self._notifier.rpcs_recv(pending.keys(), **kwargs)
for seq, req, rep in result:
pos = pending.pop(seq)
tmp = errored.pop(pos, None)
results[pos] = rep
if rep or not req:
continue
seq = self.rpc(*req, **kwargs).pop()
if not seq:
continue
pending[seq] = pos
errored[pos] = (req, rep)
#
# If no errors are present, and by extension nothing is left
# pending, then return the results to the user.
#
if not errored:
return map(lambda i: _unwrap(i[0]), results)
#
# All remaining cases are errors which will be reported as a
# dictionary of sequence IDs mapped to the request tuple.
#
errored = dict(map(
lambda i: (seq_list[i[0]], i[1]),
errored.iteritems()))
#
# Determine if any sequence ID(s) provided are reported to
# have a local error. (either network error, or invalid
# sequence number)
#
if filter(lambda i: i[1] is None, errored.itervalues()):
raise SplitClientError(seq_list, results, errored)
#
# If any sequence has an empty response, meaning no service
# was available to response, then raise an error for the
# entire request. (default behavior, if other behaviour is
# desired add a non-default mode to control it)
#
raise SplitServerError(seq_list, results, errored)
# not decorated to prevent trace from doubled counting
def completes(self, *args, **kwargs):
'''completes
Given sequence number(s), produced by access requests for
RPC(s), wait for the request completion and return the result
in the same order as the arguments were presented. The sequence
number arguments can be presented as either individual arguments
and/or lists/tuples of sequence numbers.
Examples:
r1 = self.completes(s1)
r1, r2, = self.completes(s1, s2)
r1, r2, (r3, r4) = self.completes(s1, s2, (s3, s4))
Optional arguments are passed to the underlying publisher RPC
completion function:
            timeout - No single socket receive should take longer than
timeout seconds. (float or int are valid as well
as None which denotes infinite/no timeout)
See Also: rpc(), rpcs()
'''
if not args:
return None
flat = _flatten(args)
result = _fatten(args, dict(zip(flat, self.complete(flat, **kwargs))))
if len(args) == 1:
return result.pop()
else:
return result
@_complete_trace_decorator()
def any(self, seq_set):
'''any
EXPERIMENTAL
Given a set of sequence numbers, produced by access requests for
RPC(s), return a tuple of any one sequence number and the matching
response. Also remove the sequence number from the given set.
Note: No retries, minimal testing.
'''
if not seq_set:
raise ValueError('empty sequence set', seq_set)
seq, req, rep = self._notifier.rpcs_pop(seq_set)
if rep is None:
raise ClientError(seq, req, rep)
if not rep:
raise ServerError(seq, req, rep)
seq_set.remove(seq)
return (seq, _unwrap(rep.pop()))
def clear(self, seqs):
'''clear
Clear any request/response state associated with a set or list
of sequence numbers
'''
return self._notifier.rpcs_clear(seqs)
| 30.458678 | 90 | 0.608149 | # -*- Mode: Python; tab-width: 4 -*-
# Copyright (c) 2005-2010 Slide, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of the author nor the names of other
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""access
Basic access to object services.
"""
import exceptions
import random as _random
import time
import sys
import os
import socket
from gogreen import coro
import pyinfo
import error
class ServerUnavailable(Exception):
pass
class ServerError(ServerUnavailable):
pass
class ClientError(ServerUnavailable):
pass
class SplitAccessError(Exception):
pass
class SplitServerError(ServerError, SplitAccessError):
pass
class SplitClientError(ClientError, SplitAccessError):
pass
class LookAside(Exception):
pass
BLANK_TOKEN = None
CORO_LOCAL_TDATA = 'access-trace-data'
CORO_LOCAL_TCTRL = 'access-trace-ctrl'
CORO_LOCAL_SOURCE = 'access-call-source'
DEFAULT_RETRY = 2
ACCESS_TRACE_LIMITS = {'obj' : None, 'cmd' : None, 'vids' : None }
ACCESS_TRACE_OFF = 0
ACCESS_TRACE_TERSE = 10
ACCESS_TRACE_INFO = 20
ACCESS_TRACE_DEBUG = 30
ACCESS_TRACE_VERBOSE = 40
def _tlb_status(object):
return coro.get_local('tlb-%s' % object, False)
def _set_trace_local(value, clear):
if not coro.has_local(CORO_LOCAL_TCTRL):
coro.set_local(CORO_LOCAL_TCTRL, {})
data = coro.get_local(CORO_LOCAL_TCTRL)
data['value'] = value
data['clear'] = clear
def _get_trace_local():
data = coro.get_local(CORO_LOCAL_TCTRL, {})
return data.get('value', False)
def _clr_trace_local():
if coro.get_local(CORO_LOCAL_TCTRL, {}).get('clear', False):
data = coro.pop_local(CORO_LOCAL_TCTRL, {})
else:
data = coro.get_local(CORO_LOCAL_TCTRL, {})
return data.get('value', False)
#
# set local request parameter to use service DB slaves if possible.
#
def slave_read(status = None):
if status is None:
return coro.get_local('slave-read', False)
else:
return coro.set_local('slave-read', status)
#
# Public trace data/info function(s)
#
def trace_dump(clear = True):
'''trace_dump
Dump to stdout current trace data at the current trace level. An optional
clear parameter to reset or preserve the trace data. (default: True)
See Also: enable_trace()
'''
if clear:
tdata = coro.pop_local(CORO_LOCAL_TDATA, {})
tlevl = _clr_trace_local()
else:
tdata = coro.get_local(CORO_LOCAL_TDATA, {})
tlevl = _get_trace_local()
if not tlevl:
return None
total, idcnt, count = 0, 0, 0
for obj, data in tdata.items():
for cmd, (elapse, ids, cnt) in data.items():
total += elapse
count += cnt
idcnt += ids
if tlevl > ACCESS_TRACE_TERSE:
print 'Access | %0.4f | %4d | %4d | Summary (%s.%s)' % (
elapse, cnt, ids, obj, cmd)
if not total:
return None
lmt = has_trace_limits()
if lmt is None:
lmt = ''
else:
lmt = 'limit: %s' % lmt
print 'Access | %.4f | %4d | %4d | Summary (TOTAL) %s' % (
total, count, idcnt, lmt)
def enable_trace(value, clear = True):
'''enable_trace
Set trace level.
0 - No debug info.
10 - Info about each access call is saved. A call to trace_dump() will
produce a single line summary (number of access calls, number of
IDs, and total time spent in access calls) of the saved data.
20 - A call to trace_dump() will dump a line for each object/command
with the same information as the 'total' result above.
30 - For each call to access echo the call parameters and elapse time
to stdout.
40 - When echoing access calls to stdout include the call stack.
Note: Each level will produce all the data that the previous level
produces, plus the additional documented data.
constants:
ACCESS_TRACE_OFF = 0
ACCESS_TRACE_TERSE = 10
ACCESS_TRACE_INFO = 20
ACCESS_TRACE_DEBUG = 30
ACCESS_TRACE_VERBOSE = 40
Optional parameters:
clear - Reset/Clear the local override when trace_dump is called with
the clear parameter set to true. (default: True)
See Also: trace_dump()
'''
_set_trace_local(value, clear)
return _get_trace_local(), value
#
# Private/Local trace data/info support function(s)
#
sum_tuple = lambda *a: tuple(map(sum, zip(*a)))
def _trace_data(start, obj, cmd, vids, **kwargs):
if not _get_trace_local():
return None
if not _trace_check_limits(obj, cmd, vids):
return None
elapse = time.time() - start
if not coro.has_local(CORO_LOCAL_TDATA):
coro.set_local(CORO_LOCAL_TDATA, {})
data = coro.get_local(CORO_LOCAL_TDATA)
data = data.setdefault(obj, {})
look = not isinstance(vids, (list, tuple)) and 1 or len(vids)
data[cmd] = sum_tuple(data.get(cmd, (0, 0, 0)), (elapse, look, 1))
if _get_trace_local() > ACCESS_TRACE_DEBUG:
stack = pyinfo.rawstack()
stack = stack[:-5] # remove 5 levels of mod_python, publisher
while stack and stack[0][0].startswith('access'):
stack.pop(0)
stack = ' @ %s' % '|'.join(['%s:%s:%s' % x for x in stack])
else:
stack = ''
if _get_trace_local() < ACCESS_TRACE_DEBUG:
return None
print 'Access | %.4f | obj: %s cmd: %s vid: %s args: %s kwargs: %s%s' % (
elapse, obj, cmd, vids,
kwargs.get('args', ()), kwargs.get('kwargs', {}),
stack)
def _execute_trace_decorator():
'''_execute_trace_decorator
trace decorator explicitly for the execute function.
'''
def function(method):
def tracer(obj, n, cmd, vids, *args, **kwargs):
start = _get_trace_local() and time.time() or 0
try:
return method(obj, n, cmd, vids, *args, **kwargs)
finally:
try:
_trace_data(start, obj, cmd, vids, **kwargs)
except:
raise
return tracer
return function
def _complete_trace_decorator():
'''_complete_trace_decorator
trace decorator explicitly for *Access complete(s) methods.
'''
def function(method):
def tracer(*args, **kwargs):
start = _get_trace_local() and time.time() or 0
try:
return method(*args, **kwargs)
finally:
try:
_trace_data(
start,
args[0].__class__.__name__,
method.__name__,
[])
except:
raise
return tracer
return function
_trace_limits = ACCESS_TRACE_LIMITS
def trace_limit(obj = None, cmd = None, vids = None):
'''trace_limit
Limit access tracing to calls only with the supplied signature. None
values (the default) implies ignore that component; i.e., match a call
with any value for that component.
obj: Only trace calls for this access object.
cmd: Only trace calls against this command. Note that the cmd value needs
to make sense with the obj value. If the cmd limit isn't a command
associated with the obj then it effectively will not trace anything.
vids: Only trace for this vid(s). Can be a list. Note since calls can
be made with lists of vids this limit will match if ANY of the vids
in the call match those in the limit.
'''
_trace_limits['obj'] = obj
_trace_limits['cmd'] = cmd
_trace_limits['vids'] = vids
def trace_limits_clear():
global _trace_limits
_trace_limits = {'obj' : None, 'cmd' : None, 'vids' : None }
def has_trace_limits():
'''returns a string flagging which trace limits have been enabled. the
string will have the following:
o: if obj limits enabled
c: if cmd limits enabled
v: if vids limits enabled
    so the string 'oc' would mean obj and cmd limits, while 'ov' would mean
obj and vids limits.
None means no limits enabled.
'''
r = ''.join([str(x[0])[0] for x in _trace_limits.items() if x[1] is not None]) or None
return r
def _trace_check_limits(obj, cmd, vids):
# object check
#
obj_limit = _trace_limits.get('obj')
if obj_limit is not None and obj_limit != obj:
return False
# cmd check
#
cmd_limit = _trace_limits.get('cmd')
if cmd_limit is not None and cmd_limit != cmd:
return False
# vids check
#
vids_l = _trace_limits.get('vids')
vids_l = (isinstance(vids_l, (list, tuple)) and [vids_l] or [[vids_l]])[0]
vids_l = set(filter(lambda x: x is not None, vids_l))
vids = (isinstance(vids, (list, tuple)) and [vids] or [[vids]])[0]
vids = set(filter(lambda x: x is not None, vids))
if vids_l and not vids_l.intersection(vids):
return False
return True
#
# exception propagation
#
def _unwrap(result):
'''_unwrap
Given a result fetched over RPC from service.base, check for an
envelope, and if present processes the result. When the result
is in an envelope; on success the result is removed and returned
to the called, on failure an appropriate exception is raised.
'''
if not isinstance(result, dict):
return result
if not result.get('envl', False):
return result
if not result.get('rc', 0):
return result.get('result', result)
raise error.lookup(result.get('rc'), *result.get('args', ()))
@_execute_trace_decorator()
def execute(
object, notifier, command, vids,
args = (), kwargs = {}, timeout = None, retry = None, raw = False):
#
    # empty sets fall through to no RPC. still, match the type the caller expects.
#
if not vids and isinstance(vids, list):
return []
#
# default retry when none specificed
#
if retry is None:
retry = DEFAULT_RETRY
#
# lookaside determination/setup. Three possible values:
#
# None - No lookup currently in progress
# False - Lookup in progress but first order
# True - Second order lookup in progress, do not
# initiate a recursive lookup.
#
tlb = 'tlb-%s' % object
val = coro.get_local(tlb)
if val is None:
val = False
elif not val:
val = True
else:
raise LookAside((tlb, val))
look = (isinstance(vids, (list, tuple)) and [vids] or [[vids]])[0]
data = {
'tlb': (tlb, val),
'slave': ('slave-read', slave_read()),
'source': socket.gethostname(),
'command': command,
'args': args,
'kwargs': kwargs}
retry += 1
results = map(lambda i: None, xrange(len(look)))
pending = {}
#
# quick pass on empty lookup set.
#
if not look:
return results
#
# create a dictionary of pending vector elements to result
# array offsets, de-duplicate the lookup vector, order is
# no longer imporatant since order is now maintained in the
# pending dictionary
#
filter(
lambda i: pending.setdefault(i[0], set([])).add(i[1]),
zip(look, xrange(len(look))))
#
# process blank requests
#
for pos in pending.pop(BLANK_TOKEN, []):
results[pos] = [{}]
#
# process remaining retry number of times.
#
while retry and pending:
retry -= 1
lookup = pending.keys()
values = notifier.rpcs(
object, lookup, 'execute', data,
timeout = timeout,
local = not val)
if not values:
continue
#
# map values to correct result location
#
for i in xrange(len(lookup)):
if not values[i]:
push = pending.get(lookup[i])
else:
push = pending.pop(lookup[i])
for pos in push:
results[pos] = values[i]
#
    # look for absence of any results, raise an error even if only
# one element is in error
#
errors = filter(lambda i: not i, results)
if errors:
#
# since we can only raise one type of error, decide based on
# the type of the first list element in the error set.
# (empty list is a server error, none is a client error
#
if errors[0] is None:
raise ClientError(look, data, errors)
else:
raise ServerError(look, data, errors)
#
# if we're not looking for raw data (function called with raw=True) then
# strip out all but the first result and _unwrap it. otherwise just
# _unwrap all the results and return them.
#
if not raw:
results = map(lambda i: _unwrap((i and [i[0]] or [None])[0]), results)
else:
results = map(lambda i: map(_unwrap, i), results)
#
# when request is for a single ID and not a list of IDs rip the
# result to match the request.
#
if not isinstance(vids, list):
results = results[0]
return results
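# A sketch of a direct call (object/command/variable names are hypothetical):
#   results = execute('test', notifier, 'ping', [vid1, vid2],
#                     args=(payload,), timeout=30)
# returns one unwrapped result per requested vid, raising
# ServerError/ClientError when any element cannot be serviced.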
def random(
object, notifier, command, args=(), kwargs={},
timeout=None, retry=None, raw=False):
return execute(
object, notifier, command,
_random.randint(0, (8 << 60) - 1),
args, kwargs, timeout, retry, raw)
def _flatten(data):
if isinstance(data, (int, long)):
return (data,)
if not data:
return ()
result = reduce(lambda x,y: _flatten(x) + _flatten(y), data)
if isinstance(result, (tuple, list)):
return tuple(result)
else:
return (result,)
def _fatten(request, result):
if isinstance(request, (int, long)):
return result[request]
else:
return map(lambda i: _fatten(i, result), request)
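# e.g. _flatten((1, (2, 3))) => (1, 2, 3), and _fatten mirrors results back
# into the request's original nesting:
#   _fatten((1, (2, 3)), {1: 'a', 2: 'b', 3: 'c'}) => ['a', ['b', 'c']]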
class SplitAccess(object):
'''SplitAccess
Provide a notifier/publisher wrapper which splits access calls into
two component parts; 1) an initiation component which will send the
    request and return tracking sequence number(s) for the request(s),
2) a completion component which will wait for and return responses
for the request sequence numbers which are provided.
Examples:
split = SplitAccess(notifier)
s1 = access.test.ping(split, 0, data1)
s2 = access.test.ping(split, 0, data2)
[r1, r2] = split.complete([s1, s2])
s1 = access.test.ping(split, 0, data1)
s2 = access.test.ping(split, 0, data2)
r1, r2 = split.completes(s1, s2)
s1 = access.test.ping(split, 0, data1)
s2 = access.test.ping(split, 0, data2)
[s3, s4] = access.test.ping(split, [0, 1], data)
r1, r2, [r3, r4] = split.completes(s1, s2, [s3, s4])
'''
def __init__(self, notifier, **kwargs):
self._notifier = notifier
self._retry = kwargs.get('retry', DEFAULT_RETRY)
def __repr__(self):
return repr(self._notifier)
#
# straight wrap for publish
#
def publish(self, object, id, cmd, args):
return self._notifier.publish(object, id, cmd, args)
#
# RPC start
#
def rpcs(self, obj, vid_list, cmd, args, *more, **kwargs):
# legacy parameter check
if more: kwargs['timeout'] = more[0]
seq, cnt = self._notifier.rpcs_push(obj, vid_list, cmd, args, **kwargs)
return map(lambda i: [i], range(seq, seq + cnt))
def rpc(self, obj, vid, cmd, args, *more, **kwargs):
result = self.rpcs(obj, [vid], cmd, args, *more, **kwargs)
if result:
return result[0]
else:
return [0]
#
# RPC reap
#
@_complete_trace_decorator()
def complete(self, seq_list, **kwargs):
'''complete
Given a list of one or more sequence numbers, produced by
access requests for RPC(s), wait for the request completion
and return the result(s) as a list in the same order as the
requested sequence numbers.
Examples:
[r1, r2, r3, r4] = self.complete([s1, s2, s3, s4])
Optional arguments are passed to the underlying publisher RPC
completion function:
            timeout - No single socket receive should take longer than
timeout seconds. (float or int are valid as well
as None which denotes infinite/no timeout)
See Also: rpc(), rpcs()
'''
if not seq_list:
return None
retry = self._retry + 1
results = map(lambda i: None, xrange(len(seq_list)))
pending = dict(zip(seq_list, xrange(len(seq_list))))
errored = {}
if len(results) != len(pending):
            raise ValueError(
                'length mismatch. duplicates? <%d:%d>' % (
                    len(results),
                    len(pending)))
while retry and pending:
retry -= 1
result = self._notifier.rpcs_recv(pending.keys(), **kwargs)
for seq, req, rep in result:
pos = pending.pop(seq)
tmp = errored.pop(pos, None)
results[pos] = rep
if rep or not req:
continue
seq = self.rpc(*req, **kwargs).pop()
if not seq:
continue
pending[seq] = pos
errored[pos] = (req, rep)
#
# If no errors are present, and by extension nothing is left
# pending, then return the results to the user.
#
if not errored:
return map(lambda i: _unwrap(i[0]), results)
#
# All remaining cases are errors which will be reported as a
# dictionary of sequence IDs mapped to the request tuple.
#
errored = dict(map(
lambda i: (seq_list[i[0]], i[1]),
errored.iteritems()))
#
# Determine if any sequence ID(s) provided are reported to
# have a local error. (either network error, or invalid
# sequence number)
#
if filter(lambda i: i[1] is None, errored.itervalues()):
raise SplitClientError(seq_list, results, errored)
#
        # If any sequence has an empty response, meaning no service
        # was available to respond, then raise an error for the
        # entire request. (default behavior; if other behavior is
        # desired, add a non-default mode to control it)
#
raise SplitServerError(seq_list, results, errored)
    # not decorated, to keep the trace from double counting
def completes(self, *args, **kwargs):
'''completes
Given sequence number(s), produced by access requests for
RPC(s), wait for the request completion and return the result
in the same order as the arguments were presented. The sequence
number arguments can be presented as either individual arguments
and/or lists/tuples of sequence numbers.
Examples:
r1 = self.completes(s1)
            r1, r2 = self.completes(s1, s2)
r1, r2, (r3, r4) = self.completes(s1, s2, (s3, s4))
Optional arguments are passed to the underlying publisher RPC
completion function:
            timeout - No single socket receive should take longer than
                      timeout seconds. (float or int are valid, as well
                      as None, which denotes infinite/no timeout)
See Also: rpc(), rpcs()
'''
if not args:
return None
flat = _flatten(args)
result = _fatten(args, dict(zip(flat, self.complete(flat, **kwargs))))
if len(args) == 1:
return result.pop()
else:
return result
@_complete_trace_decorator()
def any(self, seq_set):
'''any
EXPERIMENTAL
Given a set of sequence numbers, produced by access requests for
RPC(s), return a tuple of any one sequence number and the matching
response. Also remove the sequence number from the given set.
Note: No retries, minimal testing.
'''
if not seq_set:
raise ValueError('empty sequence set', seq_set)
seq, req, rep = self._notifier.rpcs_pop(seq_set)
if rep is None:
raise ClientError(seq, req, rep)
if not rep:
raise ServerError(seq, req, rep)
seq_set.remove(seq)
return (seq, _unwrap(rep.pop()))
def clear(self, seqs):
'''clear
Clear any request/response state associated with a set or list
of sequence numbers
'''
return self._notifier.rpcs_clear(seqs)
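# Illustrative error-handling sketch (not from the original module; assumes a
# configured `notifier` and an `access` proxy as in the class docstring):
#
#     split = SplitAccess(notifier)
#     s1 = access.test.ping(split, 0, data1)
#     s2 = access.test.ping(split, 0, data2)
#     try:
#         r1, r2 = split.completes(s1, s2)
#     except SplitClientError:          # local/network failure or bad seq
#         split.clear(list(_flatten((s1, s2))))
#     except SplitServerError:          # no service answered some request
#         split.clear(list(_flatten((s1, s2))))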
| 8,243 | 194 | 635 |
77c23b75bb2085f24c3aae732cc4256d128367c4 | 968 | py | Python | python/search/find_k.py | rgabeflores/Structures-Functions-Algorithms | fe93372db16eff81a0c4502f0c6d7319fc5b3841 | [
"MIT"
] | 1 | 2019-12-05T06:49:34.000Z | 2019-12-05T06:49:34.000Z | python/search/find_k.py | rgabeflores/Data-Structures-and-Algorithms | fe93372db16eff81a0c4502f0c6d7319fc5b3841 | [
"MIT"
] | null | null | null | python/search/find_k.py | rgabeflores/Data-Structures-and-Algorithms | fe93372db16eff81a0c4502f0c6d7319fc5b3841 | [
"MIT"
] | null | null | null | import random as r
if __name__ == "__main__":
main() | 18.264151 | 71 | 0.603306 | import random as r
def find_k(a, key):
    pivot = get_pivot(a)
    # three-way partition: keeping elements equal to the pivot in their own
    # bucket avoids infinite recursion when the array contains duplicates
    left = list()
    mid = list()
    right = list()
    for x in a:
        if x < pivot:
            left.append(x)
        elif x > pivot:
            right.append(x)
        else:
            mid.append(x)
    if key <= len(left):
        return find_k(left, key)
    elif key <= len(left) + len(mid):
        return pivot
    else:
        return find_k(right, key - len(left) - len(mid))
def get_pivot(a):
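    # median-of-three: use the middle value of the first, middle, and last
    # elements so (partially) sorted input does not trigger worst-case splits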
first = a[0]
last = a[len(a) - 1]
mid = a[int(len(a) / 2)]
if (first < mid and mid < last) or (last < mid and mid < first):
return mid
elif (mid < first and first < last) or (last < first and first < mid):
return first
elif (first < last and last < mid) or (mid < last and last < first):
return last
else:
return mid
def main():
n = int(input("\nEnter a positive integer: "))
a = []
for _ in range(n):
a.append(r.randint(-100,100))
print(a)
k = int(input("\nEnter a number between 1 and %d: " % n))
print("The " + str(k) + "th element is " + str(find_k(a,k)))
if __name__ == "__main__":
main() | 845 | 0 | 69 |
675b1020da5a77b7b649dc1568b5d103f6d1849f | 2,020 | py | Python | day01_03/day02/5_post_youdao_translate.py | xiangzaizi/base_spider | fa548f43e1f92016e710f9ffeb912e61eb690433 | [
"MIT"
] | null | null | null | day01_03/day02/5_post_youdao_translate.py | xiangzaizi/base_spider | fa548f43e1f92016e710f9ffeb912e61eb690433 | [
"MIT"
] | null | null | null | day01_03/day02/5_post_youdao_translate.py | xiangzaizi/base_spider | fa548f43e1f92016e710f9ffeb912e61eb690433 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
import json
import time
import random
import hashlib
import urllib
import urllib2
if __name__ == '__main__':
html = send_request()
dict_obj = json.loads(html)
    # the translated text
print(dict_obj["translateResult"][0][0]["tgt"])
| 32.580645 | 241 | 0.612871 | # -*- coding:utf-8 -*-
import json
import time
import random
import hashlib
import urllib
import urllib2
def send_request():
    # parameters for Youdao's JS signing algorithm
    r = str(int(time.time() * 1000) + random.randint(0, 10))
    D = "ebSeFb%=XZ%T[KZ)c(sy!"
    S = "fanyideskweb"
    n = raw_input("Please enter the text to translate: ")
    # the signature must be sent as a hex digest; the concatenation order
    # below follows the site's JS at the time of writing and may have changed
    sign = hashlib.md5(S + n + r + D).hexdigest()
form_data = {
"i": n,
"from": "AUTO",
"to": "AUTO",
"smartresult": "dict",
"client": "fanyideskweb",
"salt": r,
"sign": sign,
"doctype": "json",
"version": "2.1",
"keyfrom": "fanyi.web",
"action": "FY_BY_CLICKBUTTION",
"typoResult": "false"
}
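    # Note: the salt/sign pair above mirrors the obfuscated JavaScript served
    # with the translation page; Youdao rotates that scheme periodically, so
    # these fields may need to be re-derived from the current fanyi.min.js if
    # the server starts rejecting requests.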
data = urllib.urlencode(form_data)
base_url = "http://fanyi.youdao.com/translate_o?smartresult=dict&smartresult=rule"
headers = {
"Accept": "application/json, text/javascript, */*; q=0.01",
# "Accept-Encoding" : "gzip, deflate",
"Accept-Language": "zh-CN,zh;q=0.9,en;q=0.8",
"Connection": "keep-alive",
# "Content-Length": "218",
"Content-Type": "application/x-www-form-urlencoded; charset=UTF-8",
"Cookie": "_ntes_nnid=f77d53cb936304b5333b304b767a4958,1506087321856; OUTFOX_SEARCH_USER_ID_NCOO=971893961.4325761; OUTFOX_SEARCH_USER_ID=-1480774266@10.169.0.83; JSESSIONID=aaaouUJJcJbTucFMz-8kw; ___rl__test__cookies=1523590284588",
"Host": "fanyi.youdao.com",
"Origin": "http://fanyi.youdao.com",
"Referer": "http://fanyi.youdao.com/",
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/65.0.3325.181 Safari/537.36",
"X-Requested-With": "XMLHttpRequest"
}
request = urllib2.Request(base_url, data, headers)
request.add_header('Content-Length', len(data))
response = urllib2.urlopen(request).read()
return response
if __name__ == '__main__':
html = send_request()
dict_obj = json.loads(html)
    # the translated text
print(dict_obj["translateResult"][0][0]["tgt"])
| 1,772 | 0 | 23 |
ae287f1bbd7cb8cd3d571bb67523ab57d8016931 | 5,647 | py | Python | run_game_dirichlet.py | tzmhuang/chess-nn | ecd2188063155544dc7759e72602df9a8bcf76c1 | [
"MIT"
] | null | null | null | run_game_dirichlet.py | tzmhuang/chess-nn | ecd2188063155544dc7759e72602df9a8bcf76c1 | [
"MIT"
] | null | null | null | run_game_dirichlet.py | tzmhuang/chess-nn | ecd2188063155544dc7759e72602df9a8bcf76c1 | [
"MIT"
] | null | null | null | from Desktop.chess-nn.minimax_lookup import *
from Desktop.chess-nn.evl_conv_3 import model_fn
with tf.device('/gpu:0'):
evl_conv_temp = tf.estimator.Estimator(
model_fn = model_fn, model_dir = './DNN/evl_conv_5')
player_side = input("Please choose your side(b/w): ")
difficulty = input("Choose Difficulty(1-10): ")
board = chess.Board()
#done? implement a-b pruning for memory saving
#need to consider lose or winning senarios
# Number of moves from board_a to board_b
turn_dict = {'b':False , 'w':True}
#prob of win+draw
sess.close()
| 31.027473 | 102 | 0.561183 | from Desktop.chess-nn.minimax_lookup import *
from Desktop.chess-nn.evl_conv_3 import model_fn
def predict_input_fn(data):
data = data.astype('float32')
tmp = {'x':data}
return tf.estimator.inputs.numpy_input_fn(x = tmp,num_epochs = 1, shuffle = False )
with tf.device('/gpu:0'):
evl_conv_temp = tf.estimator.Estimator(
model_fn = model_fn, model_dir = './DNN/evl_conv_5')
player_side = input("Please choose your side(b/w): ")
difficulty = input("Choose Difficulty(1-10): ")
board = chess.Board()
# done? implement alpha-beta pruning for memory saving
def minimax_lookup(board, depth, alpha, beta, max_state):
if leaf(board) or depth == 0:
in_board = extract(board)
#print("evl")
v = evaluate(in_board)
#print("leaf")
#print(v)
#print("evl_fin")
else:
legal_moves = board.legal_moves
child_v = np.empty([1,0])
if max_state:
#print("max node")
v = float('-inf')
for moves in legal_moves:
child_board = board.copy()
child_board.push(moves)
tmp = minimax_lookup(child_board,depth-1,v,beta, False)
v = max(tmp,v)
if v >= beta:
print("max prune")
return v
if not max_state:
#print("min node")
v = float('inf')
for moves in legal_moves:
child_board = board.copy()
child_board.push(moves)
tmp = minimax_lookup(child_board,depth-1,alpha,v ,True)
v = min(v,tmp)
if v <= alpha:
print("min prune")
return v
return v
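# Sketch of the pruning logic above: at a max node v only ever grows, so once
# v >= beta the parent min node would never choose this branch and the
# remaining children can be skipped; the min node case is symmetric with
# alpha. This is standard alpha-beta pruning over the evaluate() scores.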
# need to consider losing or winning scenarios
def evaluate(in_data):
Y = evl_conv_temp.predict(input_fn = predict_input_fn(in_data))
return Y
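# NOTE (assumption): Estimator.predict() returns a generator of per-example
# prediction dicts, so the value handed back by evaluate() still has to be
# reduced to a scalar score (e.g. a win/draw probability) before the max/min
# comparisons in minimax_lookup() are meaningful.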
def get_val(board):
in_data = extract(board)
graph = tf.get_default_graph()
Y = graph.get_tensor_by_name("prob/Softmax:0")
epsilon = tf.constant(0.00000000001)
v = sess.run(Y, feed_dict={x:in_data})
return v
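# NOTE: get_val() looks like a leftover from a raw tf.Session variant of this
# script; it references a global `sess` and an input placeholder `x` that are
# never created here (see also sess.close() at the bottom of the file).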
# Number of moves from board_a to board_b
def nxtmv_count(data_np,board_a, board_b):
ind = np.where((data_np == board_a).sum(axis = 1) == 64)[0]
ind = ind+1
temp = data_np[ind]
count = ((temp == board_b).sum(axis = 1)== 64).sum()
return count
def next_move(board):
result = np.empty([0,64], dtype=int)
for move in board.legal_moves:
tmp_board = board.copy()
tmp_board.push(move)
tmp = board_cvrt(tmp_board)
result = np.concatenate((result,tmp), axis = 0)
return result
def drichlet_count(data_np,board):
count = np.empty([0,1])
board_a = board_cvrt(board).reshape([64])
legal_move = next_move(board)
for board_b in legal_move:
tmp = nxtmv_count(data_np,board_a,board_b)
count = np.concatenate((count,np.array([[tmp]])), axis = 0)
return count
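# Illustrative note: drichlet_count() tallies, for each legal move, how often
# that move followed the current position in the reference game data. The
# array `data_np` (one flattened 64-square board per row) is assumed to be
# provided elsewhere, e.g. via minimax_lookup's star import -- it is not
# defined in this script. game_start()/self_play() add these counts to the
# minimax scores, so frequently played "book" moves receive a bonus.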
def ai_move(board,depth, max_state):
v_list = np.empty([1,0])
i = 0
for moves in board.legal_moves:
c_board = board.copy()
c_board.push(moves)
        # already at the first level of the search tree
v = minimax_lookup(c_board,depth,float('-inf'),float('inf'),not max_state) #careful with depth
i+=1
#print(i,moves)
v_list = np.concatenate((v_list,np.array([[v]])), axis = 1)
#print(v_list)
return v_list
turn_dict = {'b':False , 'w':True}
#prob of win+draw
def game_start():
while not board.is_game_over():
print(board)
if turn_dict[player_side] == board.turn :
print("Player's Turn")
move = chess.Move.from_uci(input("Choose your move: "))
#check move legality
while move not in board.legal_moves:
print("Illegal move!")
move = chess.Move.from_uci(input("Choose your move: "))
board.push(move)
else:
print("Computer's Turn")
print('Thinking...')
if not turn_dict[player_side]: #ai is white
d = drichlet_count(data_np,board).reshape((1,list(board.legal_moves).__len__()))
v = d+ai_move(board,1,True)
move = list(board.legal_moves)[np.argmax(v)]
else:# ai is black
d = drichlet_count(data_np,board).reshape((1,list(board.legal_moves).__len__()))
v = -d+ai_move(board,1,False)
move = list(board.legal_moves)[np.argmin(v)]
board.push(move)
print(move)
if board.is_checkmate():
print("Checkmate")
else:
if board.is_check():
print("Check")
if board.is_stalemate():
print("Stalemate")
else:
print (board.result())
def self_play():
while not board.is_game_over():
print(board)
if turn_dict[player_side] == board.turn :
print("Computer_1's Turn")
print('Thinking...')
move = list(board.legal_moves)[np.argmax(ai_move(board,0,False))]
board.push(move)
print(move)
else:
print("Computer_2's Turn")
print('Thinking...')
# ai is black
move = list(board.legal_moves)[np.argmin(ai_move(board,0,True))]
board.push(move)
print(move)
if board.is_checkmate():
print("Checkmate")
else:
if board.is_check():
print("Check")
if board.is_stalemate():
print("Stalemate")
else:
print (board.result())
sess.close()
| 4,853 | 0 | 226 |
e1bd0b4f7e3fdaf1db8c14492ba9eb4962748dc8 | 337 | py | Python | contracts/business_template/privacy_computation/Fiat-Shamir-ZK/contract_step45.py | fengqiao/SmartDev-Contract | f2a7eec1dfd1b6dba7ae5c3ddc9b04e27517d8aa | [
"Apache-2.0"
] | 2 | 2021-12-01T06:47:30.000Z | 2021-12-07T06:19:37.000Z | contracts/business_template/privacy_computation/Fiat-Shamir-ZK/contract_step45.py | fengqiao/SmartDev-Contract | f2a7eec1dfd1b6dba7ae5c3ddc9b04e27517d8aa | [
"Apache-2.0"
] | 2 | 2021-08-12T08:19:12.000Z | 2021-08-18T10:54:37.000Z | contracts/business_template/privacy_computation/Fiat-Shamir-ZK/contract_step45.py | fengqiao/SmartDev-Contract | f2a7eec1dfd1b6dba7ae5c3ddc9b04e27517d8aa | [
"Apache-2.0"
] | 2 | 2020-09-07T11:03:01.000Z | 2020-10-03T14:42:20.000Z | import libnum
import hashlib
import random
n=8269
g=11
password = "Hello"
x = int(hashlib.sha256(password.encode()).hexdigest()[:8], 16) % n
print('\n====== Phase 4: Peggy receives c, calculates r = v - cx, and sends r to Victor ======')
c = input("c= ")
v = input("v= ")
r = (int(v) - int(c) * x) % (n-1)
print('r=v-cx =\t\t',r) | 21.0625 | 100 | 0.581602 | import libnum
import hashlib
import random
n=8269
g=11
password = "Hello"
x = int(hashlib.sha256(password.encode()).hexdigest()[:8], 16) % n
print('\n====== Phase 4: Peggy receives c, calculates r = v - cx, and sends r to Victor ======')
c = input("c= ")
v = input("v= ")
r = (int(v) - int(c) * x) % (n-1)
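# Illustrative check (not part of this contract step; assumes y = g^x mod n
# and t = g^v mod n were published in the earlier phases): Victor accepts iff
#     g^r * y^c == t (mod n)
# since g^(v - c*x) * g^(x*c) = g^v, and n = 8269 is prime, so exponents may
# be reduced mod (n - 1). For example:
#     y = pow(g, x, n)
#     t = pow(g, int(v), n)
#     assert (pow(g, r, n) * pow(y, int(c), n)) % n == t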
print('r=v-cx =\t\t',r) | 0 | 0 | 0 |
640a6657a3bc0f55296326d6b8fe5a3ef57711e7 | 2,180 | py | Python | tests/task/nlp/test_masked_language_modeling.py | mariomeissner/lightning-transformers | 4efda7b4e924b37956c7a008ca01819f5c3f98c8 | [
"Apache-2.0"
] | 451 | 2021-04-21T15:53:59.000Z | 2022-03-29T10:39:45.000Z | tests/task/nlp/test_masked_language_modeling.py | mathemusician/lightning-transformers | b2ef06113433e6a178ce4d3c9df7ede8064e247f | [
"Apache-2.0"
] | 92 | 2021-04-21T18:42:58.000Z | 2022-03-30T05:29:54.000Z | tests/task/nlp/test_masked_language_modeling.py | mathemusician/lightning-transformers | b2ef06113433e6a178ce4d3c9df7ede8064e247f | [
"Apache-2.0"
] | 51 | 2021-04-22T05:35:28.000Z | 2022-03-17T13:08:12.000Z | import sys
from unittest.mock import MagicMock
import pytest
import pytorch_lightning as pl
import torch
from transformers import AutoTokenizer
from lightning_transformers.core.nlp import HFBackboneConfig
from lightning_transformers.task.nlp.masked_language_modeling import (
MaskedLanguageModelingDataModule,
MaskedLanguageModelingTransformer,
)
from lightning_transformers.task.nlp.masked_language_modeling.config import MaskedLanguageModelingDataConfig
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
| 35.737705 | 116 | 0.765138 | import sys
from unittest.mock import MagicMock
import pytest
import pytorch_lightning as pl
import torch
from transformers import AutoTokenizer
from lightning_transformers.core.nlp import HFBackboneConfig
from lightning_transformers.task.nlp.masked_language_modeling import (
MaskedLanguageModelingDataModule,
MaskedLanguageModelingTransformer,
)
from lightning_transformers.task.nlp.masked_language_modeling.config import MaskedLanguageModelingDataConfig
def test_smoke_train(hf_cache_path):
class TestModel(MaskedLanguageModelingTransformer):
def configure_optimizers(self):
return torch.optim.AdamW(self.parameters(), lr=1e-5)
tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name_or_path="prajjwal1/bert-tiny")
model = TestModel(backbone=HFBackboneConfig(pretrained_model_name_or_path="prajjwal1/bert-tiny"))
dm = MaskedLanguageModelingDataModule(
cfg=MaskedLanguageModelingDataConfig(
batch_size=1,
dataset_name="wikitext",
dataset_config_name="wikitext-2-raw-v1",
cache_dir=hf_cache_path,
),
tokenizer=tokenizer,
)
trainer = pl.Trainer(fast_dev_run=True)
trainer.fit(model, dm)
@pytest.mark.skipif(sys.platform == "win32", reason="Currently Windows is not supported")
def test_smoke_train_e2e(script_runner):
script_runner.hf_train(task="masked_language_modeling", dataset="wikitext", model="prajjwal1/bert-tiny")
def test_smoke_predict_e2e(script_runner):
y = script_runner.hf_predict(
['+x="The cat sat on [MASK] mat."'], task="masked_language_modeling", model="prajjwal1/bert-tiny"
)
assert len(y) == 5
assert y[0]["token_str"] == "the"
def test_model_has_correct_cfg():
model = MaskedLanguageModelingTransformer(HFBackboneConfig(pretrained_model_name_or_path="prajjwal1/bert-tiny"))
assert model.hparams.downstream_model_type == "transformers.AutoModelForMaskedLM"
def test_datamodule_has_correct_cfg():
tokenizer = MagicMock()
dm = MaskedLanguageModelingDataModule(tokenizer)
assert type(dm.cfg) is MaskedLanguageModelingDataConfig
assert dm.tokenizer is tokenizer
| 1,504 | 0 | 114 |