hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
331716acfd5f9717ca8ca44120a0bc65248ee1dc | 6,108 | py | Python | tfx/experimental/pipeline_testing/pipeline_recorder_utils_test.py | Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | [
"Apache-2.0"
] | 2 | 2021-05-10T21:39:48.000Z | 2021-11-17T11:24:29.000Z | tfx/experimental/pipeline_testing/pipeline_recorder_utils_test.py | Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | [
"Apache-2.0"
] | 1 | 2021-01-28T13:44:51.000Z | 2021-04-28T16:15:47.000Z | tfx/experimental/pipeline_testing/pipeline_recorder_utils_test.py | Anon-Artist/tfx | 2692c9ab437d76b5d9517996bfe2596862e0791d | [
"Apache-2.0"
] | 1 | 2021-01-28T13:41:51.000Z | 2021-01-28T13:41:51.000Z | # Lint as: python3
# Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for tfx.experimental.pipeline_testing.pipeline_recorder_utils."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import mock
import tensorflow as tf
from tfx.dsl.io import fileio
from tfx.experimental.pipeline_testing import pipeline_recorder_utils
from tfx.utils import io_utils
class PipelineRecorderUtilsTest(tf.test.TestCase):
def setUp(self):
super(PipelineRecorderUtilsTest, self).setUp()
self._base_dir = os.path.join(self.get_temp_dir(), 'base_dir')
self.src_uri = os.path.join(self._base_dir, 'input')
self.dest_uri = os.path.join(self._base_dir, 'output')
fileio.makedirs(self.src_uri)
# Writing a string to test.txt file in src_uri
self.content = 'pipeline recorded'
io_utils.write_string_file(
os.path.join(self.src_uri, 'test.txt'), self.content)
# Placeholders for record_pipeline(...) arguments
self.metadata_db_uri = 'metadata_db_uri'
self.host = 'localhost'
self.port = 1234
self.pipeline_name = 'pipeline_name'
self.run_id = 'run_id'
# Return values for mocked get_paths(...)
self.paths = [[self.src_uri, self.dest_uri]]
# Return values for mocked get_execution_dict(...)
self.execution_dict = {self.run_id: []}
@mock.patch.object(pipeline_recorder_utils, 'get_latest_executions')
def testRecordLatestKfpPipeline(self, mock_get_latest_executions):
# Tests recording KFP pipeline outputs for the latest execution.
with mock.patch.object(
pipeline_recorder_utils, '_get_paths',
return_value=self.paths) as mock_get_paths:
pipeline_recorder_utils.record_pipeline(
output_dir=self._base_dir,
host=self.host,
port=self.port,
pipeline_name=self.pipeline_name)
mock_get_paths.assert_called()
mock_get_latest_executions.assert_called()
files = fileio.listdir(self.dest_uri)
self.assertLen(files, 1)
self.assertEqual(
io_utils.read_string_file(os.path.join(self.dest_uri, files[0])),
self.content)
def testRecordKfpPipelineRunId(self):
# Tests recording KFP pipeline outputs given a run_id.
with mock.patch.object(pipeline_recorder_utils, '_get_execution_dict',
return_value=self.execution_dict
) as mock_get_execution_dict,\
mock.patch.object(pipeline_recorder_utils, '_get_paths',
return_value=self.paths) as mock_get_paths:
pipeline_recorder_utils.record_pipeline(
output_dir=self._base_dir,
host=self.host,
port=self.port,
run_id=self.run_id)
mock_get_execution_dict.assert_called()
mock_get_paths.assert_called()
# Verifying that test.txt has been copied from src_uri to dest_uri
files = fileio.listdir(self.dest_uri)
self.assertLen(files, 1)
self.assertEqual(
io_utils.read_string_file(os.path.join(self.dest_uri, files[0])),
self.content)
@mock.patch('tfx.orchestration.metadata.sqlite_metadata_connection_config')
@mock.patch('tfx.orchestration.metadata.Metadata')
@mock.patch.object(pipeline_recorder_utils, 'get_latest_executions')
def testRecordLatestBeamPipeline(self, mock_get_latest_executions,
mock_metadata, mock_config):
# Tests recording Beam pipeline outputs for the latest execution.
with mock.patch.object(
pipeline_recorder_utils, '_get_paths',
return_value=self.paths) as mock_get_paths:
pipeline_recorder_utils.record_pipeline(
output_dir=self._base_dir,
metadata_db_uri=self.metadata_db_uri,
pipeline_name=self.pipeline_name)
mock_config.assert_called_with(self.metadata_db_uri)
mock_metadata.assert_called()
mock_get_paths.assert_called()
mock_get_latest_executions.assert_called()
# Verifying that test.txt has been copied from src_uri to dest_uri
files = fileio.listdir(self.dest_uri)
self.assertLen(files, 1)
self.assertEqual(
io_utils.read_string_file(os.path.join(self.dest_uri, files[0])),
self.content)
@mock.patch('tfx.orchestration.metadata.sqlite_metadata_connection_config')
@mock.patch('tfx.orchestration.metadata.Metadata')
def testRecordBeamPipelineRunId(self, mock_metadata, mock_config):
# Tests recording Beam pipeline outputs given a run_id.
with mock.patch.object(pipeline_recorder_utils, '_get_execution_dict',
return_value=self.execution_dict
) as mock_get_execution_dict,\
mock.patch.object(pipeline_recorder_utils, '_get_paths',
return_value=self.paths
) as mock_get_paths:
pipeline_recorder_utils.record_pipeline(
output_dir=self._base_dir,
metadata_db_uri=self.metadata_db_uri,
run_id=self.run_id)
mock_config.assert_called_with(self.metadata_db_uri)
mock_metadata.assert_called()
mock_get_execution_dict.assert_called()
mock_get_paths.assert_called()
# Verifying that test.txt has been copied from src_uri to dest_uri
files = fileio.listdir(self.dest_uri)
self.assertLen(files, 1)
self.assertEqual(
io_utils.read_string_file(os.path.join(self.dest_uri, files[0])),
self.content)
if __name__ == '__main__':
tf.test.main()
| 40.184211 | 77 | 0.708906 | 5,080 | 0.831696 | 0 | 0 | 3,185 | 0.521447 | 0 | 0 | 1,746 | 0.285855 |
331719a2a5a244761348d8660d5c741b1f74d90f | 2,921 | py | Python | tests/test1.py | pedroramaciotti/Cloudtropy | bce1cc1cd6c5217ac20cf5a98491d10c6a8905b2 | [
"MIT"
] | null | null | null | tests/test1.py | pedroramaciotti/Cloudtropy | bce1cc1cd6c5217ac20cf5a98491d10c6a8905b2 | [
"MIT"
] | null | null | null | tests/test1.py | pedroramaciotti/Cloudtropy | bce1cc1cd6c5217ac20cf5a98491d10c6a8905b2 | [
"MIT"
] | 1 | 2021-03-10T14:04:04.000Z | 2021-03-10T14:04:04.000Z | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# from scipy.stats import entropy
import sys
sys.path.append('../')
import cloudtropy
# Synthetic data: a uniform background plus four Gaussian clusters
gen_dim = 2
gen_N = 300
lims = (-2,6)
scale = 0.2
X = np.random.uniform(low=lims[0],high=lims[1],size=(10000,2)) # background
X = np.concatenate([X,scale*np.random.randn(gen_N,gen_dim)+np.array([0,0])] )
X = np.concatenate([X,scale*np.random.randn(gen_N,gen_dim)+np.array([4,0])] )
X = np.concatenate([X,scale*np.random.randn(gen_N,gen_dim)+np.array([0,4])] )
X = np.concatenate([X,scale*np.random.randn(gen_N,gen_dim)+np.array([4,4])] )
# input parameters
N_grid = 80
delta_c = 0.35
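# Note: cloudtropy.pmf is called below either with N (grid points per axis) or
# d (grid spacing); delta_c appears to be the cell/kernel width parameter.
# These interpretations are assumptions based on this script's usage, not docs.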
# grid,pmf = cloudtropy.pmf(X,N=N_grid,delta_c=delta_c,lims=[(-2,6),(-2,6)])
grid,pmf = cloudtropy.pmf(X,d=0.1,delta_c=delta_c,lims=[(-2,6),(-2,6)])
entropy = cloudtropy.entropy(X,base=2,N=N_grid,delta_c=delta_c,lims=[(-3,7),(-3,7)])
print(cloudtropy.entropy(X,base=2,d=0.1,delta_c=delta_c,lims=[(-3,7),(-3,7)]))
############## All in one
fig = plt.figure(figsize=(14,3))
#
ax1 = fig.add_subplot(1,4,1)
# levels = np.linspace(0,flat_pmf.max(),40)
ax1.scatter(X[:,0], X[:,1],s=1,alpha=0.1,color='k')
ax1.set_xlabel('x'),ax1.set_ylabel('y')
ax1.set_xlim(lims),ax1.set_ylim(lims)
ax1.axis('equal')
#
ax2 = fig.add_subplot(1,3,2,projection='3d')
ax2.plot_surface(grid[0], grid[1], pmf,cmap='coolwarm', edgecolor='none',shade='interp')
ax2.set_xlabel('x'),ax2.set_ylabel('y')#,ax.set_zlabel('PMF',rotation=90)
ax2.view_init(elev=60, azim=-45)
#
ax3 = fig.add_subplot(1,3,3)
cs = ax3.contourf(grid[0], grid[1], pmf, levels=np.linspace(0,pmf.max(),40), cmap='Purples_r')
ax3.set_xlabel('x'),ax3.set_ylabel('y')
ax3.set_title('Entropy = %.3f'%entropy)
ax3.set_xlim(lims),ax3.set_ylim(lims)
ax3.axis('equal')
cbar = fig.colorbar(cs)
#
plt.tight_layout()
# plt.savefig('all.pdf')
plt.savefig('all.png',dpi=400)
############## Separate
fig = plt.figure(figsize=(4,3))
#
ax1 = fig.add_subplot(1,1,1)
# levels = np.linspace(0,flat_pmf.max(),40)
ax1.scatter(X[:,0], X[:,1],s=1,alpha=0.1,color='k')
ax1.set_xlabel('x'),ax1.set_ylabel('y')
ax1.set_xlim(lims),ax1.set_ylim(lims)
ax1.axis('equal')
plt.savefig('scatter.png',dpi=400)
#
fig = plt.figure(figsize=(4,3))
#
ax2 = fig.add_subplot(1,1,1,projection='3d')
ax2.plot_surface(grid[0], grid[1], pmf,cmap='coolwarm', edgecolor='none',shade='interp')
ax2.set_xlabel('x'),ax2.set_ylabel('y')#,ax.set_zlabel('PMF',rotation=90)
ax2.view_init(elev=60, azim=-45)
plt.savefig('surf.png',dpi=400)
#
fig = plt.figure(figsize=(4,3))
#
ax3 = fig.add_subplot(1,1,1)
cs = ax3.contourf(grid[0], grid[1], pmf, levels=np.linspace(0,pmf.max(),40), cmap='Purples_r')
# ax3.set_xlabel('x'),ax3.set_ylabel('y')
# ax3.set_title('Entropy = %.3f'%entropy)
ax3.set_xlim(lims),ax3.set_ylim(lims)
ax3.axis('equal')
cbar = fig.colorbar(cs)
#
plt.tight_layout()
# plt.savefig('all.pdf')
plt.savefig('contour_simple.png',dpi=400)
| 27.819048 | 94 | 0.680589 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 702 | 0.240329 |
331856b5e0304e7640fa3195b77c52b4c37b9bf8 | 3,238 | py | Python | uncertainty_wizard/models/ensemble_utils/_callables.py | p1ndsvin/uncertainty-wizard | 92a7a9bcb411e512cf6ad54e7ba226a3c66d3583 | [
"MIT"
] | 33 | 2020-12-21T20:21:21.000Z | 2022-03-25T17:51:36.000Z | uncertainty_wizard/models/ensemble_utils/_callables.py | swb19/uncertainty-wizard | 5ba9bfc6ee967eb5f226abbedb6f9d5452b3cfea | [
"MIT"
] | 83 | 2020-12-18T18:18:28.000Z | 2022-03-28T21:17:29.000Z | uncertainty_wizard/models/ensemble_utils/_callables.py | swb19/uncertainty-wizard | 5ba9bfc6ee967eb5f226abbedb6f9d5452b3cfea | [
"MIT"
] | 5 | 2021-02-13T13:27:48.000Z | 2021-12-25T16:45:19.000Z | import gc
from dataclasses import dataclass
from typing import Dict, Tuple, Union
import numpy as np
import tensorflow as tf
@dataclass
class DataLoadedPredictor:
"""
The default task to be executed for predictions where the input data is a numpy array.
Leaves the serialization and deserialization of the array to the python multiprocessing library,
and does thus not explicitly implement it here.
"""
x_test: np.ndarray
batch_size: int
steps: int = None
def __call__(self, model_id: int, model: tf.keras.Model):
"""Simple call to keras predict, formulated as __call__ to allow for constructor params."""
return model.predict(
x=self.x_test, batch_size=self.batch_size, steps=self.steps, verbose=1
)
@dataclass
class NumpyFitProcess:
"""
This is a class used as callable for the serialization and deserialization of numpy arrays
which are then used in the keras fit process.
"""
x: Union[str, np.ndarray] = None
y: Union[str, np.ndarray] = None
batch_size: int = None
epochs: int = 1
verbose: int = 1
# Callbacks not supported in this default process (as type does not guarantee picklability)
# callbacks = None,
validation_split: float = 0.0
validation_data: Union[Tuple[str, str], Tuple[np.ndarray, np.ndarray]] = None
shuffle: bool = True
class_weight: Dict[int, float] = None
sample_weight: np.ndarray = None
initial_epoch: int = 0
steps_per_epoch: int = None
validation_steps: int = None
validation_freq: int = 1
# Max_queue_size, workers and use_multiprocessing not supported as we force input to be numpy array
# max_queue_size = 10,
# workers = 1,
# use_multiprocessing = False
def __call__(
self, model_id: int, model: tf.keras.Model
) -> Tuple[tf.keras.Model, tf.keras.callbacks.History]:
"""Simple call to keras fit, formulated as __call__ to allow for constructor params."""
x = np.load(self.x, allow_pickle=True) if isinstance(self.x, str) else self.x
y = np.load(self.y, allow_pickle=True) if isinstance(self.y, str) else self.y
if self.validation_data is not None and isinstance(
self.validation_data[0], str
):
val_x = np.load(self.validation_data[0], allow_pickle=True)
val_y = np.load(self.validation_data[1], allow_pickle=True)
val_data = (val_x, val_y)
else:
val_data = self.validation_data
history = model.fit(
x=x,
y=y,
batch_size=self.batch_size,
epochs=self.epochs,
verbose=self.verbose,
validation_split=self.validation_split,
validation_data=val_data,
shuffle=self.shuffle,
class_weight=self.class_weight,
sample_weight=self.sample_weight,
initial_epoch=self.initial_epoch,
steps_per_epoch=self.steps_per_epoch,
validation_steps=self.validation_steps,
validation_freq=self.validation_freq,
)
del x
del y
if val_data:
del val_data
gc.collect()
return model, history.history
| 34.817204 | 103 | 0.652254 | 3,084 | 0.95244 | 0 | 0 | 3,106 | 0.959234 | 0 | 0 | 863 | 0.266523 |
3318dead767f04f859f302ca3cf27d38474b142d | 5,739 | py | Python | venv/Lib/site-packages/PyQt4/examples/designer/calculatorform/ui_calculatorform.py | prateekfxtd/ns_Startup | 095a62b3a8c7bf0ff7b767355d57d993bbd2423d | [
"MIT"
] | 1 | 2022-03-16T02:10:30.000Z | 2022-03-16T02:10:30.000Z | venv/Lib/site-packages/PyQt4/examples/designer/calculatorform/ui_calculatorform.py | prateekfxtd/ns_Startup | 095a62b3a8c7bf0ff7b767355d57d993bbd2423d | [
"MIT"
] | null | null | null | venv/Lib/site-packages/PyQt4/examples/designer/calculatorform/ui_calculatorform.py | prateekfxtd/ns_Startup | 095a62b3a8c7bf0ff7b767355d57d993bbd2423d | [
"MIT"
] | 2 | 2019-05-28T11:58:59.000Z | 2020-09-23T17:21:19.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'calculatorform.ui'
#
# Created: Mon Jan 23 13:21:45 2006
# by: PyQt4 UI code generator vsnapshot-20060120
#
# WARNING! All changes made in this file will be lost!
import sys
from PyQt4 import QtCore, QtGui
class Ui_CalculatorForm(object):
def setupUi(self, CalculatorForm):
CalculatorForm.setObjectName("CalculatorForm")
CalculatorForm.resize(QtCore.QSize(QtCore.QRect(0,0,400,300).size()).expandedTo(CalculatorForm.minimumSizeHint()))
sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Policy(5),QtGui.QSizePolicy.Policy(5))
sizePolicy.setHorizontalStretch(0)
sizePolicy.setVerticalStretch(0)
sizePolicy.setHeightForWidth(CalculatorForm.sizePolicy().hasHeightForWidth())
CalculatorForm.setSizePolicy(sizePolicy)
self.gridlayout = QtGui.QGridLayout(CalculatorForm)
self.gridlayout.setMargin(9)
self.gridlayout.setSpacing(6)
self.gridlayout.setObjectName("gridlayout")
spacerItem = QtGui.QSpacerItem(40,20,QtGui.QSizePolicy.Expanding,QtGui.QSizePolicy.Minimum)
self.gridlayout.addItem(spacerItem,0,6,1,1)
self.label_3_2 = QtGui.QLabel(CalculatorForm)
self.label_3_2.setGeometry(QtCore.QRect(169,9,20,52))
self.label_3_2.setAlignment(QtCore.Qt.AlignCenter)
self.label_3_2.setObjectName("label_3_2")
self.gridlayout.addWidget(self.label_3_2,0,4,1,1)
self.vboxlayout = QtGui.QVBoxLayout()
self.vboxlayout.setMargin(1)
self.vboxlayout.setSpacing(6)
self.vboxlayout.setObjectName("vboxlayout")
self.label_2_2_2 = QtGui.QLabel(CalculatorForm)
self.label_2_2_2.setGeometry(QtCore.QRect(1,1,36,17))
self.label_2_2_2.setObjectName("label_2_2_2")
self.vboxlayout.addWidget(self.label_2_2_2)
self.outputWidget = QtGui.QLabel(CalculatorForm)
self.outputWidget.setGeometry(QtCore.QRect(1,24,36,27))
self.outputWidget.setFrameShape(QtGui.QFrame.Box)
self.outputWidget.setFrameShadow(QtGui.QFrame.Sunken)
self.outputWidget.setAlignment(QtCore.Qt.AlignAbsolute|QtCore.Qt.AlignBottom|QtCore.Qt.AlignCenter|QtCore.Qt.AlignHCenter|QtCore.Qt.AlignHorizontal_Mask|QtCore.Qt.AlignJustify|QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignRight|QtCore.Qt.AlignTop|QtCore.Qt.AlignTrailing|QtCore.Qt.AlignVCenter|QtCore.Qt.AlignVertical_Mask)
self.outputWidget.setObjectName("outputWidget")
self.vboxlayout.addWidget(self.outputWidget)
self.gridlayout.addLayout(self.vboxlayout,0,5,1,1)
spacerItem1 = QtGui.QSpacerItem(20,40,QtGui.QSizePolicy.Minimum,QtGui.QSizePolicy.Expanding)
self.gridlayout.addItem(spacerItem1,1,2,1,1)
self.vboxlayout1 = QtGui.QVBoxLayout()
self.vboxlayout1.setMargin(1)
self.vboxlayout1.setSpacing(6)
self.vboxlayout1.setObjectName("vboxlayout1")
self.label_2 = QtGui.QLabel(CalculatorForm)
self.label_2.setGeometry(QtCore.QRect(1,1,46,19))
self.label_2.setObjectName("label_2")
self.vboxlayout1.addWidget(self.label_2)
self.inputSpinBox2 = QtGui.QSpinBox(CalculatorForm)
self.inputSpinBox2.setGeometry(QtCore.QRect(1,26,46,25))
self.inputSpinBox2.setObjectName("inputSpinBox2")
self.vboxlayout1.addWidget(self.inputSpinBox2)
self.gridlayout.addLayout(self.vboxlayout1,0,3,1,1)
self.label_3 = QtGui.QLabel(CalculatorForm)
self.label_3.setGeometry(QtCore.QRect(63,9,20,52))
self.label_3.setAlignment(QtCore.Qt.AlignCenter)
self.label_3.setObjectName("label_3")
self.gridlayout.addWidget(self.label_3,0,1,1,1)
self.vboxlayout2 = QtGui.QVBoxLayout()
self.vboxlayout2.setMargin(1)
self.vboxlayout2.setSpacing(6)
self.vboxlayout2.setObjectName("vboxlayout2")
self.label = QtGui.QLabel(CalculatorForm)
self.label.setGeometry(QtCore.QRect(1,1,46,19))
self.label.setObjectName("label")
self.vboxlayout2.addWidget(self.label)
self.inputSpinBox1 = QtGui.QSpinBox(CalculatorForm)
self.inputSpinBox1.setGeometry(QtCore.QRect(1,26,46,25))
self.inputSpinBox1.setObjectName("inputSpinBox1")
self.vboxlayout2.addWidget(self.inputSpinBox1)
self.gridlayout.addLayout(self.vboxlayout2,0,0,1,1)
self.retranslateUi(CalculatorForm)
QtCore.QMetaObject.connectSlotsByName(CalculatorForm)
def tr(self, string):
return QtGui.QApplication.translate("CalculatorForm", string, None, QtGui.QApplication.UnicodeUTF8)
def retranslateUi(self, CalculatorForm):
CalculatorForm.setObjectName(self.tr("CalculatorForm"))
CalculatorForm.setWindowTitle(self.tr("Calculator Form"))
self.label_3_2.setObjectName(self.tr("label_3_2"))
self.label_3_2.setText(self.tr("="))
self.label_2_2_2.setObjectName(self.tr("label_2_2_2"))
self.label_2_2_2.setText(self.tr("Output"))
self.outputWidget.setObjectName(self.tr("outputWidget"))
self.outputWidget.setText(self.tr("0"))
self.label_2.setObjectName(self.tr("label_2"))
self.label_2.setText(self.tr("Input 2"))
self.inputSpinBox2.setObjectName(self.tr("inputSpinBox2"))
self.label_3.setObjectName(self.tr("label_3"))
self.label_3.setText(self.tr("+"))
self.label.setObjectName(self.tr("label"))
self.label.setText(self.tr("Input 1"))
self.inputSpinBox1.setObjectName(self.tr("inputSpinBox1"))
| 47.429752 | 343 | 0.699076 | 5,446 | 0.948946 | 0 | 0 | 0 | 0 | 0 | 0 | 575 | 0.100192 |
33190b249bfea8e389858313a9b36fc7c3e017ce | 1,605 | py | Python | ggtools/gg/static_models.py | richannan/GGTOOLS | 7909da988d90de50c82532d97121a3fbcfc0263a | [
"MIT"
] | 22 | 2019-12-16T01:30:29.000Z | 2022-03-01T08:57:07.000Z | ggtools/gg/static_models.py | richannan/GGTOOLS | 7909da988d90de50c82532d97121a3fbcfc0263a | [
"MIT"
] | 3 | 2019-12-23T14:09:30.000Z | 2022-03-29T01:52:53.000Z | ggtools/gg/static_models.py | richannan/GGTOOLS | 7909da988d90de50c82532d97121a3fbcfc0263a | [
"MIT"
] | 13 | 2019-12-19T07:01:19.000Z | 2022-03-14T11:26:36.000Z | from os import path,makedirs
from urllib.request import urlretrieve
def static_download(model):
'''
    Download a static gravity model from icgem.gfz-potsdam.de; if the file to be downloaded already exists in the download directory, the download is skipped automatically.
Usage:
static_download('GGM05C')
static_download('EIGEN-6C4')
Inputs:
    model -> [str] Available options are 'GGM05C' and 'EIGEN-6C4'.
Outputs: downloaded static gravity model
Examples:
>>> static_download('GGM05C')
Downloading the static gravity model GGM05C ... Finished
'static_models/GGM05C.gfc'
>>> static_download('EIGEN-6C4')
Downloading the static gravity model EIGEN-6C4 ... Finished
'static_models/EIGEN-6C4.gfc'
'''
direc = 'static_models/'
if not path.exists(direc): makedirs(direc)
if model == 'GGM05C':
gravity_file = direc + 'GGM05C.gfc'
url = 'http://icgem.gfz-potsdam.de/getmodel/gfc/778a683780a5b0ad3163f4772b97b9075a0a13c389d2bd8ea3f891b64cfa383d/GGM05C.gfc'
elif model == 'EIGEN-6C4':
gravity_file = direc + 'EIGEN-6C4.gfc'
url = 'http://icgem.gfz-potsdam.de/getmodel/gfc/7fd8fe44aa1518cd79ca84300aef4b41ddb2364aef9e82b7cdaabdb60a9053f1/EIGEN-6C4.gfc'
else:
raise Exception('Currently, available static gravity models are GGM05C and EIGEN-6C4.')
if not path.exists(gravity_file):
print('Downloading the static gravity model '+ model,end=' ... ')
urlretrieve(url, gravity_file)
print('Finished')
return gravity_file | 38.214286 | 175 | 0.688474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,111 | 0.692212 |
33199afda7e088eef8462e491f6d52a8646c9666 | 5,259 | py | Python | Python/linprog/simplex.py | bashardudin/LinearPrograms | 24b67171cd5bbf407db47f2adb5150ab71d88143 | [
"CC0-1.0"
] | 22 | 2016-10-20T14:04:48.000Z | 2020-03-31T08:45:14.000Z | Python/linprog/simplex.py | bashardudin/LinearPrograms | 24b67171cd5bbf407db47f2adb5150ab71d88143 | [
"CC0-1.0"
] | 2 | 2018-06-04T01:30:01.000Z | 2018-10-09T08:18:10.000Z | Python/linprog/simplex.py | bashardudin/LinearPrograms | 24b67171cd5bbf407db47f2adb5150ab71d88143 | [
"CC0-1.0"
] | 2 | 2016-10-26T21:19:58.000Z | 2018-03-17T19:28:15.000Z | #!/usr/bin/env python
# _*_ encoding: utf-8 _*_
"""simplex.py: Simplex algorithm with rational coefficients"""
import numpy as np
import fractions as frac
__author__ = "Bashar Dudin"
__email__ = "bashar.dudin@epita.fr"
class RestrictedSimplex(object):
def __init__(self, leaving_index=None, entering_index=None):
if not leaving_index:
def func(l):
m = 0
                # Check the bound before indexing to avoid an IndexError.
                while m < len(l) and not l[m]:
m += 1
if m == len(l):
return 0
for i in range(len(l)):
if l[i] and l[m] > l[i]:
m = i
return m
leaving_index = func
if not entering_index:
def func(l):
return l.index(min(l))
entering_index = func
self.leaving_index = leaving_index
self.entering_index = entering_index
def __call__(self, lin_p, recursion_limit=100):
""" Runs a restricted version of the simplex algorithm
Runs simplex algorithm on linear programs having feasible basic
solution. It takes in an integer to limit the number of recursions.
:return: a linear program whose basic solution has maximal objective
value.
"""
a = lin_p.table
        if not lin_p.has_feasible_basic():
raise TypeError("Linear program doesn't have feasible base solution")
n = 0
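        # Keep pivoting while the objective row still contains a negative
        # coefficient, i.e. while the basic solution can still be improved.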
while any(a[0, :-1] < 0) and n < recursion_limit:
            entering_choices = [0 if x > 0 else x for x in a[0, :-1]]
e = self.entering_index(entering_choices)
leaving_choices = [None]*lin_p.shape[0]
for i in range(lin_p.shape[0]):
if a[i+1, e] > 0:
leaving_choices[i] = (a[i+1, -1]/a[i+1, e])
if not [i for i in leaving_choices if i]:
raise OverflowError("Linear program unbounded | check model and state.")
else:
l = 1 + self.leaving_index(leaving_choices)
lin_p.pivot(e, l)
n += 1
form = "Basic solution = " + \
"(" + "{}, " * (lin_p.shape[1] - 1) + "{})" + \
" with objective value = {}."
print(form.format(*lin_p.basic_solution(), lin_p.table[0, -1]), end="\n\n")
return lin_p.basic_solution(), lin_p.table[0, -1]
class Simplex(RestrictedSimplex):
def is_feasible(self, lin_p):
""" Checks if linear program is feasible..
Has side effect: transforms linear program if not basic feasible
into an equivalent linear program having basic feasible solution.
:return: boolean.
"""
print(" ### Checking feasibility of linear program", lin_p, sep="\n\n")
if lin_p.has_feasible_basic():
print(" ### Input linear program has feasible basic solution", end="\n\n")
return True
print(" ### Basic solution is not feasible: using auxiliary linear program in next step", end="\n\n")
gain_fun = np.copy(lin_p.table[0])
lin_p.shape = (lin_p.shape[0], lin_p.shape[1] + 1)
lin_p.table = np.insert(lin_p.table, 0, frac.Fraction(-1, 1), axis=1)
lin_p.table[0] = np.hstack((np.ones(1, dtype=frac.Fraction),
np.zeros(lin_p.shape[1], dtype=frac.Fraction)))
lin_p.basic = [i+1 for i in lin_p.basic]
l = 1 + np.argmin(lin_p.table[1:, -1])
lin_p.pivot(0, l) # Now program has feasible basic solution
if RestrictedSimplex.__call__(self, lin_p)[1] == 0:
print(" ### Input linear program is thus feasible", end="\n\n")
if 0 in lin_p.basic:
l = lin_p.basic.index(0)
e = 0
                while e < lin_p.shape[1] and lin_p.table[l, e] == 0:
# There is a at least an e with this property
# Unbounded otherwise
e += 1
lin_p.pivot(e, l) # 0 not basic anymore
lin_p.basic = [i-1 for i in lin_p.basic]
lin_p.table = lin_p.table[:, 1:]
lin_p.shape = (lin_p.shape[0], lin_p.shape[1] - 1)
lin_p.table[0] = gain_fun
for i in lin_p.basic:
lin_p.table[0, :] = lin_p.table[0, :] - \
lin_p.table[0, i] * \
lin_p.table[1 + lin_p.basic.index(i), :]
lin_p.table[0, -1] = -lin_p.table[0, -1]
return True
else:
return False
def __call__(self, lin_p, recursion_limit=100):
""" Simplex algorithm.
:return: a linear program whose basic solution has maximal objective
value.
"""
if self.is_feasible(lin_p):
simplex = RestrictedSimplex(self.leaving_index,
self.entering_index)
print(" ### Getting back to linear program equivalent to input with feasible basic solution", end="\n\n")
return simplex(lin_p, recursion_limit=recursion_limit)
else:
raise Exception("Linear program is not feasible.")
| 34.827815 | 117 | 0.535463 | 5,031 | 0.956646 | 0 | 0 | 0 | 0 | 0 | 0 | 1,483 | 0.281993 |
331c51cf21a7edb8c933a3fa13b75a18b05760cc | 3,919 | py | Python | openverse_catalog/dags/common/loader/smithsonian_unit_codes.py | yavik-kapadia/openverse-catalog | 853766f2176a96450f456a9fd6675e134c0866e1 | [
"MIT"
] | 25 | 2021-05-06T20:53:45.000Z | 2022-03-30T23:18:50.000Z | openverse_catalog/dags/common/loader/smithsonian_unit_codes.py | yavik-kapadia/openverse-catalog | 853766f2176a96450f456a9fd6675e134c0866e1 | [
"MIT"
] | 272 | 2021-05-17T05:53:00.000Z | 2022-03-31T23:57:20.000Z | openverse_catalog/dags/common/loader/smithsonian_unit_codes.py | yavik-kapadia/openverse-catalog | 853766f2176a96450f456a9fd6675e134c0866e1 | [
"MIT"
] | 13 | 2021-06-12T07:09:06.000Z | 2022-03-29T17:39:13.000Z | """
This program helps identify smithsonian unit codes which are not yet added to
the smithsonian sub-provider dictionary
"""
import logging
from textwrap import dedent
import requests
from airflow.providers.postgres.hooks.postgres import PostgresHook
from common.loader import provider_details as prov
from providers.provider_api_scripts import smithsonian
logger = logging.getLogger(__name__)
DELAY = smithsonian.DELAY
API_KEY = smithsonian.API_KEY
API_ROOT = smithsonian.API_ROOT
UNITS_ENDPOINT = smithsonian.UNITS_ENDPOINT
PARAMS = {"api_key": API_KEY, "q": "online_media_type:Images"}
SUB_PROVIDERS = prov.SMITHSONIAN_SUB_PROVIDERS
SI_UNIT_CODE_TABLE = "smithsonian_new_unit_codes"
def initialise_unit_code_table(postgres_conn_id, unit_code_table):
    """Create the table that stores new unit codes, if it does not exist."""
    postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
postgres.run(
dedent(
f"""
CREATE TABLE IF NOT EXISTS public.{unit_code_table} (
new_unit_code character varying(80),
action character varying(40)
);
"""
)
)
"""
Delete old unit code entries
"""
postgres.run(
dedent(
f"""
DELETE FROM public.{unit_code_table};
"""
)
)
def get_new_and_outdated_unit_codes(unit_code_set, sub_prov_dict=SUB_PROVIDERS):
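    # Compare the unit codes reported by the API with those already present in
    # the SMITHSONIAN_SUB_PROVIDERS dictionary; codes appearing on only one
    # side signal a needed dictionary update.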
sub_provider_unit_code_set = set()
for sub_prov, unit_code_sub_set in sub_prov_dict.items():
sub_provider_unit_code_set = sub_provider_unit_code_set.union(unit_code_sub_set)
new_unit_codes = unit_code_set - sub_provider_unit_code_set
outdated_unit_codes = sub_provider_unit_code_set - unit_code_set
if bool(new_unit_codes):
logger.info(
f"The new unit codes {new_unit_codes} must be added to "
f"the SMITHSONIAN_SUB_PROVIDERS dictionary"
)
if bool(outdated_unit_codes):
logger.info(
f"The outdated unit codes {outdated_unit_codes} must be "
f"deleted from the SMITHSONIAN_SUB_PROVIDERS dictionary"
)
return new_unit_codes, outdated_unit_codes
def alert_unit_codes_from_api(
postgres_conn_id,
unit_code_table="smithsonian_new_unit_codes",
units_endpoint=UNITS_ENDPOINT,
query_params=PARAMS,
):
response = requests.get(units_endpoint, params=query_params)
unit_code_set = set(response.json().get("response", {}).get("terms", []))
new_unit_codes, outdated_unit_codes = get_new_and_outdated_unit_codes(unit_code_set)
initialise_unit_code_table(postgres_conn_id, unit_code_table)
postgres = PostgresHook(postgres_conn_id=postgres_conn_id)
"""
Populate the table with new unit codes
"""
for new_unit_code in new_unit_codes:
postgres.run(
dedent(
f"""
INSERT INTO public.{unit_code_table}
(new_unit_code, action)
VALUES (
'{new_unit_code}', 'add'
);
"""
)
)
"""
Populate the table with outdated unit codes
"""
for outdated_unit_code in outdated_unit_codes:
postgres.run(
dedent(
f"""
INSERT INTO public.{unit_code_table}
(new_unit_code, action)
VALUES (
'{outdated_unit_code}', 'delete'
);
"""
)
)
"""
Raise exception if human intervention is needed to update the
SMITHSONIAN_SUB_PROVIDERS dictionary by checking the entries in the
smithsonian_new_unit_codes table
"""
if bool(new_unit_codes) or bool(outdated_unit_codes):
raise Exception(
"Please check the smithsonian_new_unit_codes table for necessary "
"updates to the SMITHSONIAN_SUB_PROVIDERS dictionary"
)
| 29.02963 | 88 | 0.661393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,632 | 0.416433 |
331c64b5688bf0f03e29dded9df8fd3ced9edae7 | 461 | py | Python | tests/programs/misc/causality_1.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | 1 | 2018-05-19T18:28:12.000Z | 2018-05-19T18:28:12.000Z | tests/programs/misc/causality_1.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | 12 | 2018-04-26T00:58:11.000Z | 2018-05-13T22:03:39.000Z | tests/programs/misc/causality_1.py | astraldawn/pylps | e9964a24bb38657b180d441223b4cdb9e1dadc8a | [
"MIT"
] | null | null | null | from pylps.core import *
initialise(max_time=2)
create_fluents('test(_, _)')
create_actions('hello(_, _)')
create_variables('Person', 'Years', 'NewYears', 'OldYears',)
initially(test('A', 0),)
reactive_rule(True).then(
hello('A', 5),
)
hello(Person, Years).initiates(test(Person, NewYears)).iff(
test(Person, OldYears), NewYears.is_(OldYears + Years)
)
hello(Person, Years).terminates(test(Person, OldYears))
execute(debug=False)
show_kb_log()
| 18.44 | 60 | 0.704989 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 66 | 0.143167 |
331d90c240056dc40d2894ad9cce53ebe9c791c5 | 4,001 | py | Python | gdrivefs-0.14.9-py3.6.egg/gdrivefs/utility.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | gdrivefs-0.14.9-py3.6.egg/gdrivefs/utility.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | gdrivefs-0.14.9-py3.6.egg/gdrivefs/utility.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | import logging
import json
import re
import sys
import gdrivefs.conf
_logger = logging.getLogger(__name__)
# TODO(dustin): Make these individual functions.
class _DriveUtility(object):
"""General utility functions loosely related to GD."""
# # Mime-types to translate to, if they appear within the "exportLinks" list.
# gd_to_normal_mime_mappings = {
# 'application/vnd.google-apps.document':
# 'text/plain',
# 'application/vnd.google-apps.spreadsheet':
# 'application/vnd.ms-excel',
# 'application/vnd.google-apps.presentation':
# 'application/vnd.ms-powerpoint',
# 'application/vnd.google-apps.drawing':
# 'application/pdf',
# 'application/vnd.google-apps.audio':
# 'audio/mpeg',
# 'application/vnd.google-apps.photo':
# 'image/png',
# 'application/vnd.google-apps.video':
# 'video/x-flv'
# }
# Default extensions for mime-types.
# TODO(dustin): !! Move this to the config directory.
default_extensions = {
'text/plain': 'txt',
'application/vnd.ms-excel': 'xls',
'application/vnd.ms-powerpoint': 'ppt',
'application/pdf': 'pdf',
'audio/mpeg': 'mp3',
'image/png': 'png',
'video/x-flv': 'flv'
}
local_character_set = sys.getfilesystemencoding()
def __init__(self):
self.__load_mappings()
def __load_mappings(self):
# Allow someone to override our default mappings of the GD types.
# TODO(dustin): Isn't actually used, so commenting.
# gd_to_normal_mapping_filepath = \
# gdrivefs.conf.Conf.get('gd_to_normal_mapping_filepath')
#
# try:
# with open(gd_to_normal_mapping_filepath, 'r') as f:
# self.gd_to_normal_mime_mappings.extend(json.load(f))
# except IOError:
# _logger.info("No mime-mapping was found.")
# Allow someone to set file-extensions for mime-types, and not rely on
# Python's educated guesses.
extension_mapping_filepath = \
gdrivefs.conf.Conf.get('extension_mapping_filepath')
try:
with open(extension_mapping_filepath, 'r') as f:
                self.default_extensions.update(json.load(f))
except IOError:
_logger.info("No extension-mapping was found.")
def get_first_mime_type_by_extension(self, extension):
found = [
mime_type
for mime_type, temp_extension
in self.default_extensions.items()
if temp_extension == extension
]
if not found:
return None
return found[0]
def translate_filename_charset(self, original_filename):
"""Convert the given filename to the correct character set."""
# fusepy doesn't support the Python 2.x Unicode type. Expect a native
# string (anything but a byte string).
return original_filename
# # If we're in an older version of Python that still defines the Unicode
# # class and the filename isn't unicode, translate it.
#
# try:
# sys.modules['__builtin__'].unicode
# except AttributeError:
# pass
# else:
# if issubclass(original_filename.__class__, unicode) is False:
# return unicode(original_filename)#original_filename.decode(self.local_character_set)
#
# # It's already unicode. Don't do anything.
# return original_filename
def make_safe_for_filename(self, text):
"""Remove any filename-invalid characters."""
        return re.sub(r'[^a-z0-9\-_.]+', '', text)
utility = _DriveUtility()
| 33.90678 | 101 | 0.579355 | 3,812 | 0.952762 | 0 | 0 | 0 | 0 | 0 | 0 | 2,541 | 0.635091 |
331dc380a0542f7715a2dea44ae9eaa75a2bf837 | 193 | py | Python | RecoLocalCalo/HGCalRecProducers/python/HeterogeneousHEBRecHitGPUtoSoA_cfi.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | RecoLocalCalo/HGCalRecProducers/python/HeterogeneousHEBRecHitGPUtoSoA_cfi.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | RecoLocalCalo/HGCalRecProducers/python/HeterogeneousHEBRecHitGPUtoSoA_cfi.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
HEBRecHitGPUtoSoAProd = cms.EDProducer('HEBRecHitGPUtoSoA',
HEBRecHitGPUTok = cms.InputTag('HEBRecHitGPUProd'))
| 38.6 | 90 | 0.663212 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.19171 |
331e3f8204325c6090e303bc0901d2d064d5cbe9 | 4,987 | py | Python | net/data_formatter.py | lhq1/legal-predicetion | 0919732d9aecba17630a3dcaedd3611ca990010c | [
"MIT"
] | null | null | null | net/data_formatter.py | lhq1/legal-predicetion | 0919732d9aecba17630a3dcaedd3611ca990010c | [
"MIT"
] | null | null | null | net/data_formatter.py | lhq1/legal-predicetion | 0919732d9aecba17630a3dcaedd3611ca990010c | [
"MIT"
] | null | null | null | import os
import json
import torch
import random
import numpy as np
from net.loader import accusation_dict, accusation_list, law_dict, law_list
from net.loader import get_num_classes
def check_crit(data):
cnt = 0
for x in data:
if x in accusation_dict.keys():
cnt += 1
else:
return False
return cnt == 1
def check_law(data):
arr = []
for x, y, z in data:
if x < 102 or x > 452:
continue
if not ((x, y) in law_dict.keys()):
return False
arr.append((x, y))
arr = list(set(arr))
arr.sort()
    # After deduplication, exactly one distinct law article must remain.
    return len(arr) == 1
def get_crit_id(data, config):
for x in data:
if x in accusation_dict.keys():
return accusation_dict[x]
def get_law_id(data, config):
for x in data:
y = (x[0], x[1])
if y in law_dict.keys():
return law_dict[y]
def get_time_id(data, config):
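    # Sentence fields (terms are in months): "youqi" = fixed-term imprisonment,
    # "juyi" = criminal detention, "guanzhi" = public surveillance,
    # "wuqi" = life imprisonment, "sixing" = death penalty (assumed meanings of
    # the dataset's Chinese field names).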
v = 0
if len(data["youqi"]) > 0:
v1 = data["youqi"][-1]
else:
v1 = 0
if len(data["guanzhi"]) > 0:
v2 = data["guanzhi"][-1]
else:
v2 = 0
if len(data["juyi"]) > 0:
v3 = data["juyi"][-1]
else:
v3 = 0
v = max(v1, v2, v3)
if data["sixing"]:
opt = 0
elif data["wuqi"]:
opt = 0
elif v > 10 * 12:
opt = 1
elif v > 7 * 12:
opt = 2
elif v > 5 * 12:
opt = 3
elif v > 3 * 12:
opt = 4
elif v > 2 * 12:
opt = 5
elif v > 1 * 12:
opt = 6
elif v > 9:
opt = 7
elif v > 6:
opt = 8
elif v > 0:
opt = 9
else:
opt = 10
return opt
def analyze_crit(data, config):
res = torch.from_numpy(np.zeros(get_num_classes("crit")))
for x in data:
if x in accusation_dict.keys():
res[accusation_dict[x]] = 1
return res
def analyze_law(data, config):
res = torch.from_numpy(np.zeros(get_num_classes("law")))
for x in data:
y = (x[0], x[1])
if y in law_dict.keys():
res[law_dict[y]] = 1
return res
def analyze_time(data, config):
res = torch.from_numpy(np.zeros(get_num_classes("time")))
opt = get_time_id(data, config)
res[opt] = 1
return res
word_dict = {}
def load(x, transformer):
try:
return transformer[x].astype(dtype=np.float32)
except Exception as e:
return transformer['UNK'].astype(dtype=np.float32)
def get_word_vec(x, config, transformer):
vec = load(x, transformer)
return vec
cnt1 = 0
cnt2 = 0
def check_sentence(data, config):
if len(data) > config.getint("data", "sentence_num"):
return False
for x in data:
if len(x) > config.getint("data", "sentence_len"):
return False
return True
def generate_vector(data, config, transformer):
vec = []
len_vec = [0, 0]
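    # len_vec layout: [total word count, sentence count, then one entry per
    # sentence giving its length].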
blank = torch.from_numpy(get_word_vec("BLANK", config, transformer))
for x in data:
temp_vec = []
len_vec.append(len(x))
len_vec[1] += 1
for y in x:
len_vec[0] += 1
z = get_word_vec(y, config, transformer)
temp_vec.append(torch.from_numpy(z))
while len(temp_vec) < config.getint("data", "sentence_len"):
temp_vec.append(blank)
vec.append(torch.stack(temp_vec))
temp_vec = []
while len(temp_vec) < config.getint("data", "sentence_len"):
temp_vec.append(blank)
while len(vec) < config.getint("data", "sentence_num"):
vec.append(torch.stack(temp_vec))
len_vec.append(1)
if len_vec[1] > config.getint("data", "sentence_num"):
pass
for a in range(2, len(len_vec)):
if len_vec[a] > config.getint("data", "sentence_len"):
print(data)
if len(len_vec) != config.getint("data", "sentence_num") + 2:
pass
return torch.stack(vec), torch.LongTensor(len_vec)
def parse(data, config, transformer):
label_list = config.get("data", "type_of_label").replace(" ", "").split(",")
label = []
for x in label_list:
if x == "crit":
label.append(analyze_crit(data["meta"]["crit"], config))
if x == "law":
label.append(analyze_law(data["meta"]["law"], config))
if x == "time":
label.append(analyze_time(data["meta"]["time"], config))
vector, len_vec = generate_vector(data["content"], config, transformer)
return vector, len_vec, torch.cat(label)
def check(data, config):
if not (check_sentence(data["content"], config)):
return False
if len(data["meta"]["criminals"]) != 1:
return False
if len(data["meta"]["crit"]) == 0 or len(data["meta"]["law"]) == 0:
return False
if not (check_crit(data["meta"]["crit"])):
return False
if not (check_law(data["meta"]["law"])):
return False
return True
| 23.413146 | 80 | 0.55384 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 423 | 0.084821 |
331f06145206acb198e0044dbd6333cd441dd2cf | 1,073 | py | Python | doc/programming/parts/python-columninfo.py | laigor/sqlrelay-non-english-fixes- | 7803f862ddbf88bca078c50d621c64c22fc0a405 | [
"PHP-3.01",
"CC-BY-3.0"
] | 16 | 2018-04-23T09:58:33.000Z | 2022-01-31T13:40:20.000Z | doc/programming/parts/python-columninfo.py | laigor/sqlrelay-non-english-fixes- | 7803f862ddbf88bca078c50d621c64c22fc0a405 | [
"PHP-3.01",
"CC-BY-3.0"
] | null | null | null | doc/programming/parts/python-columninfo.py | laigor/sqlrelay-non-english-fixes- | 7803f862ddbf88bca078c50d621c64c22fc0a405 | [
"PHP-3.01",
"CC-BY-3.0"
] | 4 | 2020-12-23T12:17:54.000Z | 2022-01-04T20:46:34.000Z | from SQLRelay import PySQLRClient
con=PySQLRClient.sqlrconnection('sqlrserver',9000,'/tmp/example.socket','user','password',0,1)
cur=PySQLRClient.sqlrcursor(con)
cur.sendQuery('select * from my_table')
con.endSession()
for i in range(cur.colCount()):
print 'Name: ', cur.getColumnName(i)
print 'Type: ', cur.getColumnType(i)
print 'Length: ', cur.getColumnLength(i)
print 'Precision: ', cur.getColumnPrecision(i)
print 'Scale: ', cur.getColumnScale(i)
print 'Longest Field: ', cur.getLongest(i)
print 'Nullable: ', cur.getColumnIsNullable(i)
print 'Primary Key: ', cur.getColumnIsPrimaryKey(i)
print 'Unique: ', cur.getColumnIsUnique(i)
    print 'Part Of Key: ', cur.getColumnIsPartOfKey(i)
print 'Unsigned: ', cur.getColumnIsUnsigned(i)
print 'Zero Filled: ', cur.getColumnIsZeroFilled(i)
print 'Binary: ', cur.getColumnIsBinary(i)
print 'Auto Increment:', cur.getColumnIsAutoIncrement(i)
| 44.708333 | 94 | 0.629077 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 311 | 0.289842 |
3320a3f0ba332b35fac7f43f483b6bf56fe80c12 | 1,394 | py | Python | showdownai/naive_bayes.py | AM22/Pokemon-AI | 4cd29eb880981613158db0055179f4395c5599e3 | [
"MIT"
] | null | null | null | showdownai/naive_bayes.py | AM22/Pokemon-AI | 4cd29eb880981613158db0055179f4395c5599e3 | [
"MIT"
] | null | null | null | showdownai/naive_bayes.py | AM22/Pokemon-AI | 4cd29eb880981613158db0055179f4395c5599e3 | [
"MIT"
] | null | null | null | import json
from data import MOVE_CORRECTIONS, correct_mega
def get_moves(poke, known_moves, graph, data, alpha=1.0):
poke = correct_mega(poke)
co = graph['cooccurences']
freq = graph['frequencies']
probs = {}
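    # Naive-Bayes-style scoring: each candidate move's score is the product of
    # its conditional co-occurrence probabilities given every known move.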
if len(known_moves) == 0:
probs = get_freqs(poke, freq)
else:
for move in known_moves:
if move not in co[poke]:
continue
total = float(sum(co[poke][move].values()))
            for othermove, count in co[poke][move].items():
                # Map corrected move names to their canonical form first, so
                # probability mass accumulates under a single key.
                if othermove in MOVE_CORRECTIONS:
                    othermove = MOVE_CORRECTIONS[othermove]
                if othermove in known_moves:
                    continue
                prob = count / total
                if othermove not in probs:
                    probs[othermove] = 1
                probs[othermove] *= prob
if probs == {}:
probs = get_freqs(poke, freq)
return sorted(probs.items(), key=lambda x: -x[1])
def get_freqs(poke, freq):
probs = {}
total = float(sum(freq[poke].values()))
for move in freq[poke]:
prob = freq[poke][move] / total
probs[move] = prob
return probs
if __name__ == "__main__":
from data import load_data
data, bw_data, graph = load_data('data')
def foo(x, y):
return get_moves(x, y, graph, data)
| 33.190476 | 73 | 0.559541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 43 | 0.030846 |
3321631f6d51317e5fd544639735a47e50542ab6 | 11,369 | py | Python | AppDB/appscale/datastore/fdb/transactions.py | obino/appscale | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | [
"Apache-2.0"
] | 1 | 2017-04-07T15:33:35.000Z | 2017-04-07T15:33:35.000Z | AppDB/appscale/datastore/fdb/transactions.py | obino/appscale | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | [
"Apache-2.0"
] | 1 | 2016-10-27T17:23:54.000Z | 2016-10-27T17:23:54.000Z | AppDB/appscale/datastore/fdb/transactions.py | obino/appscale | be17e5f658d7b42b5aa7eeb7a5ddd4962f3ea82f | [
"Apache-2.0"
] | null | null | null | """
This module stores and retrieves datastore transaction metadata. The
TransactionManager is the main interface that clients can use to interact with
the transaction layer. See its documentation for implementation details.
"""
from __future__ import division
import logging
import math
import random
import sys
from collections import defaultdict
import six
import six.moves as sm
from tornado import gen
from appscale.common.unpackaged import APPSCALE_PYTHON_APPSERVER
from appscale.datastore.dbconstants import BadRequest, InternalError
from appscale.datastore.fdb.codecs import (
decode_str, encode_versionstamp_index, Int64, Path, Text, TransactionID)
from appscale.datastore.fdb.utils import (
DS_ROOT, fdb, MAX_ENTITY_SIZE, ResultIterator, VERSIONSTAMP_SIZE)
sys.path.append(APPSCALE_PYTHON_APPSERVER)
from google.appengine.datastore import entity_pb
logger = logging.getLogger(__name__)
class TransactionMetadata(object):
"""
A TransactionMetadata directory handles the encoding and decoding details for
transaction metadata for a specific project.
The directory path looks like (<project-dir>, 'transactions').
Within this directory, keys are encoded as
<scatter-byte> + <txid> + <rpc-type (optional)> + <rpc-details (optional)>.
The <scatter-byte> is a single byte derived from the txid. Its purpose is to
spread writes more evenly across the cluster and minimize hotspots. This is
especially important for this index because each write is given a new, larger
<txid> value than the last.
The <txid> is an 8-byte integer that serves as a handle for the client to
identify a transaction. It also serves as a read versionstamp for FDB
transactions used within the datastore transaction.
The initial creation of the datastore transaction does not specify any RPC
details. The purpose of that KV is to verify that the datastore transaction
exists (and the garbage collector hasn't cleaned it up) before committing it.
The <rpc-type> is a single byte that indicates what kind of RPC is being
logged as having occurred inside the transaction.
The <rpc-details> encodes the necessary details in order for the datastore
  to reconstruct the RPCs that occurred during the transaction when it comes
time to commit the mutations.
# TODO: Go into more detail about how different RPC types are encoded.
"""
DIR_NAME = u'transactions'
LOOKUPS = b'\x00'
QUERIES = b'\x01'
PUTS = b'\x02'
DELETES = b'\x03'
# The max number of bytes for each FDB value.
_CHUNK_SIZE = 10000
_ENTITY_LEN_SIZE = 3
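  # Large RPC payloads are split into 10KB value chunks (ordered by a one-byte
  # chunk index) to stay well under FoundationDB's 100KB value-size limit.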
def __init__(self, directory):
self.directory = directory
@property
def project_id(self):
return self.directory.get_path()[len(DS_ROOT)]
@classmethod
def directory_path(cls, project_id):
return project_id, cls.DIR_NAME
def encode_start_key(self, scatter_val, commit_versionstamp=None):
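    # When no commit versionstamp is available yet, a zeroed placeholder plus a
    # versionstamp-index suffix lets FDB splice the real versionstamp in at
    # commit time (via set_versionstamped_key).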
key = b''.join([self.directory.rawPrefix, six.int2byte(scatter_val),
commit_versionstamp or b'\x00' * VERSIONSTAMP_SIZE])
if not commit_versionstamp:
key += encode_versionstamp_index(len(key) - VERSIONSTAMP_SIZE)
return key
def encode_lookups(self, txid, keys):
section_prefix = self._txid_prefix(txid) + self.LOOKUPS
return self._encode_chunks(section_prefix, self._encode_keys(keys))
def encode_query_key(self, txid, namespace, ancestor_path):
if not isinstance(ancestor_path, tuple):
ancestor_path = Path.flatten(ancestor_path)
section_prefix = self._txid_prefix(txid) + self.QUERIES
encoded_ancestor = Text.encode(namespace) + Path.pack(ancestor_path[:2])
return section_prefix + encoded_ancestor
def encode_puts(self, txid, entities):
section_prefix = self._txid_prefix(txid) + self.PUTS
encoded_entities = [entity.Encode() for entity in entities]
value = b''.join([b''.join([self._encode_entity_len(entity), entity])
for entity in encoded_entities])
return self._encode_chunks(section_prefix, value)
def encode_deletes(self, txid, keys):
section_prefix = self._txid_prefix(txid) + self.DELETES
return self._encode_chunks(section_prefix, self._encode_keys(keys))
def decode_metadata(self, txid, kvs):
lookup_rpcs = defaultdict(list)
queried_groups = set()
mutation_rpcs = []
rpc_type_index = len(self._txid_prefix(txid))
current_versionstamp = None
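    # Chunked values that share an RPC versionstamp belong to the same RPC;
    # group them back together while scanning in key order.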
for kv in kvs:
rpc_type = kv.key[rpc_type_index]
pos = rpc_type_index + 1
if rpc_type == self.QUERIES:
namespace, pos = Text.decode(kv.key, pos)
group_path = Path.unpack(kv.key, pos)[0]
queried_groups.add((namespace, group_path))
continue
rpc_versionstamp = kv.key[pos:pos + VERSIONSTAMP_SIZE]
if rpc_type == self.LOOKUPS:
lookup_rpcs[rpc_versionstamp].append(kv.value)
elif rpc_type in (self.PUTS, self.DELETES):
if current_versionstamp == rpc_versionstamp:
mutation_rpcs[-1].append(kv.value)
else:
current_versionstamp = rpc_versionstamp
mutation_rpcs.append([rpc_type, kv.value])
else:
raise InternalError(u'Unrecognized RPC type')
lookups = set()
mutations = []
for chunks in six.itervalues(lookup_rpcs):
lookups.update(self._unpack_keys(b''.join(chunks)))
for rpc_info in mutation_rpcs:
rpc_type = rpc_info[0]
blob = b''.join(rpc_info[1:])
if rpc_type == self.PUTS:
mutations.extend(self._unpack_entities(blob))
else:
mutations.extend(self._unpack_keys(blob))
return lookups, queried_groups, mutations
def get_txid_slice(self, txid):
prefix = self._txid_prefix(txid)
return slice(fdb.KeySelector.first_greater_or_equal(prefix),
fdb.KeySelector.first_greater_or_equal(prefix + b'\xFF'))
def get_expired_slice(self, scatter_byte, safe_versionstamp):
prefix = self.directory.rawPrefix + six.int2byte(scatter_byte)
return slice(
fdb.KeySelector.first_greater_or_equal(prefix),
fdb.KeySelector.first_greater_or_equal(prefix + safe_versionstamp))
def _txid_prefix(self, txid):
scatter_val, commit_versionstamp = TransactionID.decode(txid)
return (self.directory.rawPrefix + six.int2byte(scatter_val) +
commit_versionstamp)
def _encode_keys(self, keys):
return b''.join(
[Text.encode(decode_str(key.name_space())) + Path.pack(key.path())
for key in keys])
def _unpack_keys(self, blob):
keys = []
pos = 0
while pos < len(blob):
namespace, pos = Text.decode(blob, pos)
path, pos = Path.unpack(blob, pos)
key = entity_pb.Reference()
key.set_app(self.project_id)
key.set_name_space(namespace)
key.mutable_path().MergeFrom(Path.decode(path))
keys.append(key)
return keys
def _unpack_entities(self, blob):
pos = 0
entities = []
while pos < len(blob):
entity_len = Int64.decode_bare(blob[pos:pos + self._ENTITY_LEN_SIZE])
pos += self._ENTITY_LEN_SIZE
entities.append(entity_pb.EntityProto(blob[pos:pos + entity_len]))
pos += entity_len
return entities
def _encode_key_len(self, key):
return bytes(bytearray([key.path().element_size()]))
def _encode_entity_len(self, encoded_entity):
if len(encoded_entity) > MAX_ENTITY_SIZE:
raise BadRequest(u'Entity exceeds maximum size')
return Int64.encode_bare(len(encoded_entity), self._ENTITY_LEN_SIZE)
def _encode_chunks(self, section_prefix, value):
full_prefix = section_prefix + b'\x00' * VERSIONSTAMP_SIZE
versionstamp_index = encode_versionstamp_index(len(section_prefix))
chunk_count = int(math.ceil(len(value) / self._CHUNK_SIZE))
return tuple(
(full_prefix + six.int2byte(index) + versionstamp_index,
value[index * self._CHUNK_SIZE:(index + 1) * self._CHUNK_SIZE])
for index in sm.range(chunk_count))
class TransactionManager(object):
"""
The TransactionManager is the main interface that clients can use to interact
with the transaction layer. It makes use of TransactionMetadata directories
to handle the encoding and decoding details when satisfying requests.
"""
def __init__(self, db, tornado_fdb, directory_cache):
self._db = db
self._tornado_fdb = tornado_fdb
self._directory_cache = directory_cache
@gen.coroutine
def create(self, project_id):
tr = self._db.create_transaction()
tx_dir = yield self._tx_metadata(tr, project_id)
scatter_val = random.randint(0, 15)
tr.set_versionstamped_key(tx_dir.encode_start_key(scatter_val), b'')
versionstamp_future = tr.get_versionstamp()
yield self._tornado_fdb.commit(tr)
txid = TransactionID.encode(scatter_val, versionstamp_future.wait().value)
raise gen.Return(txid)
@gen.coroutine
def log_lookups(self, tr, project_id, get_request):
txid = get_request.transaction().handle()
tx_dir = yield self._tx_metadata(tr, project_id)
for key, value in tx_dir.encode_lookups(txid, get_request.key_list()):
tr.set_versionstamped_key(key, value)
@gen.coroutine
def log_query(self, tr, project_id, query):
txid = query.transaction().handle()
namespace = decode_str(query.name_space())
if not query.has_ancestor():
raise BadRequest(u'Queries in a transaction must specify an ancestor')
tx_dir = yield self._tx_metadata(tr, project_id)
tr[tx_dir.encode_query_key(txid, namespace, query.ancestor().path())] = b''
@gen.coroutine
def log_puts(self, tr, project_id, put_request):
txid = put_request.transaction().handle()
tx_dir = yield self._tx_metadata(tr, project_id)
for key, value in tx_dir.encode_puts(txid, put_request.entity_list()):
tr.set_versionstamped_key(key, value)
@gen.coroutine
def log_deletes(self, tr, project_id, delete_request):
txid = delete_request.transaction().handle()
tx_dir = yield self._tx_metadata(tr, project_id)
for key, value in tx_dir.encode_deletes(txid, delete_request.key_list()):
tr.set_versionstamped_key(key, value)
@gen.coroutine
def delete(self, tr, project_id, txid):
tx_dir = yield self._tx_metadata(tr, project_id)
txid_slice = tx_dir.get_txid_slice(txid)
del tr[txid_slice.start.key:txid_slice.stop.key]
@gen.coroutine
def get_metadata(self, tr, project_id, txid):
tx_dir = yield self._tx_metadata(tr, project_id)
results = yield ResultIterator(tr, self._tornado_fdb,
tx_dir.get_txid_slice(txid)).list()
scatter_val, tx_start_versionstamp = TransactionID.decode(txid)
if (not results or
results[0].key != tx_dir.encode_start_key(scatter_val,
tx_start_versionstamp)):
raise BadRequest(u'Transaction not found')
raise gen.Return(tx_dir.decode_metadata(txid, results[1:]))
@gen.coroutine
def clear_range(self, tr, project_id, scatter_byte, safe_versionstamp):
tx_dir = yield self._tx_metadata(tr, project_id)
expired_slice = tx_dir.get_expired_slice(scatter_byte, safe_versionstamp)
del tr[expired_slice.start.key:expired_slice.stop.key]
@gen.coroutine
def _tx_metadata(self, tr, project_id):
path = TransactionMetadata.directory_path(project_id)
directory = yield self._directory_cache.get(tr, path)
raise gen.Return(TransactionMetadata(directory))
| 36.79288 | 79 | 0.723371 | 10,458 | 0.91987 | 2,844 | 0.250154 | 3,168 | 0.278652 | 0 | 0 | 2,152 | 0.189287 |
3321f706d02f1fdbc011b2d0a28c18fa45d4fe4b | 810 | py | Python | Line_chart.py | sanabasangare/data-visualization | 09a03d0414941d28e312037ccaa0b283dbb2ec06 | [
"MIT"
] | null | null | null | Line_chart.py | sanabasangare/data-visualization | 09a03d0414941d28e312037ccaa0b283dbb2ec06 | [
"MIT"
] | null | null | null | Line_chart.py | sanabasangare/data-visualization | 09a03d0414941d28e312037ccaa0b283dbb2ec06 | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from collections import Counter
def line_graph(plt):
# years observed since 2000
years = [2000, 2002, 2005, 2007, 2010, 2012, 2014, 2015]
# total number of websites on the world wide web
# (source: Internet Live Stats)
websites = [17, 38, 64, 121, 206, 697, 968, 863]
# create a line chart with years on x-axis and number of websites on y-axis
plt.plot(years, websites, color='blue', marker='o', linestyle='solid',
linewidth=2)
# adjust the x and y axis markers
plt.xlim(2000, 2015)
plt.ylim(10, 1000)
# add a title to the chart
plt.title("Total number of websites online")
# add a label to the y-axis
plt.ylabel("Websites (millions)")
plt.show()
if __name__ == "__main__":
line_graph(plt)
| 24.545455 | 79 | 0.650617 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 347 | 0.428395 |
3322558b16b9e4d86a91248106c527a75ff6b943 | 2,350 | py | Python | miyamoto/test/mocks.py | caedesvvv/miyamoto | d781dffeb0b2af3ce679f13114f47e6965d1cdb1 | ["MIT"] | 1 | 2015-01-20T17:32:19.000Z | 2015-01-20T17:32:19.000Z | miyamoto/test/mocks.py | caedesvvv/miyamoto | d781dffeb0b2af3ce679f13114f47e6965d1cdb1 | ["MIT"] | null | null | null | miyamoto/test/mocks.py | caedesvvv/miyamoto | d781dffeb0b2af3ce679f13114f47e6965d1cdb1 | ["MIT"] | null | null | null | from twisted.web import server, resource
class MockSubscriber(resource.Resource):
isLeaf = True
def render_GET(self, request):
if request.path.endswith('/callback'):
return request.args.get('hub.challenge', [''])[0]
else:
return "Huh?"
class MockPublisher(resource.Resource):
isLeaf = True
def render(self, request):
host = '%s:%s' % (request.host.host, request.host.port)
if request.path.endswith('/happycats.xml'):
request.setHeader('content-type', 'application/atom+xml')
return """<?xml version="1.0"?>
<feed>
<!-- Normally here would be source, title, etc ... -->
<link rel="hub" href="http://%s/" />
<link rel="self" href="http://%s%s" />
<updated>2008-08-11T02:15:01Z</updated>
<!-- Example of a full entry. -->
<entry>
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<content>
What a happy cat. Full content goes here.
</content>
</entry>
<!-- Example of an entity that isn't full/is truncated. This is implied
by the lack of a <content> element and a <summary> element instead. -->
<entry >
<title>Heathcliff</title>
<link href="http://publisher.example.com/happycat25.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
<summary>
What a happy cat!
</summary>
</entry>
<!-- Meta-data only; implied by the lack of <content> and
<summary> elements. -->
<entry>
<title>Garfield</title>
<link rel="alternate" href="http://publisher.example.com/happycat24.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-08-11T02:15:01Z</updated>
</entry>
<!-- Context entry that's meta-data only and not new. Implied because the
update time on this entry is before the //atom:feed/updated time. -->
<entry>
<title>Nermal</title>
<link rel="alternate" href="http://publisher.example.com/happycat23s.xml" />
<id>http://publisher.example.com/happycat25.xml</id>
<updated>2008-07-10T12:28:13Z</updated>
</entry>
</feed>""" % (host, host, request.path)
else:
return 'Huh?' | 33.098592 | 80 | 0.625106 | 2,297 | 0.977447 | 0 | 0 | 0 | 0 | 0 | 0 | 1,781 | 0.757872 |
332383050871a4f49b30722483065061b8a0db71 | 1,901 | py | Python | covid19/data.py | edupenabad/covid-19-notebooks | 0b68c12c3f4e36c42476fef15a769326cb0c3d38 | ["Apache-2.0"] | null | null | null | covid19/data.py | edupenabad/covid-19-notebooks | 0b68c12c3f4e36c42476fef15a769326cb0c3d38 | ["Apache-2.0"] | null | null | null | covid19/data.py | edupenabad/covid-19-notebooks | 0b68c12c3f4e36c42476fef15a769326cb0c3d38 | ["Apache-2.0"] | null | null | null | import pathlib
import numpy as np
import pandas as pd
import requests
DATA_REPOS = {
"world": {
"url": "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master",
"streams": {
"deaths": "{url}/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Deaths.csv"
},
},
"italy": {
"url": "https://raw.githubusercontent.com/pcm-dpc/COVID-19/master",
"streams": {
"andamento-nazionale": "{url}/dati-andamento-nazionale/dpc-covid19-ita-andamento-nazionale.csv",
"regioni": "{url}/dati-regioni/dpc-covid19-ita-regioni.csv",
"province": "{url}/dati-province/dpc-covid19-ita-province.csv",
},
},
}
def download(url, path=".", repo="italy"):
repo = DATA_REPOS[repo]
base_url = repo["url"]
stream_url = repo["streams"].get(url, url).format(url=base_url)
root_path = pathlib.Path(path)
download_path = root_path / stream_url.rpartition("/")[2]
with requests.get(stream_url) as resp:
with open(download_path, "wb") as fp:
fp.write(resp.content)
return str(download_path)
def reformat(path, varname='deaths'):
raw_data = pd.read_csv(path)
lines = []
dates = [np.datetime64('20{2}-{0:02d}-{1:02d}'.format(*map(int, d.split('/')))) for d in raw_data.columns[4:]]
for _, record in raw_data.iterrows():  # avoid shadowing the inner loop index below
for i, d in enumerate(record[4:]):
location = record['Country/Region'].strip()
if isinstance(record['Province/State'], str):
location += ' - ' + record['Province/State'].strip()
if d > 0:
lines.append({
'location': location,
'country': record['Country/Region'],
varname: d,
'date': dates[i]
})
return pd.DataFrame(lines).set_index('date')
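# Example usage (a minimal sketch; assumes network access to the repositories above):
#   path = download("deaths", repo="world")
#   deaths = reformat(path, varname="deaths")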
| 33.946429 | 114 | 0.579695 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 634 | 0.333509 |
3324e48df6cb45bfb982f9754f04ae47cbd3bffa | 7,484 | py | Python | util/utils.py | choyoungjung/xray-align-AR | 18847c01008fe5a53bbdea5915a1a4e84e7c7f22 | ["MIT"] | null | null | null | util/utils.py | choyoungjung/xray-align-AR | 18847c01008fe5a53bbdea5915a1a4e84e7c7f22 | ["MIT"] | null | null | null | util/utils.py | choyoungjung/xray-align-AR | 18847c01008fe5a53bbdea5915a1a4e84e7c7f22 | ["MIT"] | 1 | 2022-02-23T06:45:04.000Z | 2022-02-23T06:45:04.000Z | from __future__ import print_function
import random
from torch.autograd import Variable
import torch
from PIL import Image
import numpy as np
import math
import os
import cv2
'''
Code from
https://github.com/ycszen/pytorch-seg/blob/master/transform.py
Modified so it complies with the Cityscape label map colors
'''
def learning_rate_scheduler(opt, old_lr, epoch):
new_lr_G = 0.0
new_lr_D = 0.0
if opt['lr_update'] == 'linear':
if epoch > opt['niter']:
lrd = opt['lr'] / opt['niter_decay']
new_lr = old_lr - lrd
else:
new_lr = old_lr
new_lr_G = new_lr
new_lr_D = new_lr
elif opt['lr_update'] == 'TTUR':
if epoch > opt['niter']:
lrd = opt['lr'] / opt['niter_decay']
new_lr = old_lr - lrd
else:
new_lr = old_lr
new_lr_G = old_lr / 2.0
new_lr_D = old_lr * 2.0
elif opt['lr_update'] == 'cosine':
reduction_ratio = 0.5 * (1 + math.cos(math.pi * epoch / (opt['niter'] + opt['niter_decay'])))
new_lr = old_lr * reduction_ratio
new_lr_G = old_lr * reduction_ratio
new_lr_D = old_lr * reduction_ratio
else:
raise ValueError('Learning Rate Update Mode {} not implemented'.format(opt['lr_update']))
return new_lr_G, new_lr_D, new_lr
def uint82bin(n, count=8):
"""returns the binary of integer n, count refers to amount of bits"""
return ''.join([str((n >> y) & 1) for y in range(count-1, -1, -1)])
def labelcolormap(N):
if N == 20: # cityscape
label_colours = [(0,0,0)
# 0=Background
,(128,0,0),(255,0,0),(0,85,0),(170,0,51),(255,85,0)
# 1=Hat, 2=Hair, 3=Glove, 4=Sunglasses, 5=UpperClothes
,(0,0,85),(0,119,221),(85,85,0),(0,85,85),(85,51,0)
# 6=Dress, 7=Coat, 8=Socks, 9=Pants, 10=Jumpsuits
,(52,86,128),(0,128,0),(0,0,255),(51,170,221),(0,255,255)
# 11=Scarf, 12=Skirt, 13=Face, 14=LeftArm, 15=RightArm
,(85,255,170),(170,255,85),(255,255,0),(255,170,0)]
# 16=LeftLeg, 17=RightLeg, 18=LeftShoe, 19=RightShoe
cmap = np.array(label_colours,dtype=np.uint8)
else:
cmap = np.zeros((N, 3), dtype=np.uint8)
for i in range(N):
r, g, b = 0, 0, 0
idx = i
for j in range(7):
str_id = uint82bin(idx)
r = r ^ (np.uint8(str_id[-1]) << (7-j))
g = g ^ (np.uint8(str_id[-2]) << (7-j))
b = b ^ (np.uint8(str_id[-3]) << (7-j))
idx >>= 3
cmap[i, 0] = r
cmap[i, 1] = g
cmap[i, 2] = b
return cmap
class Colorize(object):
def __init__(self, n=20):
self.cmap = labelcolormap(n)
self.cmap = torch.from_numpy(self.cmap[:n])
def __call__(self, gray_image):
size = gray_image.size()
color_image = torch.ByteTensor(3, size[1], size[2]).fill_(0)
for label in range(0, len(self.cmap)):
mask = (label == gray_image[0]).cpu()
color_image[0][mask] = self.cmap[label][0]
color_image[1][mask] = self.cmap[label][1]
color_image[2][mask] = self.cmap[label][2]
return color_image
# Converts a Tensor into a Numpy array
# |imtype|: the desired type of the converted numpy array
def tensor2im(image_tensor, imtype=np.uint8, normalize=True):
if isinstance(image_tensor, list):
image_numpy = []
for i in range(len(image_tensor)):
image_numpy.append(tensor2im(image_tensor[i], imtype, normalize))
return image_numpy
image_numpy = image_tensor.cpu().float().numpy()
if normalize:
image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
else:
image_numpy = np.transpose(image_numpy, (1, 2, 0)) * 255.0
image_numpy = np.clip(image_numpy, 0, 255)
if image_numpy.shape[2] == 1 or image_numpy.shape[2] > 3:
image_numpy = image_numpy[:,:,0]
return image_numpy.astype(imtype)
# Converts a one-hot tensor into a colorful label map
def tensor2label(label_tensor, n_label, imtype=np.uint8):
if n_label == 0:
return tensor2im(label_tensor, imtype)
label_tensor = label_tensor.float()
if label_tensor.size()[0] > 1:
label_tensor = label_tensor.max(0, keepdim=True)[1]
label_tensor = Colorize(n_label)(label_tensor)
label_numpy = np.transpose(label_tensor.numpy(), (1, 2, 0))
return label_numpy.astype(imtype)
def tensor2edgemap(label_tensor, imtype=np.uint8):
edgemap = torch.argmax(label_tensor,dim=0,keepdim=True)
edgemap = edgemap.squeeze(0)
edgemap = edgemap.cpu().float().numpy()
return edgemap.astype(imtype)
def mkdirs(paths):
if isinstance(paths, list) and not isinstance(paths, str):
for path in paths:
mkdir(path)
else:
mkdir(paths)
def mkdir(path):
if not os.path.exists(path):
os.makedirs(path)
def gen_noise(shape):
noise = np.zeros(shape, dtype=np.uint8)
noise = cv2.randn(noise, 0, 255)
noise = np.asarray(noise / 255, dtype=np.uint8)
noise = torch.tensor(noise, dtype=torch.float32)
return noise
def save_image(image_numpy, image_path):
image_pil = Image.fromarray(image_numpy)
image_pil.save(image_path)
# noinspection PyBroadException
def save_images(img_tensors, img_names, save_dir):
for img_tensor, img_name in zip(img_tensors, img_names):
tensor = (img_tensor.clone()+1)*0.5 * 255
tensor = tensor.cpu().clamp(0,255)
try:
array = tensor.numpy().astype('uint8')
except Exception:
array = tensor.detach().numpy().astype('uint8')
if array.shape[0] == 1:
array = array.squeeze(0)
elif array.shape[0] == 3:
array = array.swapaxes(0, 1).swapaxes(1, 2)
im = Image.fromarray(array)
im.save(os.path.join(save_dir, img_name), format='JPEG')
def load_checkpoint(model, checkpoint_path):
if not os.path.exists(checkpoint_path):
raise ValueError("'{}' is not a valid checkpoint path".format(checkpoint_path))
model.load_state_dict(torch.load(checkpoint_path))
class ImagePool:
def __init__(self, pool_size):
self.pool_size = pool_size
if self.pool_size > 0:
self.num_imgs = 0
self.images = []
def query(self, images):
if self.pool_size == 0:
return images
return_images = []
for image in images.data:
image = torch.unsqueeze(image, 0)
if self.num_imgs < self.pool_size:
self.num_imgs += 1
self.images.append(image)
return_images.append(image)
else:
p = random.uniform(0, 1)
if p > 0.5:
random_id = random.randint(0, self.pool_size-1)
tmp = self.images[random_id].clone()
self.images[random_id] = image
return_images.append(tmp)
else:
return_images.append(image)
return_images = Variable(torch.cat(return_images, 0))
return return_images
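# Minimal usage sketch for ImagePool (hypothetical tensors): keep a history of
# previously generated images and feed a mix of old and new fakes to the
# discriminator, as in CycleGAN-style training.
#   pool = ImagePool(pool_size=50)
#   fake_for_discriminator = pool.query(fake_images)  # fake_images: Variable of shape (N, C, H, W)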
| 34.971963 | 102 | 0.568813 | 1,623 | 0.216863 | 0 | 0 | 0 | 0 | 0 | 0 | 879 | 0.117451 |
33257a0af8469e27f7a0ccf12276dbf023a6e9b0 | 276 | py | Python | pythonx/lints/vim/vint.py | maralla/validator.vim | fd5ec0891cbd035bd572e74d684b8afd852b87bf | ["MIT"] | 255 | 2016-09-08T12:12:26.000Z | 2022-03-10T01:50:06.000Z | pythonx/lints/vim/vint.py | maralla/vim-fixup | fd5ec0891cbd035bd572e74d684b8afd852b87bf | ["MIT"] | 56 | 2016-09-09T05:53:24.000Z | 2020-11-11T16:02:05.000Z | pythonx/lints/vim/vint.py | maralla/vim-linter | fd5ec0891cbd035bd572e74d684b8afd852b87bf | ["MIT"] | 23 | 2016-09-09T13:37:51.000Z | 2019-04-08T22:31:24.000Z | # -*- coding: utf-8 -*-
from validator import Validator
class VimVint(Validator):
__filetype__ = 'vim'
checker = 'vint'
args = '-w --no-color'
regex = r"""
.+?:
(?P<lnum>\d+):
(?P<col>\d+):
\s(?P<text>.+)"""
| 17.25 | 31 | 0.434783 | 216 | 0.782609 | 0 | 0 | 0 | 0 | 0 | 0 | 153 | 0.554348 |
33269c8198e5473d3ddda4bf83fff9637afee268 | 1,793 | py | Python | src/graph2.py | gpu0/nnGraph | ae68af41804ce95dd4dbd6deeea57e377915acc9 | ["MIT"] | null | null | null | src/graph2.py | gpu0/nnGraph | ae68af41804ce95dd4dbd6deeea57e377915acc9 | ["MIT"] | null | null | null | src/graph2.py | gpu0/nnGraph | ae68af41804ce95dd4dbd6deeea57e377915acc9 | ["MIT"] | null | null | null | # t = 2 * (x*y + max(z,w))
class Num:
def __init__(self, val):
self.val = val
def forward(self):
return self.val
def backward(self, val):
print val
class Mul:
def __init__(self, left, right):
self.left = left
self.right = right
def forward(self):
self.left_fw = self.left.forward()
self.right_fw = self.right.forward()
return self.left_fw * self.right_fw
def backward(self, val):
self.left.backward(val * self.right_fw)
self.right.backward(val * self.left_fw)
class Factor:
def __init__(self, center, factor):
self.center = center
self.factor = factor
def forward(self):
return self.factor * self.center.forward()
def backward(self, val):
self.center.backward(val * self.factor)
class Add:
def __init__(self, left, right):
self.left = left
self.right = right
def forward(self):
return self.left.forward() + self.right.forward()
def backward(self, val):
self.left.backward(val)
self.right.backward(val)
class Max:
def __init__(self, left, right):
self.left = left
self.right = right
def forward(self):
self.left_fw = self.left.forward()
self.right_fw = self.right.forward()
self.out = 0
if self.left_fw > self.right_fw:
self.out = 1
return self.left_fw
return self.right_fw
def backward(self, val):
self.left.backward(val * self.out)
self.right.backward(val * (1 - self.out))
if __name__ == '__main__':
x = Num(3)
y = Num(-4)
z = Num(2)
w = Num(-1)
p = Mul(x, y)
q = Max(z, w)
r = Add(p, q)
t = Factor(r, 2)
print t.forward()
t.backward(1)
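# Hand-checked expected output for the values above: the forward pass returns
# 2 * (3 * -4 + max(2, -1)) = -20, and backward(1) prints the gradients in order:
# dt/dx = 2*y = -8, dt/dy = 2*x = 6, dt/dz = 2 (since z > w), dt/dw = 0.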
| 25.985507 | 57 | 0.572783 | 1,551 | 0.865031 | 0 | 0 | 0 | 0 | 0 | 0 | 36 | 0.020078 |
3327097fad25f8089f5aa779974b776188cc2bb0 | 2,599 | py | Python | skeleton_video.py | ashish1sasmal/Human-Skeleton-Estimation | 290cde92191b2b6b0c28189667851119f5ca564d | ["MIT"] | null | null | null | skeleton_video.py | ashish1sasmal/Human-Skeleton-Estimation | 290cde92191b2b6b0c28189667851119f5ca564d | ["MIT"] | null | null | null | skeleton_video.py | ashish1sasmal/Human-Skeleton-Estimation | 290cde92191b2b6b0c28189667851119f5ca564d | ["MIT"] | null | null | null | # @Author: ASHISH SASMAL <ashish>
# @Date: 20-10-2020
# @Last modified by: ashish
# @Last modified time: 20-10-2020
import cv2
import numpy as np
import time
proto = "Models/pose_deploy_linevec_faster_4_stages.prototxt"
weights= "Models/pose_iter_160000.caffemodel"
net = cv2.dnn.readNetFromCaffe(proto, weights)
net.setPreferableBackend(cv2.dnn.DNN_TARGET_CPU)
print("Using CPU device")
wid = 368
height=368
gt = cv2.VideoCapture("sample2.mp4")
hasFrame, frame = gt.read()
vid_writer1 = cv2.VideoWriter('output.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame.shape[1],frame.shape[0]))
vid_writer2 = cv2.VideoWriter('output2.avi',cv2.VideoWriter_fourcc('M','J','P','G'), 10, (frame.shape[1],frame.shape[0]))
while cv2.waitKey(1) < 0:
hasFrame, image = gt.read()
if not hasFrame:
    break
image_copy = np.copy(image)
image_wid = image.shape[1]
image_height = image.shape[0]
thresh = np.zeros((frame.shape[0],frame.shape[1],1), np.uint8)
thresh = cv2.cvtColor(thresh,cv2.COLOR_GRAY2BGR)
blob = cv2.dnn.blobFromImage(image, 1.0/255, (wid,height), (0,0,0), swapRB = False, crop = False)
net.setInput(blob)
POSE_PAIRS = [[0,1], [1,2], [2,3], [3,4], [1,5], [5,6], [6,7], [1,14], [14,8], [8,9], [9,10], [14,11], [11,12], [12,13] ]
preds = net.forward()
H = preds.shape[2]
W = preds.shape[3]
# Empty list to store the detected keypoints
points = []
for i in range(15):
probMap = preds[0, i, :, :]
minVal, prob, minLoc, point = cv2.minMaxLoc(probMap)
x = (image_wid * point[0]) / W
y = (image_height * point[1]) / H
if prob >0.1 :
# cv2.circle(image_copy, (int(x), int(y)), 3, (0, 255, 255), thickness=-1, lineType=cv2.FILLED)
# cv2.putText(image_copy, "{}".format(i), (int(x), int(y)), cv2.FONT_HERSHEY_SIMPLEX, 1.4, (0, 0, 255), 3, lineType=cv2.LINE_AA)
points.append((int(x), int(y)))
else :
points.append(None)
for pair in POSE_PAIRS:
partA = pair[0]
partB = pair[1]
if points[partA] and points[partB]:
cv2.line(image, points[partA], points[partB], (199,99,0), 2)
cv2.circle(image, points[partA], 4, (17,199,0), thickness=-1, lineType=cv2.FILLED)
cv2.line(thresh, points[partA], points[partB], (199,99,0), 2)
cv2.circle(thresh, points[partA], 4, (17,199,0), thickness=-1, lineType=cv2.FILLED)
cv2.imshow('Output-Skeleton', image)
cv2.imshow('Output-Skeleton2', thresh)
vid_writer1.write(image)
vid_writer2.write(thresh)
gt.release()
cv2.destroyAllWindows()
| 33.320513 | 140 | 0.621008 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 587 | 0.225856 |
33270a402f92d2ec94e2db647e7c5d239b6f78fe | 3,733 | py | Python | temboo/core/Library/LittleSis/Relationship/GetOneRelationship.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | ["Apache-2.0"] | 7 | 2016-03-07T02:07:21.000Z | 2022-01-21T02:22:41.000Z | temboo/core/Library/LittleSis/Relationship/GetOneRelationship.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | ["Apache-2.0"] | null | null | null | temboo/core/Library/LittleSis/Relationship/GetOneRelationship.py | jordanemedlock/psychtruths | 52e09033ade9608bd5143129f8a1bfac22d634dd | ["Apache-2.0"] | 8 | 2016-06-14T06:01:11.000Z | 2020-04-22T09:21:44.000Z | # -*- coding: utf-8 -*-
###############################################################################
#
# GetOneRelationship
# Retrieves information about any known relationship between two entities in LittleSis according to their IDs.
#
# Python versions 2.6, 2.7, 3.x
#
# Copyright 2014, Temboo Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
#
###############################################################################
from temboo.core.choreography import Choreography
from temboo.core.choreography import InputSet
from temboo.core.choreography import ResultSet
from temboo.core.choreography import ChoreographyExecution
import json
class GetOneRelationship(Choreography):
def __init__(self, temboo_session):
"""
Create a new instance of the GetOneRelationship Choreo. A TembooSession object, containing a valid
set of Temboo credentials, must be supplied.
"""
super(GetOneRelationship, self).__init__(temboo_session, '/Library/LittleSis/Relationship/GetOneRelationship')
def new_input_set(self):
return GetOneRelationshipInputSet()
def _make_result_set(self, result, path):
return GetOneRelationshipResultSet(result, path)
def _make_execution(self, session, exec_id, path):
return GetOneRelationshipChoreographyExecution(session, exec_id, path)
class GetOneRelationshipInputSet(InputSet):
"""
An InputSet with methods appropriate for specifying the inputs to the GetOneRelationship
Choreo. The InputSet object is used to specify input parameters when executing this Choreo.
"""
def set_APIKey(self, value):
"""
Set the value of the APIKey input for this Choreo. ((required, string) The API Key obtained from LittleSis.org.)
"""
super(GetOneRelationshipInputSet, self)._set_input('APIKey', value)
def set_EntityIDs(self, value):
"""
Set the value of the EntityIDs input for this Choreo. ((required, string) The IDs of the entities between which you want to find relationships. Format is a semicolon delimited string (e.g. 1026;1))
"""
super(GetOneRelationshipInputSet, self)._set_input('EntityIDs', value)
def set_ResponseFormat(self, value):
"""
Set the value of the ResponseFormat input for this Choreo. ((optional, string) Format of the response returned by LittleSis.org. Acceptable inputs: xml or json. Defaults to xml)
"""
super(GetOneRelationshipInputSet, self)._set_input('ResponseFormat', value)
class GetOneRelationshipResultSet(ResultSet):
"""
A ResultSet with methods tailored to the values returned by the GetOneRelationship Choreo.
The ResultSet object is used to retrieve the results of a Choreo execution.
"""
def getJSONFromString(self, str):
return json.loads(str)
def get_Response(self):
"""
Retrieve the value for the "Response" output from this Choreo execution. (The response from LittleSis.org.)
"""
return self._output.get('Response', None)
class GetOneRelationshipChoreographyExecution(ChoreographyExecution):
def _make_result_set(self, response, path):
return GetOneRelationshipResultSet(response, path)
| 40.139785 | 205 | 0.699437 | 2,581 | 0.691401 | 0 | 0 | 0 | 0 | 0 | 0 | 2,250 | 0.602732 |
3327fef21614dc3511498e572fb63b8ff5adf0a3 | 9,385 | py | Python | rec_to_nwb/test/processing/tools/test_beartype.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | ["Apache-2.0"] | 8 | 2020-05-29T13:48:35.000Z | 2021-11-19T04:24:48.000Z | rec_to_nwb/test/processing/tools/test_beartype.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | ["Apache-2.0"] | 12 | 2020-11-13T01:36:32.000Z | 2022-01-23T20:35:55.000Z | rec_to_nwb/test/processing/tools/test_beartype.py | jihyunbak/rec_to_nwb | 6e65f8bf0a4faa4d986483ec2442ba19d70c92a9 | ["Apache-2.0"] | 3 | 2020-10-20T06:52:45.000Z | 2021-07-06T23:00:53.000Z | #!/usr/bin/env python3
"""
`py.test`-driven unit test suite for the `@beartype` decorator, implementing a
rudimentary subset of PEP 484-style type checking based on Python 3.x function
annotations.
Usage
----------
These tests assume the `@beartype` decorator and all utility functions (e.g.,
`_check_type_annotation()`) and globals (e.g., `_PARAMETER_KIND_IGNORED`)
required by this decorator to reside in a top-level module named `beartype`. If
this is the case, these tests may be run as is with:
$ py.test -k test_beartype
See Also
----------
https://stackoverflow.com/a/37961120/2809027
Stackoverflow answer introducing the `@beartype` decorator.
"""
from unittest import TestCase
import typing
import pytest
from testfixtures import should_raise
from rec_to_nwb.processing.tools.beartype.beartype import beartype
class TestBearyype(TestCase):
def test_beartype_noop(self) -> None:
"""
Test bear typing of a function with no function annotations, reducing to
_no_ type checking.
"""
# Unannotated function to be type checked.
@beartype
def khorne(gork, mork):
return gork + mork
# Call this function and assert the expected return value.
assert khorne('WAAAGH!', '!HGAAAW') == 'WAAAGH!!HGAAAW'
# ....................{ TESTS ~ pass : param }....................
def test_beartype_pass_param_keyword_and_positional(self) -> None:
"""
Test bear typing of a function call successfully passed both annotated
positional and keyword parameters.
"""
# Function to be type checked.
@beartype
def slaanesh(daemonette: str, keeper_of_secrets: str) -> str:
return daemonette + keeper_of_secrets
# Call this function with both positional and keyword arguments and assert
# the expected return value.
assert slaanesh(
'Seeker of Decadence', keeper_of_secrets="N'Kari") == (
"Seeker of DecadenceN'Kari")
def test_beartype_pass_param_keyword_only(self) -> None:
"""
Test bear typing of a function call successfully passed an annotated
keyword-only parameter following an `*` or `*args` parameter.
"""
# Function to be type checked.
@beartype
def changer_of_ways(sky_shark: str, *, chaos_spawn: str) -> str:
return sky_shark + chaos_spawn
# Call this function with keyword arguments and assert the expected return
# value.
assert changer_of_ways(
'Screamers', chaos_spawn="Mith'an'driarkh") == (
"ScreamersMith'an'driarkh")
def test_beartype_pass_param_tuple(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated as a tuple.
"""
# Function to be type checked.
@beartype
def genestealer(tyranid: str, hive_fleet: (str, int)) -> str:
return tyranid + str(hive_fleet)
# Call this function with each of the two types listed in the above tuple.
assert genestealer(
'Norn-Queen', hive_fleet='Behemoth') == 'Norn-QueenBehemoth'
assert genestealer(
'Carnifex', hive_fleet=0xDEADBEEF) == 'Carnifex3735928559'
def test_type_check_pass_param_custom(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated as a user-defined rather than builtin type.
"""
# User-defined type.
class CustomTestStr(str):
pass
# Function to be type checked.
@beartype
def hrud(gugann: str, delphic_plague: CustomTestStr) -> str:
return gugann + delphic_plague
# Call this function with each of the two types listed in the above tuple.
assert hrud(
'Troglydium hruddi', delphic_plague=CustomTestStr('Delphic Sink')) == (
'Troglydium hruddiDelphic Sink')
def test_type_check_pass_typing_module(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated with an abstract type from the typing module.
"""
MyMap = typing.Mapping
@beartype
def function(par: MyMap, ameter: MyMap) -> MyMap:
result = par.copy()
result.update(ameter)
return result
assert function({1:1}, {2:2}) == {1:1, 2:2}
def test_type_check_pass_parameterized_typing_module(self) -> None:
"""
Test bear typing of a function call successfully passed a parameter
annotated with a parametirized abstract type from the typing module.
"""
MyMap = typing.Mapping
@beartype
def function(par: MyMap, ameter: MyMap) -> MyMap:
result = par.copy()
result.update(ameter)
return result
assert function({1:1}, {2:2}) == {1:1, 2:2}
# ....................{ TESTS ~ pass : return }....................
def test_type_check_pass_return_none(self) -> None:
"""
Test bear typing of a function call successfully returning `None` and
annotated as such.
"""
# Function to be type checked.
@beartype
def xenos(interex: str, diasporex: str) -> None:
interex + diasporex
# Call this function and assert no value to be returned.
assert xenos(
'Luna Wolves', diasporex='Iron Hands Legion') is None
# ....................{ TESTS ~ fail }....................
def test_beartype_fail_keyword_unknown(self) -> None:
"""
Test bear typing of an annotated function call passed an unrecognized
keyword parameter.
"""
# Annotated function to be type checked.
@beartype
def tau(kroot: str, vespid: str) -> str:
return kroot + vespid
# Call this function with an unrecognized keyword parameter and assert the
# expected exception.
with pytest.raises(TypeError) as exception:
tau(kroot='Greater Good', nicassar='Dhow')
# For readability, this should be a "TypeError" synopsizing the exact issue
# raised by the Python interpreter on calling the original function rather
# than a "TypeError" failing to synopsize the exact issue raised by the
# wrapper type-checking the original function. Since the function
# annotations defined above guarantee that the exception message of the
# latter will be suffixed by "not a str", ensure this is *NOT* the case.
assert not str(exception.value).endswith('not a str')
def test_beartype_fail_param_name(self) -> None:
"""
Test bear typing of a function accepting a parameter name reserved for
use by the `@beartype` decorator.
"""
# Define a function accepting a reserved parameter name and assert the
# expected exception.
@beartype
@should_raise(NameError)
def jokaero(weaponsmith: str, __beartype_func: str) -> str:
return weaponsmith + __beartype_func
# ....................{ TESTS ~ fail : type }....................
def test_beartype_fail_param_type(self) -> None:
"""
Test bear typing of an annotated function call failing a parameter type
check.
"""
# Annotated function to be type checked.
@beartype
def eldar(isha: str, asuryan: (str, int)) -> str:
return isha + asuryan
# Call this function with an invalid type and assert the expected exception.
with pytest.raises(TypeError):
eldar('Mother of the Eldar', 100.100)
def test_beartype_fail_return_type(self) -> None:
"""
Test bear typing of an annotated function call failing a return type
check.
"""
# Annotated function to be type checked.
@beartype
def necron(star_god: str, old_one: str) -> str:
return 60e6
# Call this function and assert the expected exception.
with pytest.raises(TypeError):
necron("C'tan", 'Elder Thing')
# ....................{ TESTS ~ fail : annotation }....................
def test_beartype_fail_annotation_param(self) -> None:
"""
Test bear typing of a function with an unsupported parameter annotation.
"""
# Assert the expected exception from attempting to type check a function
# with a parameter annotation that is *NOT* a type.
with pytest.raises(TypeError):
@beartype
def nurgle(nurgling: str, great_unclean_one: 'Bringer of Poxes') -> str:
return nurgling + great_unclean_one
def test_beartype_fail_annotation_return(self) -> None:
"""
Test bear typing of a function with an unsupported return annotation.
"""
# Assert the expected exception from attempting to type check a function
# with a return annotation that is *NOT* a type.
with pytest.raises(TypeError):
@beartype
def tzeentch(disc: str, lord_of_change: str) -> 'Player of Games':
return disc + lord_of_change
| 36.235521 | 84 | 0.613213 | 8,548 | 0.910815 | 0 | 0 | 1,707 | 0.181886 | 0 | 0 | 5,052 | 0.538306 |
332899d38b421fab9695a008493c81d6890c2e39 | 2,114 | py | Python | var/spack/repos/builtin/packages/jemalloc/package.py | ilagunap/spack | 510f869c3ae8ac2721debd29e98076212ee75852 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2018-11-16T02:42:57.000Z | 2019-06-06T19:18:50.000Z | var/spack/repos/builtin/packages/jemalloc/package.py | ilagunap/spack | 510f869c3ae8ac2721debd29e98076212ee75852 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 18 | 2021-03-12T16:22:58.000Z | 2022-03-02T17:07:08.000Z | var/spack/repos/builtin/packages/jemalloc/package.py | ilagunap/spack | 510f869c3ae8ac2721debd29e98076212ee75852 | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class Jemalloc(AutotoolsPackage):
"""jemalloc is a general purpose malloc(3) implementation that emphasizes
fragmentation avoidance and scalable concurrency support."""
homepage = "http://jemalloc.net/"
url = "https://github.com/jemalloc/jemalloc/releases/download/4.0.4/jemalloc-4.0.4.tar.bz2"
version('5.2.1', sha256='34330e5ce276099e2e8950d9335db5a875689a4c6a56751ef3b1d8c537f887f6')
version('5.2.0', sha256='74be9f44a60d2a99398e706baa921e4efde82bf8fd16e5c0643c375c5851e3b4')
version('4.5.0', sha256='9409d85664b4f135b77518b0b118c549009dc10f6cba14557d170476611f6780')
version('4.4.0', sha256='a7aea63e9718d2f1adf81d87e3df3cb1b58deb86fc77bad5d702c4c59687b033')
version('4.3.1', sha256='f7bb183ad8056941791e0f075b802e8ff10bd6e2d904e682f87c8f6a510c278b')
version('4.2.1', sha256='5630650d5c1caab95d2f0898de4fe5ab8519dc680b04963b38bb425ef6a42d57')
version('4.2.0', sha256='b216ddaeb901697fe38bd30ea02d7505a4b60e8979092009f95cfda860d46acb')
version('4.1.0', sha256='fad06d714f72adb4265783bc169c6d98eeb032d57ba02d87d1dcb4a2d933ec8e')
version('4.0.4', sha256='3fda8d8d7fcd041aa0bebbecd45c46b28873cf37bd36c56bf44961b36d0f42d0')
variant('stats', default=False, description='Enable heap statistics')
variant('prof', default=False, description='Enable heap profiling')
variant(
'jemalloc_prefix', default='none',
description='Prefix to prepend to all public APIs',
values=None,
multi=False
)
def configure_args(self):
spec = self.spec
args = []
if '+stats' in spec:
args.append('--enable-stats')
if '+prof' in spec:
args.append('--enable-prof')
je_prefix = spec.variants['jemalloc_prefix'].value
if je_prefix != 'none':
args.append('--with-jemalloc-prefix={0}'.format(je_prefix))
return args
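    # Example (hypothetical spec): `spack install jemalloc +prof jemalloc_prefix=je_`
    # would resolve here to the configure flags --enable-prof and
    # --with-jemalloc-prefix=je_.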
| 44.041667 | 100 | 0.726585 | 1,893 | 0.895459 | 0 | 0 | 0 | 0 | 0 | 0 | 1,316 | 0.622517 |
33299ffe9a3bcae73b7f087adb90f942699dff82 | 466 | py | Python | ABC/abc051-abc100/abc093/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | ["CC0-1.0"] | 2 | 2020-06-12T09:54:23.000Z | 2021-05-04T01:34:07.000Z | ABC/abc051-abc100/abc093/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | ["CC0-1.0"] | 961 | 2020-06-23T07:26:22.000Z | 2022-03-31T21:34:52.000Z | ABC/abc051-abc100/abc093/b.py | KATO-Hiro/AtCoder | cbbdb18e95110b604728a54aed83a6ed6b993fde | ["CC0-1.0"] | null | null | null | '''input
4 8 3
4
5
6
7
8
3 8 2
3
4
7
8
2 9 100
2
3
4
5
6
7
8
9
'''
# -*- coding: utf-8 -*-
# AtCoder Beginner Contest
# Problem B
if __name__ == '__main__':
a, b, k = list(map(int, input().split()))
# If [a, b] contains at most 2*k integers, print them all;
# otherwise print only the first k and the last k.
if (b - a + 1) <= 2 * k:
for i in range(a, b + 1):
print(i)
else:
for j in range(a, a + k):
print(j)
for j in range(b - k + 1, b + 1):
print(j)
| 10.590909 | 46 | 0.401288 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 168 | 0.360515 |
332b0f0138c11f0eed41ed393577248e868d822b | 5,104 | py | Python | examples/python/qiskit_integration.py | CQCL/pytket | 44fa95eb060afc8c45598f89afda993aa2d06634 | ["Apache-2.0"] | 249 | 2018-07-20T03:04:52.000Z | 2022-03-31T08:45:58.000Z | examples/python/qiskit_integration.py | CQCL/pytket | 44fa95eb060afc8c45598f89afda993aa2d06634 | ["Apache-2.0"] | 67 | 2018-08-03T09:38:15.000Z | 2022-03-22T09:39:45.000Z | examples/python/qiskit_integration.py | CQCL/pytket | 44fa95eb060afc8c45598f89afda993aa2d06634 | ["Apache-2.0"] | 69 | 2019-02-26T15:15:30.000Z | 2022-03-15T14:47:24.000Z | # # Integrating `pytket` into Qiskit software
# In this tutorial, we will focus on:
# - Using `pytket` for compilation or providing devices/simulators within Qiskit workflows;
# - Adapting Qiskit code to use `pytket` directly.
# This example assumes some familiarity with the Qiskit algorithms library. We have chosen a small variational quantum eigensolver (VQE) for our example, but the same principles apply to a wide range of quantum algorithms.
#
# To run this example, you will need `pytket-qiskit`, as well as the separate `qiskit-optimization` package. You will also need IBMQ credentials stored on your local machine.
#
# Qiskit has risen to prominence as the most popular platform for the development of quantum software, providing an open source, full-stack solution with a large feature list and extensive examples from the developers and community. For many researchers who have already invested in building a large codebase built on top of Qiskit, the idea of switching entirely to a new platform can look like a time-sink and may require reversion to take advantage of the new tools that get regularly added to Qiskit.
#
# The interoperability provided by `pytket-qiskit` allows Qiskit users to start taking advantage of some of the unique features of `pytket` without having to completely rewrite their software.
# Let's take as an example an ansatz for computing the ground-state energy of a hydrogen molecule.
from qiskit.opflow.primitive_ops import PauliSumOp
H2_op = PauliSumOp.from_list(
[
("II", -1.052373245772859),
("IZ", 0.39793742484318045),
("ZI", -0.39793742484318045),
("ZZ", -0.01128010425623538),
("XX", 0.18093119978423156),
]
)
# First let's use qiskit's NumPyEigensolver to compute the exact answer:
from qiskit.algorithms import NumPyEigensolver
es = NumPyEigensolver(k=1)
exact_result = es.compute_eigenvalues(H2_op).eigenvalues[0].real
print("Exact result:", exact_result)
# The following function will attempt to find an approximation to this using VQE, given a qiskit QuantumInstance on which to run circuits:
from qiskit.algorithms import VQE
from qiskit.algorithms.optimizers import SPSA
from qiskit.circuit.library import EfficientSU2
def vqe_solve(op, maxiter, quantum_instance):
optimizer = SPSA(maxiter=maxiter)
ansatz = EfficientSU2(op.num_qubits, entanglement="linear")
vqe = VQE(ansatz=ansatz, optimizer=optimizer, quantum_instance=quantum_instance)
return vqe.compute_minimum_eigenvalue(op).eigenvalue
# We will run this on a pytket `IBMQEmulatorBackend`. This is a noisy simulator whose characteristics match those of the real device, in this case "ibmq_belem" (a 5-qubit machine). The characteristics are retrieved from the device when the backend is constructed, so we must first load our IBMQ account. Circuits will be compiled to match the connectivity of the device and simulated using a basic noise model [constructed from the device parameters](https://qiskit.org/documentation/apidoc/aer_noise.html).
from pytket.extensions.qiskit import IBMQEmulatorBackend
from qiskit import IBMQ
IBMQ.load_account()
b_emu = IBMQEmulatorBackend("ibmq_belem", hub="ibm-q", group="open", project="main")
# Most qiskit algorithms require a qiskit `QuantumInstance` as input; this in turn is constructed from a `qiskit.providers.Backend`. The `TketBackend` class wraps a pytket backend as a `qiskit.providers.Backend`.
from pytket.extensions.qiskit.tket_backend import TketBackend
from qiskit.utils import QuantumInstance
qis_backend = TketBackend(b_emu)
qi = QuantumInstance(qis_backend, shots=8192, wait=0.1)
# Note that we could have used any other pytket shots backend instead of `b_emu` here. The `pytket` extension modules provide an interface to a wide variety of devices and simulators from different quantum software platforms.
#
# We can now run the VQE algorithm. In this example we use only 50 iterations, but greater accuracy may be achieved by increasing this number:
print("VQE result:", vqe_solve(H2_op, 50, qi))
# Another way to improve the accuracy of results is to apply optimisations to the circuit in an attempt to reduce the overall noise. When we construct our qiskit backend, we can pass in a pytket compilation pass as an additional parameter. There is a wide range of options here; we recommend the device-specific default compilation pass, provided by each tket backend. This pass will ensure that all the hardware constraints of the device are met. We can enable tket's most aggressive optimisation level by setting the parameter `optimisation_level=2`.
qis_backend2 = TketBackend(b_emu, b_emu.default_compilation_pass(optimisation_level=2))
qi2 = QuantumInstance(qis_backend2, shots=8192, wait=0.1)
# Let's run the optimisation again:
print("VQE result (with optimisation):", vqe_solve(H2_op, 50, qi2))
# These are small two-qubit circuits, so the improvement may be small, but with larger, more complex circuits, the reduction in noise from compilation will make a greater difference and allow VQE experiments to converge with fewer iterations.
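# As a minimal sketch (assuming a recent pytket version and valid IBMQ credentials), the same
# pytket backend can also compile and run circuits directly, without the qiskit wrapper:
# from pytket import Circuit
# circ = Circuit(2).H(0).CX(0, 1).measure_all()
# compiled = b_emu.get_compiled_circuit(circ, optimisation_level=2)
# counts = b_emu.run_circuit(compiled, n_shots=100).get_counts()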
| 61.493976 | 552 | 0.788009 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,662 | 0.717476 |
332bc827b9397befa3b3df403d9170657a773ac3 | 2,372 | py | Python | Cura/Cura/plugins/VersionUpgrade/VersionUpgrade34to35/__init__.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | ["MIT"] | null | null | null | Cura/Cura/plugins/VersionUpgrade/VersionUpgrade34to35/__init__.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | ["MIT"] | null | null | null | Cura/Cura/plugins/VersionUpgrade/VersionUpgrade34to35/__init__.py | TIAO-JI-FU/3d-printing-with-moveo-1 | 100ecfd1208fe1890f8bada946145d716b2298eb | ["MIT"] | null | null | null | # Copyright (c) 2018 Ultimaker B.V.
# Cura is released under the terms of the LGPLv3 or higher.
from typing import Any, Dict, TYPE_CHECKING
from . import VersionUpgrade34to35
if TYPE_CHECKING:
from UM.Application import Application
upgrade = VersionUpgrade34to35.VersionUpgrade34to35()
def getMetaData() -> Dict[str, Any]:
return {
"version_upgrade": {
# From To Upgrade function
("preferences", 6000004): ("preferences", 6000005, upgrade.upgradePreferences),
("definition_changes", 4000004): ("definition_changes", 4000005, upgrade.upgradeInstanceContainer),
("quality_changes", 4000004): ("quality_changes", 4000005, upgrade.upgradeInstanceContainer),
("quality", 4000004): ("quality", 4000005, upgrade.upgradeInstanceContainer),
("user", 4000004): ("user", 4000005, upgrade.upgradeInstanceContainer),
("machine_stack", 4000004): ("machine_stack", 4000005, upgrade.upgradeStack),
("extruder_train", 4000004): ("extruder_train", 4000005, upgrade.upgradeStack),
},
"sources": {
"preferences": {
"get_version": upgrade.getCfgVersion,
"location": {"."}
},
"machine_stack": {
"get_version": upgrade.getCfgVersion,
"location": {"./machine_instances"}
},
"extruder_train": {
"get_version": upgrade.getCfgVersion,
"location": {"./extruders"}
},
"definition_changes": {
"get_version": upgrade.getCfgVersion,
"location": {"./definition_changes"}
},
"quality_changes": {
"get_version": upgrade.getCfgVersion,
"location": {"./quality_changes"}
},
"quality": {
"get_version": upgrade.getCfgVersion,
"location": {"./quality"}
},
"user": {
"get_version": upgrade.getCfgVersion,
"location": {"./user"}
}
}
}
def register(app: "Application") -> Dict[str, Any]:
return { "version_upgrade": upgrade }
| 38.258065 | 111 | 0.52403 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 777 | 0.327572 |
332c81f4c480a3b1d74e9ed525d3b78c37420632 | 8,717 | py | Python | xos/synchronizer/pull_steps/test_pull_olts.py | iecedge/olt-service | 0aac847ca228f2c20a2b57c783a414f185a0116c | ["Apache-2.0"] | null | null | null | xos/synchronizer/pull_steps/test_pull_olts.py | iecedge/olt-service | 0aac847ca228f2c20a2b57c783a414f185a0116c | ["Apache-2.0"] | null | null | null | xos/synchronizer/pull_steps/test_pull_olts.py | iecedge/olt-service | 0aac847ca228f2c20a2b57c783a414f185a0116c | ["Apache-2.0"] | null | null | null | # Copyright 2017-present Open Networking Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from mock import patch, call, Mock, PropertyMock
import requests_mock
import os, sys
test_path=os.path.abspath(os.path.dirname(os.path.realpath(__file__)))
class TestSyncOLTDevice(unittest.TestCase):
def setUp(self):
global DeferredException
self.sys_path_save = sys.path
# Setting up the config module
from xosconfig import Config
config = os.path.join(test_path, "../test_config.yaml")
Config.clear()
Config.init(config, "synchronizer-config-schema.yaml")
# END Setting up the config module
from xossynchronizer.mock_modelaccessor_build import mock_modelaccessor_config
mock_modelaccessor_config(test_path, [("olt-service", "volt.xproto"),
("vsg", "vsg.xproto"),
("rcord", "rcord.xproto"),])
import xossynchronizer.modelaccessor
reload(xossynchronizer.modelaccessor) # in case nose2 loaded it in a previous test
from xossynchronizer.modelaccessor import model_accessor
self.model_accessor = model_accessor
from pull_olts import OLTDevicePullStep
# import all class names to globals
for (k, v) in model_accessor.all_model_classes.items():
globals()[k] = v
self.sync_step = OLTDevicePullStep
# mock volt service
self.volt_service = Mock()
self.volt_service.id = "volt_service_id"
self.volt_service.voltha_url = "voltha_url"
self.volt_service.voltha_user = "voltha_user"
self.volt_service.voltha_pass = "voltha_pass"
self.volt_service.voltha_port = 1234
# mock voltha responses
self.devices = {
"items": [
{
"id": "test_id",
"type": "simulated_olt",
"host_and_port": "172.17.0.1:50060",
"admin_state": "ENABLED",
"oper_status": "ACTIVE",
"serial_number": "serial_number",
}
]
}
self.logical_devices = {
"items": [
{
"root_device_id": "test_id",
"id": "of_id",
"datapath_id": "55334486016"
}
]
}
self.ports = {
"items": [
{
"label": "PON port",
"port_no": 1,
"type": "PON_OLT",
"admin_state": "ENABLED",
"oper_status": "ACTIVE"
},
{
"label": "NNI facing Ethernet port",
"port_no": 2,
"type": "ETHERNET_NNI",
"admin_state": "ENABLED",
"oper_status": "ACTIVE"
}
]
}
def tearDown(self):
sys.path = self.sys_path_save
@requests_mock.Mocker()
def test_missing_volt_service(self, m):
self.assertFalse(m.called)
@requests_mock.Mocker()
def test_pull(self, m):
with patch.object(VOLTService.objects, "all") as olt_service_mock, \
patch.object(OLTDevice, "save") as mock_olt_save, \
patch.object(PONPort, "save") as mock_pon_save, \
patch.object(NNIPort, "save") as mock_nni_save:
olt_service_mock.return_value = [self.volt_service]
m.get("http://voltha_url:1234/api/v1/devices", status_code=200, json=self.devices)
m.get("http://voltha_url:1234/api/v1/devices/test_id/ports", status_code=200, json=self.ports)
m.get("http://voltha_url:1234/api/v1/logical_devices", status_code=200, json=self.logical_devices)
self.sync_step(model_accessor=self.model_accessor).pull_records()
# TODO: how to assert this?
# self.assertEqual(existing_olt.admin_state, "ENABLED")
# self.assertEqual(existing_olt.oper_status, "ACTIVE")
# self.assertEqual(existing_olt.volt_service_id, "volt_service_id")
# self.assertEqual(existing_olt.device_id, "test_id")
# self.assertEqual(existing_olt.of_id, "of_id")
# self.assertEqual(existing_olt.dp_id, "of:0000000ce2314000")
mock_olt_save.assert_called()
mock_pon_save.assert_called()
mock_nni_save.assert_called()
@requests_mock.Mocker()
def test_pull_existing(self, m):
existing_olt = Mock()
existing_olt.admin_state = "ENABLED"
existing_olt.enacted = 2
existing_olt.updated = 1
with patch.object(VOLTService.objects, "all") as olt_service_mock, \
patch.object(OLTDevice.objects, "filter") as mock_get, \
patch.object(PONPort, "save") as mock_pon_save, \
patch.object(NNIPort, "save") as mock_nni_save, \
patch.object(existing_olt, "save") as mock_olt_save:
olt_service_mock.return_value = [self.volt_service]
mock_get.return_value = [existing_olt]
m.get("http://voltha_url:1234/api/v1/devices", status_code=200, json=self.devices)
m.get("http://voltha_url:1234/api/v1/devices/test_id/ports", status_code=200, json=self.ports)
m.get("http://voltha_url:1234/api/v1/logical_devices", status_code=200, json=self.logical_devices)
self.sync_step(model_accessor=self.model_accessor).pull_records()
self.assertEqual(existing_olt.admin_state, "ENABLED")
self.assertEqual(existing_olt.oper_status, "ACTIVE")
self.assertEqual(existing_olt.volt_service_id, "volt_service_id")
self.assertEqual(existing_olt.device_id, "test_id")
self.assertEqual(existing_olt.of_id, "of_id")
self.assertEqual(existing_olt.dp_id, "of:0000000ce2314000")
mock_olt_save.assert_called()
mock_pon_save.assert_called()
mock_nni_save.assert_called()
@requests_mock.Mocker()
def test_pull_existing_do_not_sync(self, m):
existing_olt = Mock()
existing_olt.enacted = 1
existing_olt.updated = 2
existing_olt.device_id = "test_id"
with patch.object(VOLTService.objects, "all") as olt_service_mock, \
patch.object(OLTDevice.objects, "filter") as mock_get, \
patch.object(PONPort, "save") as mock_pon_save, \
patch.object(NNIPort, "save") as mock_nni_save, \
patch.object(existing_olt, "save") as mock_olt_save:
olt_service_mock.return_value = [self.volt_service]
mock_get.return_value = [existing_olt]
m.get("http://voltha_url:1234/api/v1/devices", status_code=200, json=self.devices)
m.get("http://voltha_url:1234/api/v1/devices/test_id/ports", status_code=200, json=self.ports)
m.get("http://voltha_url:1234/api/v1/logical_devices", status_code=200, json=self.logical_devices)
self.sync_step(model_accessor=self.model_accessor).pull_records()
mock_olt_save.assert_not_called()
mock_pon_save.assert_called()
mock_nni_save.assert_called()
@requests_mock.Mocker()
def test_pull_deleted_object(self, m):
existing_olt = Mock()
existing_olt.enacted = 2
existing_olt.updated = 1
existing_olt.device_id = "test_id"
m.get("http://voltha_url:1234/api/v1/devices", status_code=200, json={"items": []})
with patch.object(VOLTService.objects, "all") as olt_service_mock, \
patch.object(OLTDevice.objects, "get_items") as mock_get, \
patch.object(existing_olt, "delete") as mock_olt_delete:
olt_service_mock.return_value = [self.volt_service]
mock_get.return_value = [existing_olt]
self.sync_step(model_accessor=self.model_accessor).pull_records()
mock_olt_delete.assert_called()
if __name__ == "__main__":
unittest.main() | 39.265766 | 110 | 0.609958 | 7,898 | 0.906046 | 0 | 0 | 5,034 | 0.577492 | 0 | 0 | 2,394 | 0.274636 |
332ca6164e5d6ccaf3d86265c927173981639004 | 4,005 | py | Python | src/embedding/triple2vec.py | MengtingWan/grocery | d9401418915a481dcd4be1f0be2cad238e8cc00e | ["Apache-2.0"] | 46 | 2019-01-24T19:48:19.000Z | 2022-03-22T22:16:55.000Z | src/embedding/triple2vec.py | MengtingWan/grocery | d9401418915a481dcd4be1f0be2cad238e8cc00e | ["Apache-2.0"] | 2 | 2019-11-05T19:55:57.000Z | 2021-04-01T12:15:13.000Z | src/embedding/triple2vec.py | MengtingWan/grocery | d9401418915a481dcd4be1f0be2cad238e8cc00e | ["Apache-2.0"] | 17 | 2019-03-30T02:45:59.000Z | 2021-12-30T00:56:02.000Z | import tensorflow as tf
from embedding.learner import Model
from embedding.sampler import Sampler
import sys
class triple2vec(Model):
def __init__(self, DATA_NAME, HIDDEN_DIM, LEARNING_RATE, BATCH_SIZE, N_NEG, MAX_EPOCH=500, N_SAMPLE_PER_EPOCH=None):
super().__init__('triple2vec', DATA_NAME, HIDDEN_DIM, LEARNING_RATE, BATCH_SIZE, N_NEG, MAX_EPOCH, N_SAMPLE_PER_EPOCH)
def assign(self, dataTrain, n_user, n_item, N_SAMPLE, dump=True):
mySampler = Sampler(dataTrain, self.DATA_NAME)
trainSamples = mySampler.sample_triples(N_SAMPLE, dump=dump)
super().assign_data(trainSamples, n_user, n_item)
def assign_from_file(self, n_user, n_item):
mySampler = Sampler(None, self.DATA_NAME)
trainSamples = mySampler.load_triples_from_file()
super().assign_data(trainSamples, n_user, n_item)
def model_constructor(self, opt='sgd'):
n_user = self.n_user
n_item = self.n_item
HIDDEN_DIM = self.HIDDEN_DIM
LEARNING_RATE = self.LEARNING_RATE
N_NEG = self.N_NEG
u = tf.placeholder(tf.int32, [None])
i = tf.placeholder(tf.int32, [None])
j = tf.placeholder(tf.int32, [None])
user_emb = tf.get_variable("user_emb", [n_user, HIDDEN_DIM],
initializer=tf.random_uniform_initializer(-0.01, 0.01))
item_emb1 = tf.get_variable("item_emb1", [n_item, HIDDEN_DIM],
initializer=tf.random_uniform_initializer(-0.01, 0.01))
item_emb2 = tf.get_variable("item_emb2", [n_item, HIDDEN_DIM],
initializer=tf.random_uniform_initializer(-0.01, 0.01))
b_item = tf.get_variable("item_bias", [n_item, 1],
initializer=tf.constant_initializer(0))
b_user = tf.get_variable("user_bias", [n_user, 1],
initializer=tf.constant_initializer(0))
i_emb = tf.nn.embedding_lookup(item_emb1, i)
j_emb = tf.nn.embedding_lookup(item_emb2, j)
u_emb = tf.nn.embedding_lookup(user_emb, u)
input_emb_i = j_emb + u_emb
loss_i = tf.reduce_mean(tf.nn.nce_loss(weights=item_emb1, biases=b_item[:,0],
labels=tf.reshape(i, (tf.shape(i)[0], 1)), inputs=input_emb_i,
num_sampled=N_NEG, num_classes=n_item))
input_emb_j = i_emb + u_emb
loss_j = tf.reduce_mean(tf.nn.nce_loss(weights=item_emb2, biases=b_item[:,0],
labels=tf.reshape(j, (tf.shape(j)[0], 1)), inputs=input_emb_j,
num_sampled=N_NEG, num_classes=n_item))
input_emb_u = i_emb + j_emb
loss_u = tf.reduce_mean(tf.nn.nce_loss(weights=user_emb, biases=b_user[:,0],
labels=tf.reshape(u, (tf.shape(u)[0], 1)), inputs=input_emb_u,
num_sampled=N_NEG, num_classes=n_user))
trainloss = tf.reduce_mean([loss_i, loss_j, loss_u])
if opt == 'sgd':
myOpt = tf.train.GradientDescentOptimizer(LEARNING_RATE)
elif opt == 'adaGrad':
myOpt = tf.train.AdagradOptimizer(LEARNING_RATE)
elif opt == 'adam':
myOpt = tf.train.AdamOptimizer(LEARNING_RATE)
elif opt == 'lazyAdam':
myOpt = tf.contrib.opt.LazyAdamOptimizer(LEARNING_RATE)
elif opt == 'momentum':
myOpt = tf.train.MomentumOptimizer(LEARNING_RATE, 0.9)
else:
print('optimizer is not recognized, use SGD instead.')
sys.stdout.flush()
myOpt = tf.train.GradientDescentOptimizer(LEARNING_RATE)
optimizer = myOpt.minimize(trainloss)
paramDict = {'item_emb1': item_emb1, 'item_emb2': item_emb2, 'user_emb': user_emb, 'item_bias': b_item, 'user_bias': b_user}
return [u, i, j], [trainloss], [optimizer], paramDict
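# Example usage (a hypothetical sketch; the training loop itself is driven by the
# inherited Model base class, whose entry points are assumed here):
# model = triple2vec("demo_data", HIDDEN_DIM=32, LEARNING_RATE=0.05, BATCH_SIZE=256, N_NEG=5)
# model.assign(dataTrain, n_user=1000, n_item=5000, N_SAMPLE=100000)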
| 48.253012 | 132 | 0.604494 | 3,887 | 0.970537 | 0 | 0 | 0 | 0 | 0 | 0 | 212 | 0.052934 |
332ec3ad83ab42693d9db460bc909a8573da26d4 | 1,210 | py | Python | src/model/synapses/numba_backend/VoltageJump.py | Fassial/pku-intern | 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | ["MIT"] | null | null | null | src/model/synapses/numba_backend/VoltageJump.py | Fassial/pku-intern | 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | ["MIT"] | null | null | null | src/model/synapses/numba_backend/VoltageJump.py | Fassial/pku-intern | 4463e7d5a5844c8002f7e3d01b4fadc3a20e2038 | ["MIT"] | null | null | null | """
Created on 12:39, June. 4th, 2021
Author: fassial
Filename: VoltageJump.py
"""
import brainpy as bp
__all__ = [
"VoltageJump",
]
class VoltageJump(bp.TwoEndConn):
target_backend = ['numpy', 'numba', 'numba-parallel', 'numba-cuda']
def __init__(self, pre, post, conn,
weight = 1., delay = 0., **kwargs
):
# init params
self.weight = weight
self.delay = delay
# init connections
self.conn = conn(pre.size, post.size)
self.pre_ids, self.post_ids = self.conn.requires("pre_ids", "post_ids")
self.size = len(self.pre_ids)
# init vars
self.Isyn = self.register_constant_delay("Isyn",
size = self.size,
delay_time = self.delay
)
# init super
super(VoltageJump, self).__init__(pre = pre, post = post, **kwargs)
def update(self, _t):
# set post.V
for i in range(self.size):
pre_id, post_id = self.pre_ids[i], self.post_ids[i]
self.Isyn.push(i,
self.pre.spike[pre_id] * self.weight
)
if not self.post.refractory[post_id]:
self.post.V[post_id] += self.Isyn.pull(i)
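# Example wiring (a hypothetical sketch; group1/group2 would be BrainPy neuron groups):
# syn = VoltageJump(pre=group1, post=group2, conn=bp.connect.All2All(), weight=0.5)
# net = bp.Network(group1, group2, syn)
# net.run(100., inputs=(group1, "input", 1.))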
| 26.304348 | 79 | 0.566116 | 1,069 | 0.883471 | 0 | 0 | 0 | 0 | 0 | 0 | 228 | 0.18843 |
3330488f0dd33b218d18cd27c32030cf62e1cdf9 | 9,041 | py | Python | bns.py | amansrivastava17/bns-short-text-similarity | fde1761bf24cc7e90eb53bce4419ccf882d11817 | ["MIT"] | 22 | 2018-06-14T13:50:12.000Z | 2022-01-04T16:52:01.000Z | bns.py | amansrivastava17/bns-vectorizer-text-classification | fde1761bf24cc7e90eb53bce4419ccf882d11817 | ["MIT"] | null | null | null | bns.py | amansrivastava17/bns-vectorizer-text-classification | fde1761bf24cc7e90eb53bce4419ccf882d11817 | ["MIT"] | 4 | 2018-08-31T05:36:40.000Z | 2020-10-10T08:57:14.000Z | # coding=utf-8
from collections import Counter, namedtuple
from scipy.sparse import csr_matrix
from scipy.stats import norm
import numpy as np
import math
from nlp_utils import ngrams
class BNS:
"""Bi-normal Separation is a popular method to score textual data importance against its
belonging category, it can efficiently find out important keywords in a document and assign
a weighted positive score, also provide negative scoring for unimportant word for a document.
Below are the description of variables used to calculate Bi-normal separation score for a
word for each category (or classes).
Features Descriptions:
======================
pos = number of positive training cases, typically minority,
neg = number of negative training cases,
tp = number of positive training cases containing word,
fp = number of negative training cases containing word,
fn = pos - tp,
tn = neg - fp,
true positive rate(tpr) = P(word | positive class) = tp/pos
false positive rate (fpr) = P(word | negative class) = fp/neg,
Bi-Normal Separation (BNS) = F^-1(tpr) - F^-1(fpr),
where F^-1 is the inverse Normal cumulative distribution function
"""
def __init__(self, ngram_range=None):
self.categories = []
self.bound_min_score = 0.0005
self.bound_max_score = 1 - 0.0005
self.bns_scores = {}
self.vectors = {}
self.sentences_category_map = {}
if ngram_range is None:
    self.ngram_range = [1, 2]
else:
    self.ngram_range = ngram_range
def bound(self, value):
"""
Bound the bns score under `bound_min_score` and `bound_max_score`
Args:
value (float): bnr score
Returns:
(float): bounded bnr score within min max limit
"""
return max(self.bound_min_score, min(self.bound_max_score, value))
@staticmethod
def calculate_bns_score(tpr, fpr):
"""
Calculate bns score for given `tpr` and `fpr` value
Args:
tpr (float): true positive rate
fpr (float): false positive rate
Returns:
(float) : bns score
"""
return norm.ppf(tpr) - norm.ppf(fpr)
def get_bns_score(self, word, category):
"""
Returns bns score for given `word` belongs to `category`
Args:
word (str): word whose bns score to be determined
category (str): category or class in which word bns score has to be find
Returns:
score (float): bns score
"""
score = None
if word in self.bns_scores:
if category in self.bns_scores[word]:
score = self.bns_scores[word][category]
return score
@staticmethod
def get_word_list(documents):
"""
Given list of sentences
Args:
documents (list): list of documents
Returns:
words (set): set of unique of words in documents
"""
words = []
for doc in documents:
words.extend(doc.split())
return set(words)
@staticmethod
def get_word_count_in_category(documents, categories):
"""
Create dict containing count of word for every category from document.
Examples:
documents - ['book cab', 'book me a taxi', 'book flight to mumbai']
categories - ['book_cab', 'book_cab', 'book_flight']
word_dict => {'book': {'book_cab': 2 , book_flight: 1}, 'cab': {'book_cab': 1},
'me': {'book_cab': 1}, 'a': {'book_cab':1 }, 'flight': {'book_flight': 1},
'to': {book_flight: 1}, 'mumbai': {'book_flight': 1}}
Args:
documents (list): list of documents
categories (list): list of category for doc in documents
Returns:
word_dict (dict): dict of word and their count in respective categories
"""
word_dict = {}
for sent, cat in zip(documents, categories):
words = sent.split()
for word in words:
if word not in word_dict:
word_dict[word] = {cat: 1}
else:
if cat not in word_dict[word]:
word_dict[word][cat] = 1
else:
word_dict[word][cat] += 1
return word_dict
def create_bns_score(self, documents, categories, word_category_count_dict):
"""
Create a dict of words and their respective bns score for each categories
Args:
documents (list): list of documents
            categories (list): list of categories, one per doc in documents
word_category_count_dict (dict): dict containing word and their respective count in categories
Returns:
None
"""
self.categories = list(set(categories))
total_categories = len(categories)
word_list = self.get_word_list(documents)
for sent, cat in zip(documents, categories):
if cat not in self.sentences_category_map:
self.sentences_category_map[cat] = [sent]
else:
self.sentences_category_map[cat].append(sent)
for index, word in enumerate(word_list):
for category in self.categories:
positive_sent = len(self.sentences_category_map[category])
negative_sent = total_categories - positive_sent
word_dict = word_category_count_dict[word]
total_word_occurrence = sum(word_dict.values())
if category in word_dict:
tp = word_dict[category]
else:
tp = 0
fp = total_word_occurrence - tp
tpr = self.bound(tp / float(positive_sent))
fpr = self.bound(fp / float(negative_sent))
bns_score = self.calculate_bns_score(tpr, fpr)
if not self.bns_scores.get(word, None):
self.bns_scores[word] = {'index': index, category: bns_score}
else:
if not self.bns_scores.get(word, {}).get(category, None):
self.bns_scores[word][category] = bns_score
def fit(self, training_documents, categories):
"""
Fit the documents and categories to create bns vectors for documents
Args:
training_documents (list): list of documents
            categories (list): list of categories, one per doc in documents
Returns:
None
"""
word_category_count_dict = self.get_word_count_in_category(training_documents, categories)
self.create_bns_score(training_documents, categories, word_category_count_dict)
for category in self.sentences_category_map.keys():
scores, indexes, counter = [], [], []
for count, sentence in enumerate(self.sentences_category_map[category]):
tokens = []
for n in self.ngram_range:
tokens.extend(ngrams(sentence, n))
tokens_dict = dict(Counter(tokens))
                for token, token_count in tokens_dict.items():
token_meta_data = self.bns_scores.get(token, None)
if token_meta_data:
if category in token_meta_data:
scores.append(token_count * token_meta_data[category])
indexes.append(token_meta_data['index'])
counter.append(count)
self.vectors[category] = csr_matrix((scores, (counter, indexes)),
shape=(len(counter), len(self.bns_scores)))
def transform(self, test_documents):
"""
Return bns vectors for test documents
Args:
test_documents (list): list of documents to convert them to bns vectorizer
Returns:
test_vector (list): list of bns vectors for each doc in `test_documents`
"""
test_vector = {}
for category in self.categories:
scores, indexes, counter = [], [], []
for count, sentence in enumerate(test_documents):
tokens = []
for n in self.ngram_range:
tokens.extend(ngrams(sentence, n))
tokens_dict = dict(Counter(tokens))
                for token, token_count in tokens_dict.items():
token_meta_data = self.bns_scores.get(token, None)
if token_meta_data:
if category in token_meta_data:
scores.append(token_count * token_meta_data[category])
indexes.append(token_meta_data['index'])
counter.append(count)
test_vector[category] = csr_matrix((scores, (counter, indexes)), shape=(len(counter), len(self.bns_scores)))
return test_vector
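# A minimal usage sketch (illustrative data; assumes nlp_utils.ngrams is
# importable and at least two categories are present, since a single
# category would leave the negative-document count at zero):
#
#   bns = BNS()
#   bns.fit(['book a cab', 'book a flight'], ['cab', 'flight'])
#   vectors = bns.transform(['book a cab'])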
| 41.095455 | 120 | 0.574162 | 8,854 | 0.9791 | 0 | 0 | 2,011 | 0.222382 | 0 | 0 | 3,649 | 0.403517 |
33310d445d44da6d57d70c0d985520626d8989bd | 1,473 | py | Python | scrape.py | ilemhadri/fb_messenger | c5da22ec40e0caa4c11236016226e258bf181c64 | [
"MIT"
] | 11 | 2018-11-18T18:16:13.000Z | 2022-02-07T14:14:33.000Z | scrape.py | ilemhadri/fb_messenger | c5da22ec40e0caa4c11236016226e258bf181c64 | [
"MIT"
] | 1 | 2021-01-16T16:54:14.000Z | 2021-01-17T09:28:34.000Z | scrape.py | ilemhadri/fb_messenger | c5da22ec40e0caa4c11236016226e258bf181c64 | [
"MIT"
] | 10 | 2019-02-28T18:01:51.000Z | 2022-03-24T16:43:57.000Z | import os
import sys
from collections import defaultdict
import datetime
import pickle
import re
import time
import json
from selenium import webdriver
def main():
driver = webdriver.Chrome() # Optional argument, if not specified will search path.
#load login cookie
driver.get('https://www.messenger.com')
cookies=pickle.load(open('data/logincookies.pkl','rb'))
for c in cookies:
driver.add_cookie(c)
driver.get('https://www.messenger.com')
#get page source
    source = driver.page_source
#get last active times and add them to database
v=re.compile("lastActiveTimes\":(.*),\"chatNotif")
lolo=json.loads(v.findall(source)[0])
d=defaultdict(lambda:[0],json.load(open("data/lastActiveTimes.json",'r')))
for k in lolo:
if lolo[k]!=d[k][-1]:
d[k].append(lolo[k])
json.dump(d,open("data/lastActiveTimes.json",'w'))
#maintain up to date database of friends profiles
v=re.compile("shortProfiles\":(.*),\"nearby")
lala=json.loads(v.findall(source)[0])
d=json.load(open('data/shortProfiles.json','r'))
d.update(lala)
json.dump(d,open('data/shortProfiles.json','w'))
driver.quit()
if not os.path.exists('data/logincookies.pkl'):
print ('missing cookie. Have you run init.py?')
sys.exit()
while True:
main()
with open('data/lastScrapeTime.txt','a') as f:
f.write(str(datetime.datetime.now())+'\n')
time.sleep(600)
| 30.061224 | 87 | 0.663951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 559 | 0.379498 |
33319c7108f2f54f95e8dca0a5199386d7358140 | 2,760 | py | Python | cfgs/config.py | Pandinosaurus/yolo2-pytorch | f046769ea157e6d57579c67dcef62fdd3b71111e | [
"MIT"
] | 1,663 | 2017-02-25T02:09:58.000Z | 2022-03-31T07:18:30.000Z | cfgs/config.py | Guroto/yolo2-pytorch | 17056ca69f097a07884135d9031c53d4ef217a6a | [
"MIT"
] | 115 | 2017-03-04T12:26:49.000Z | 2022-01-06T04:02:44.000Z | cfgs/config.py | Guroto/yolo2-pytorch | 17056ca69f097a07884135d9031c53d4ef217a6a | [
"MIT"
] | 492 | 2017-02-28T20:09:14.000Z | 2022-03-18T21:25:10.000Z | import os
from .config_voc import * # noqa
from .exps.darknet19_exp1 import * # noqa
def mkdir(path, max_depth=3):
parent, child = os.path.split(path)
if not os.path.exists(parent) and max_depth > 1:
mkdir(parent, max_depth-1)
if not os.path.exists(path):
os.mkdir(path)
# input and output size
############################
multi_scale_inp_size = [np.array([320, 320], dtype=np.int),
np.array([352, 352], dtype=np.int),
np.array([384, 384], dtype=np.int),
np.array([416, 416], dtype=np.int),
np.array([448, 448], dtype=np.int),
np.array([480, 480], dtype=np.int),
np.array([512, 512], dtype=np.int),
np.array([544, 544], dtype=np.int),
np.array([576, 576], dtype=np.int),
# np.array([608, 608], dtype=np.int),
] # w, h
multi_scale_out_size = [multi_scale_inp_size[0] / 32,
multi_scale_inp_size[1] / 32,
multi_scale_inp_size[2] / 32,
multi_scale_inp_size[3] / 32,
multi_scale_inp_size[4] / 32,
multi_scale_inp_size[5] / 32,
multi_scale_inp_size[6] / 32,
multi_scale_inp_size[7] / 32,
multi_scale_inp_size[8] / 32,
# multi_scale_inp_size[9] / 32,
] # w, h
inp_size = np.array([416, 416], dtype=np.int) # w, h
out_size = inp_size / 32
# for display
############################
def _to_color(indx, base):
""" return (b, r, g) tuple"""
base2 = base * base
b = 2 - indx / base2
r = 2 - (indx % base2) / base
g = 2 - (indx % base2) % base
return b * 127, r * 127, g * 127
base = int(np.ceil(pow(num_classes, 1. / 3)))
colors = [_to_color(x, base) for x in range(num_classes)]
# detection config
############################
thresh = 0.3
# dir config
############################
ROOT_DIR = os.path.abspath(os.path.join(os.path.dirname(__file__), '..'))
DATA_DIR = os.path.join(ROOT_DIR, 'data')
MODEL_DIR = os.path.join(ROOT_DIR, 'models')
TRAIN_DIR = os.path.join(MODEL_DIR, 'training')
TEST_DIR = os.path.join(MODEL_DIR, 'testing')
trained_model = os.path.join(MODEL_DIR, h5_fname)
pretrained_model = os.path.join(MODEL_DIR, pretrained_fname)
train_output_dir = os.path.join(TRAIN_DIR, exp_name)
test_output_dir = os.path.join(TEST_DIR, imdb_test, h5_fname)
mkdir(train_output_dir, max_depth=3)
mkdir(test_output_dir, max_depth=4)
rand_seed = 1024
use_tensorboard = True
log_interval = 50
disp_interval = 10
| 33.253012 | 73 | 0.537681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 342 | 0.123913 |
3334965719b021bbd03119042e95c8563a0cdb7e | 9,233 | py | Python | tests.py | jpchiodini/Grasp-Planning | e31234244b8f934743605ebea59d9d98a258957e | [
"MIT"
] | null | null | null | tests.py | jpchiodini/Grasp-Planning | e31234244b8f934743605ebea59d9d98a258957e | [
"MIT"
] | null | null | null | tests.py | jpchiodini/Grasp-Planning | e31234244b8f934743605ebea59d9d98a258957e | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tests.py
========
Created by: hbldh <henrik.blidh@nedomkull.com>
Created on: 2016-02-07, 23:50
"""
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
import numpy as np
import pyefd
lbl_1 = 5
img_1 = np.array(
[[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 64, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 0, 0, 0, 0, 64, 127, 64, 64, 0, 0, 64, 191, 255, 255, 255,
255],
[255, 255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 127, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 127, 255, 255, 191, 64, 0, 0, 0, 0, 0, 64, 127, 127, 255, 255, 255,
255, 255],
[255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 191, 0, 0, 0, 64, 127, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 64, 0, 0, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 64, 0, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 127, 0, 0, 0, 0, 127, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 191, 127, 0, 0, 0, 64, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 191, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 127, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 127, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 191, 255, 255, 255, 255, 127, 0, 0, 0, 191, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 127, 255, 255, 191, 64, 0, 0, 0, 191, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 191, 255, 255, 255, 255, 255, 255, 255,
255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 0, 0, 0, 64, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 127, 0, 0, 0, 64, 191, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255],
[255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255,
255, 255, 255, 255, 255]])
contour_1 = np.array([[24.0, 13.0125], [23.0125, 14.0], [23.004188481675392, 15.0], [23.0, 15.0125], [22.0125, 16.0],
[22.00313725490196, 17.0], [22.0, 17.004188481675392], [21.0, 17.004188481675392],
[20.004188481675392, 18.0], [20.0, 18.004188481675392], [19.0, 18.006299212598424],
[18.0, 18.006299212598424], [17.0, 18.004188481675392], [16.9875, 18.0], [16.0, 17.0125],
[15.993700787401576, 17.0], [15.0, 16.006299212598424], [14.995811518324608, 16.0],
[14.9875, 15.0], [14.0, 14.0125], [13.995811518324608, 14.0], [13.9875, 13.0], [13.0, 12.0125],
[12.996862745098039, 12.0], [12.993700787401576, 11.0], [12.9875, 10.0], [12.0, 9.0125],
[11.0, 9.003137254901961], [10.0, 9.006299212598424], [9.006299212598424, 10.0],
[9.003137254901961, 11.0], [9.003137254901961, 12.0], [9.004188481675392, 13.0], [9.0125, 14.0],
[10.0, 14.9875], [10.003137254901961, 15.0], [10.003137254901961, 16.0],
[10.003137254901961, 17.0], [10.003137254901961, 18.0], [10.003137254901961, 19.0],
[10.0, 19.0125], [9.0125, 20.0], [9.006299212598424, 21.0], [9.006299212598424, 22.0],
[9.0, 22.006299212598424], [8.9875, 22.0], [8.0, 21.0125], [7.996862745098039, 21.0],
[7.996862745098039, 20.0], [8.0, 19.9875], [8.9875, 19.0], [8.9875, 18.0],
[8.993700787401576, 17.0], [8.9875, 16.0], [8.0, 15.0125], [7.996862745098039, 15.0],
[7.9875, 14.0], [7.0, 13.0125], [6.993700787401575, 13.0], [6.0, 12.006299212598424],
[5.993700787401575, 12.0], [5.9875, 11.0], [5.995811518324607, 10.0], [6.0, 9.996862745098039],
[7.0, 9.9875], [7.9875, 9.0], [8.0, 8.995811518324608], [8.995811518324608, 8.0],
[9.0, 7.995811518324607], [10.0, 7.9875], [10.9875, 7.0], [11.0, 6.995811518324607],
[12.0, 6.995811518324607], [12.0125, 7.0], [13.0, 7.9875], [13.003137254901961, 8.0],
[13.006299212598424, 9.0], [13.0125, 10.0], [14.0, 10.9875], [14.004188481675392, 11.0],
[14.006299212598424, 12.0], [15.0, 12.993700787401576], [15.004188481675392, 13.0],
[15.006299212598424, 14.0], [16.0, 14.993700787401576], [16.00313725490196, 15.0],
[17.0, 15.996862745098039], [17.006299212598424, 16.0], [18.0, 16.993700787401576],
[19.0, 16.993700787401576], [19.993700787401576, 16.0], [20.0, 15.993700787401576],
[20.993700787401576, 15.0], [21.0, 14.9875], [21.9875, 14.0], [21.995811518324608, 13.0],
[21.99686274509804, 12.0], [21.99686274509804, 11.0], [21.993700787401576, 10.0],
[21.0, 9.006299212598424], [20.993700787401576, 9.0], [21.0, 8.993700787401576],
[22.0, 8.996862745098039], [22.006299212598424, 9.0], [23.0, 9.993700787401576],
[23.006299212598424, 10.0], [24.0, 10.993700787401576], [24.00313725490196, 11.0],
[24.00313725490196, 12.0], [24.00313725490196, 13.0], [24.0, 13.0125]])
def test_efd_shape_1():
coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order=10)
assert coeffs.shape == (10, 4)
def test_efd_shape_2():
c = pyefd.elliptic_fourier_descriptors(contour_1, order=40)
assert c.shape == (40, 4)
def test_normalizing_1():
c = pyefd.elliptic_fourier_descriptors(contour_1, normalize=False)
assert np.abs(c[0, 0]) > 0.0
assert np.abs(c[0, 1]) > 0.0
assert np.abs(c[0, 2]) > 0.0
def test_normalizing_2():
c = pyefd.elliptic_fourier_descriptors(contour_1, normalize=True)
np.testing.assert_almost_equal(c[0, 0], 1.0, decimal=14)
np.testing.assert_almost_equal(c[0, 1], 0.0, decimal=14)
np.testing.assert_almost_equal(c[0, 2], 0.0, decimal=14)
def test_locus():
locus = pyefd.calculate_dc_coefficients(contour_1)
np.testing.assert_array_almost_equal(locus, np.mean(contour_1, axis=0), decimal=0)
def test_fit_1():
n = 300
locus = pyefd.calculate_dc_coefficients(contour_1)
coeffs = pyefd.elliptic_fourier_descriptors(contour_1, order=20)
t = np.linspace(0, 1.0, n)
xt = np.ones((n,)) * locus[0]
yt = np.ones((n,)) * locus[1]
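    # Truncated elliptic Fourier reconstruction:
    #   x(t) = A0 + sum_n [a_n cos(2*pi*n*t) + b_n sin(2*pi*n*t)]
    #   y(t) = C0 + sum_n [c_n cos(2*pi*n*t) + d_n sin(2*pi*n*t)]
    # with (A0, C0) the locus and coeffs[n-1] = (a_n, b_n, c_n, d_n).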
for n in pyefd._range(coeffs.shape[0]):
xt += (coeffs[n, 0] * np.cos(2 * (n + 1) * np.pi * t)) + \
(coeffs[n, 1] * np.sin(2 * (n + 1) * np.pi * t))
yt += (coeffs[n, 2] * np.cos(2 * (n + 1) * np.pi * t)) + \
(coeffs[n, 3] * np.sin(2 * (n + 1) * np.pi * t))
assert True
| 58.069182 | 120 | 0.552475 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 148 | 0.016029 |
3334b81c461f0f87292473253147e7b45b17a48f | 1,705 | py | Python | scripts/wsi_bot_show_regions.py | higex/qpath | 0377f2fdadad6e02ecde8ba2557fe9b957280fa1 | [
"MIT"
] | 6 | 2017-03-18T19:17:42.000Z | 2019-05-05T14:57:31.000Z | WSItk/tools/wsi_bot_show_regions.py | vladpopovici/WSItk | 02db9dbf1148106a576d7b4dd7965c73607efdae | [
"MIT"
] | null | null | null | WSItk/tools/wsi_bot_show_regions.py | vladpopovici/WSItk | 02db9dbf1148106a576d7b4dd7965c73607efdae | [
"MIT"
] | 4 | 2015-11-29T14:47:25.000Z | 2019-11-28T03:16:39.000Z | # -*- coding: utf-8 -*-
"""
SHOW_REGIONS
Emphasizes some regions in the image, by decreasing the importance of the rest.
@author: vlad
"""
from __future__ import (absolute_import, division, print_function, unicode_literals)
from builtins import *
import argparse as opt
import skimage.io
import numpy as np
from util.storage import ModelPersistence
from util.visualization import enhance_patches
__author__ = 'vlad'
__version__ = 0.1
def main():
p = opt.ArgumentParser(description="""
Emphasizes the patches with a given code (from BoT) by reducing the contrast of the rest of the image.
"""
)
p.add_argument('image', action='store', help='image file name')
p.add_argument('res_image', action='store', help='name of the resulting image')
p.add_argument('bot_result', action='store', help='a file with BoT coding for regions')
p.add_argument('bot_code', action='store', help='the code of the regions to be emphasized', type=int)
    p.add_argument('-g', '--gamma', action='store', type=float,
                   help='the gamma level of the background regions',
                   default=0.2)
args = p.parse_args()
img = skimage.io.imread(args.image)
regs = []
with ModelPersistence(args.bot_result, 'r', format='pickle') as d:
block_codes = d['l1_codes']
regs = d['regs']
#print(block_codes)
#print(args.bot_code)
# filter regions of interest:
roi = [ regs[k] for k in np.where(np.array(block_codes, dtype=np.int) == args.bot_code)[0] ]
#print(roi)
img = enhance_patches(img, roi, _gamma=args.gamma)
skimage.io.imsave(args.res_image, img)
return
if __name__ == '__main__':
main() | 28.898305 | 106 | 0.674487 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 635 | 0.372434 |
3335e848cff8038f4a309c43400a1a6a921170c6 | 985 | py | Python | isomyr/tests/test_thing.py | dave-leblanc/isomyr | 2eae8067687408ba48e0a88de97fbd4fe6d57848 | [
"BSD-3-Clause"
] | null | null | null | isomyr/tests/test_thing.py | dave-leblanc/isomyr | 2eae8067687408ba48e0a88de97fbd4fe6d57848 | [
"BSD-3-Clause"
] | null | null | null | isomyr/tests/test_thing.py | dave-leblanc/isomyr | 2eae8067687408ba48e0a88de97fbd4fe6d57848 | [
"BSD-3-Clause"
] | null | null | null | from unittest import TestCase
from isomyr.thing import Thing
from isomyr.world.world import Scene
class ThingOfThingsRemoveObjectTestCase(TestCase):
def setUp(self):
self.scene = Scene(0)
self.scene.addObject(Thing(name="apple"))
self.scene.addObject(Thing(name="orange"))
self.scene.addObject(Thing())
def test_byName(self):
self.assertEquals(len(self.scene.objectList), 3)
result = self.scene.removeObject(name="apple")
self.assertEquals(result, True)
self.assertEquals(len(self.scene.objectList), 2)
def test_byInstance(self):
self.assertEquals(len(self.scene.objectList), 3)
instance = self.scene.getObject("orange")
result = self.scene.removeObject(instance=instance)
self.assertEquals(result, True)
self.assertEquals(len(self.scene.objectList), 2)
def test_byNameWithNotFoundError(self):
pass
def test_withNoIndexError(self):
pass
| 29.848485 | 59 | 0.684264 | 883 | 0.896447 | 0 | 0 | 0 | 0 | 0 | 0 | 30 | 0.030457 |
33364bb5677f1a9389265f84b5d60907696bc680 | 492 | py | Python | python/consecutive-characters.py | alirezaghey/leetcode-solutions | 676b71b4790c64d21af91dce02e97ee47e78d523 | [
"MIT"
] | 3 | 2020-10-10T00:14:23.000Z | 2022-03-02T21:16:29.000Z | python/consecutive-characters.py | alirezaghey/leetcode-solutions | 676b71b4790c64d21af91dce02e97ee47e78d523 | [
"MIT"
] | null | null | null | python/consecutive-characters.py | alirezaghey/leetcode-solutions | 676b71b4790c64d21af91dce02e97ee47e78d523 | [
"MIT"
] | 1 | 2021-09-14T05:16:54.000Z | 2021-09-14T05:16:54.000Z | from itertools import groupby
class Solution:
def maxPower(self, s: str) -> int:
return max(len(list(g)) for _, g in groupby(s))
def maxPower2(self, s: str) -> int:
best, curr_count, curr_char = 1, 1, s[0]
for i in range(1, len(s)):
if s[i] == curr_char:
curr_count += 1
else:
curr_char = s[i]
curr_count = 1
best = max(best, curr_count)
return best | 28.941176 | 55 | 0.487805 | 462 | 0.939024 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3339f7cf782a2803830764348c1674b391c59c54 | 113 | py | Python | DigitalMeLib/servers/gedis/GedisProcessManager.py | jdelrue/digital_me | e5c92c405c0cea419ce18d25863f35d1bfe5a428 | [
"Apache-2.0"
] | null | null | null | DigitalMeLib/servers/gedis/GedisProcessManager.py | jdelrue/digital_me | e5c92c405c0cea419ce18d25863f35d1bfe5a428 | [
"Apache-2.0"
] | 72 | 2018-08-01T06:13:46.000Z | 2019-02-01T15:50:20.000Z | DigitalMeLib/servers/gedis/GedisProcessManager.py | jdelrue/digital_me | e5c92c405c0cea419ce18d25863f35d1bfe5a428 | [
"Apache-2.0"
] | 2 | 2018-08-05T08:09:13.000Z | 2018-11-21T13:11:28.000Z | from jumpscale import j
JSBASE = j.application.jsbase_get_class()
class GedisProcessManager(JSBASE):
pass
| 14.125 | 41 | 0.778761 | 43 | 0.380531 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
333a76b375064f0b441523ba76ee75b882d55473 | 4,648 | py | Python | lifesaver/bot/exts/health.py | lun-4/lifesaver | e8c4b492490d678db80258f69fce6dc4769fd1d7 | [
"MIT"
] | 12 | 2017-12-21T03:44:52.000Z | 2021-02-05T02:09:13.000Z | lifesaver/bot/exts/health.py | lun-4/lifesaver | e8c4b492490d678db80258f69fce6dc4769fd1d7 | [
"MIT"
] | 9 | 2017-12-21T01:56:07.000Z | 2020-12-31T00:01:20.000Z | lifesaver/bot/exts/health.py | lun-4/lifesaver | e8c4b492490d678db80258f69fce6dc4769fd1d7 | [
"MIT"
] | 2 | 2017-12-21T01:52:07.000Z | 2019-12-17T01:51:50.000Z | # encoding: utf-8
import asyncio
import logging
import random
from typing import Optional, Tuple
import discord
from discord.ext import commands
import lifesaver
from lifesaver.utils.formatting import truncate
from lifesaver.utils.timing import Timer, format_seconds
log = logging.getLogger(__name__)
SendVerdict = Tuple[bool, Optional[Exception]]
def bold_timer(timer: Timer) -> str:
if timer.duration > 1:
return f"**{timer}**"
else:
return str(timer)
class Health(lifesaver.Cog):
def __init__(self, bot, *args, **kwargs):
super().__init__(bot, *args, **kwargs)
self.rtt_sends = {}
self.rtt_edits = {}
@commands.Cog.listener()
async def on_message_edit(self, message, _message):
event = self.rtt_edits.get(message.id)
if event:
log.debug("RTT: Received edit_rx for %d.", message.id)
event.set()
del self.rtt_edits[message.id]
@commands.Cog.listener()
async def on_message(self, message):
event = self.rtt_sends.get(message.nonce)
if event:
log.debug(
"RTT: Received send_rx for %d (nonce=%d).", message.id, message.nonce
)
event.set()
del self.rtt_sends[message.nonce]
@lifesaver.command(aliases=["p"])
@commands.cooldown(1, 1, type=commands.BucketType.guild)
async def ping(self, ctx: lifesaver.commands.Context):
"""Pings the bot."""
await ctx.send("Pong!")
@lifesaver.command()
@commands.cooldown(3, 4, type=commands.BucketType.guild)
async def rtt(self, ctx: lifesaver.commands.Context):
"""Measures API and gateway latency.
"TX" refers to the time it takes for the HTTP request to be sent, and
for a response to be received and processed.
"RX" refers to the time it takes for the gateway to dispatch an action,
for example "Edit RX" refers to the time between editing a message with
        the API and the gateway dispatching a MESSAGE_UPDATE packet.
"""
nonce = random.randint(1000, 10000)
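        # The nonce ties the HTTP send to its gateway MESSAGE_CREATE dispatch:
        # on_message() looks it up in self.rtt_sends to stop the RX timer.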
send_failed: SendVerdict = (False, None)
edit_failed: SendVerdict = (False, None)
# Send a message, and wait for it to come back.
with Timer() as send:
event = asyncio.Event()
self.rtt_sends[nonce] = event
with Timer() as send_tx:
try:
message = await ctx.send("RTT: `\N{LOWER HALF BLOCK}`", nonce=nonce)
except discord.HTTPException as error:
send_failed = (True, error)
with Timer() as send_rx:
await event.wait()
# Edit a message, and wait for it to come back.
with Timer() as edit:
event = asyncio.Event()
self.rtt_edits[message.id] = event
with Timer() as edit_tx:
try:
await message.edit(content="RTT: `\N{FULL BLOCK}`")
except discord.HTTPException as error:
edit_failed = (True, error)
with Timer() as edit_rx:
await event.wait()
avg_rx = (send_rx.duration + edit_rx.duration) / 2
avg_tx = (send_tx.duration + edit_tx.duration) / 2
slow = send.duration > 1 or edit.duration > 1
def format_transfer(timer, tx, rx):
timer = bold_timer(timer)
tx = bold_timer(tx)
rx = bold_timer(rx)
return f"RTT: {timer}\n\nTX: {tx}\nRX: {rx}"
if slow:
color = discord.Color.red()
else:
color = discord.Color.green()
embed = discord.Embed(title="RTT Results", color=color)
embed.add_field(
name="MESSAGE_CREATE", value=format_transfer(send, send_tx, send_rx),
)
embed.add_field(
name="MESSAGE_UPDATE", value=format_transfer(edit, edit_tx, edit_rx),
)
embed.set_footer(
text=f"Avg. TX: {format_seconds(avg_tx)}, RX: {format_seconds(avg_rx)}",
)
failures = {"Send": send_failed, "Edit": edit_failed}
if any(result[0] for (name, result) in failures.items()):
content = "\n".join(
f"{name}: Failed with HTTP {result[1].code}: {truncate(result[1].message, 100)}" # type: ignore
for (name, result) in failures.items()
if result[0] is not False
)
embed.add_field(name="Failures", value=content, inline=False)
await message.edit(content="", embed=embed)
def setup(bot):
bot.add_cog(Health(bot))
| 32.055172 | 112 | 0.586274 | 4,114 | 0.885112 | 0 | 0 | 3,912 | 0.841652 | 3,669 | 0.789372 | 958 | 0.20611 |
333a9b81c1942bff823966256c44e46de0b7f8db | 1,753 | py | Python | install/install.py | naztronaut/FCW | e272f29dda54f8a18f9ff533de598bb0bfd29099 | [
"MIT"
] | 4 | 2021-01-03T05:54:23.000Z | 2021-08-06T19:39:04.000Z | install/install.py | naztronaut/FCW | e272f29dda54f8a18f9ff533de598bb0bfd29099 | [
"MIT"
] | 4 | 2020-10-15T17:09:31.000Z | 2022-03-26T00:46:48.000Z | install/install.py | naztronaut/FCW | e272f29dda54f8a18f9ff533de598bb0bfd29099 | [
"MIT"
] | 1 | 2021-01-17T14:12:43.000Z | 2021-01-17T14:12:43.000Z | # install.py
# Version: 1.0.0
# Installs dependencies needed for FCW
# Author: Nazmus Nasir
# Website: https://www.easyprogramming.net
import os
from shutil import copy2
def install_dependencies():
print("================== Start Installing PIP and venv ==================")
os.system("sudo apt install python3-pip python3-venv -y")
print("================== Completed Installing PIP ==================")
print("================== Start Updating PIP ==================")
os.system("sudo pip3 install --upgrade pip")
print("================== Completed Updating PIP ==================")
print("================== Start Installing Setuptools and Libatlas ==================")
os.system("sudo apt install python-setuptools libatlas-base-dev -y")
print("================== Completed Installing Setuptools and Libatlas ==================")
print("================== Start Installing Fortran ==================")
os.system("sudo apt install libatlas3-base libgfortran5 -y")
print("================== Completed Installing Fortran ==================")
print("================== Start Installing rpi_ws281x ==================")
os.system("sudo pip3 install rpi_ws281x")
print("================== Completed Installing rpi_ws281x ==================")
print("================== Start Installing Apache ==================")
os.system("sudo apt install apache2 -y")
print("================== Completed Installing Apache ==================")
print("================== Start Installing Mod WSGI ==================")
os.system("sudo apt install libapache2-mod-wsgi-py3 -y")
print("================== Completed Installing Mod WSGI ==================")
install_dependencies()
| 40.767442 | 95 | 0.503708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,370 | 0.781517 |
333dcc9167d75b30415a919b52fe0903e6d5c709 | 1,453 | py | Python | convert-to-loom/convert-to-loom-3.py | mzager/dv-pipelines | 3356753cc56a5298bb075f12681f9282d8f08658 | [
"MIT"
] | 3 | 2020-02-24T21:08:11.000Z | 2020-05-19T18:26:01.000Z | convert-to-loom/convert-to-loom-3.py | mzager/dv-pipelines | 3356753cc56a5298bb075f12681f9282d8f08658 | [
"MIT"
] | null | null | null | convert-to-loom/convert-to-loom-3.py | mzager/dv-pipelines | 3356753cc56a5298bb075f12681f9282d8f08658 | [
"MIT"
] | 2 | 2020-01-04T00:23:07.000Z | 2020-02-26T17:54:34.000Z | #!/bin/python3
import sys
import os
import pandas as pd
import scanpy as sc
import anndata
from anndata import AnnData
sc.settings.verbosity = 3 # verbosity: errors (0), warnings (1), info (2), hints (3)
working_dir = os.getcwd()
adata = sc.read(os.path.join(working_dir, 'gene_count.txt'),
cache=True,
delimiter=' ').transpose()
print('adata variables:')
print(dir(adata))
df_cell = pd.read_csv(os.path.join(working_dir, 'cell_annotate.csv'), delimiter=',')
df_gene = pd.read_csv(os.path.join(working_dir, 'gene_annotate.csv'), delimiter=',')
df_cell.index = df_cell["sample"]
df_gene.index = df_gene["gene_id"]
adata.obs = df_cell # based off of index
adata.var = df_gene
# save the loom file
adata.write_loom("output.loom")
# gene count - cell by gene, based on index. They're exported using the same index
# annotations on the columns
# gene index annotations on the rows
# other thing to look at - anndata. CDS objects (cell dataset, in R)
# h5ad, loom, cds, and Seurat. And now our pubweb format.
# Top row gives the dimensions of the dataset. It's a sparse matrix;
# it's mostly zeroes. A line like "58 1 1" is one entry.
# Matrix Market format: https://math.nist.gov/MatrixMarket/formats.html
# First column is the row, second is the column, third is the value;
# indices are 1-based.
# wilfred is struggling to convert this to a dense matrix. -> they're straight up data tables.
# HDF5 holds either sparse matrices or strongly typed big-ass lists.
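# A minimal densification sketch (the .mtx filename is hypothetical; mmread
# and coo_matrix are standard scipy APIs):
#
#   from scipy.io import mmread
#   dense = mmread('gene_count.mtx').toarray()
#
# or, from already-parsed 1-based (row, col, value) triplet arrays:
#
#   from scipy.sparse import coo_matrix
#   dense = coo_matrix((vals, (rows - 1, cols - 1))).toarray()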
| 31.586957 | 94 | 0.720578 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 659 | 0.453544 |
333df86277504bde365d69f5650cddd392f0652c | 96 | py | Python | venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_forest.py | Retraces/UkraineBot | 3d5d7f8aaa58fa0cb8b98733b8808e5dfbdb8b71 | [
"MIT"
] | 2 | 2022-03-13T01:58:52.000Z | 2022-03-31T06:07:54.000Z | venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_forest.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | 19 | 2021-11-20T04:09:18.000Z | 2022-03-23T15:05:55.000Z | venv/lib/python3.8/site-packages/poetry/core/_vendor/lark/parsers/earley_forest.py | DesmoSearch/Desmobot | b70b45df3485351f471080deb5c785c4bc5c4beb | [
"MIT"
] | null | null | null | /home/runner/.cache/pip/pool/8f/d6/74/783ee5c7dc6070d67f88eab5cd5dae217fdec6556b8d97a3bd1061e541 | 96 | 96 | 0.895833 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
333f7d3158bfef79ceb32eebbb1b5503f134e870 | 1,900 | py | Python | schedsi/threads/_bg_stat_thread.py | z33ky/schedsi | 3affe28a3e1d2001c639d7c0423cb105d1991590 | [
"CC0-1.0"
] | 1 | 2017-08-03T12:58:53.000Z | 2017-08-03T12:58:53.000Z | schedsi/threads/_bg_stat_thread.py | z33ky/schedsi | 3affe28a3e1d2001c639d7c0423cb105d1991590 | [
"CC0-1.0"
] | null | null | null | schedsi/threads/_bg_stat_thread.py | z33ky/schedsi | 3affe28a3e1d2001c639d7c0423cb105d1991590 | [
"CC0-1.0"
] | null | null | null | """Define the :class:`_BGStatThread`.
This should be used in favor of :class:`Thread` for non-worker threads.
"""
from schedsi.threads.thread import Thread, LOG_INDIVIDUAL
import sys
class _BGStatThread(Thread):
"""Base class for threads recording background time."""
def __init__(self, module, tid=None, **kwargs):
"""Create a :class:`_BGStatThread`."""
super().__init__(module, tid=tid, **kwargs)
if tid is None:
print('Warning: Did not specify tid for non-worker thread', self.module.name, self.tid,
'. Usually automatic naming is not desired here.', file=sys.stderr)
self.bg_times = [[]]
def run_background(self, current_time, run_time):
"""Update runtime state.
See :meth:`Thread.run_background`.
"""
if LOG_INDIVIDUAL:
self.bg_times[-1].append(run_time)
self._update_ready_time(current_time)
def resume(self, current_time, returning):
if LOG_INDIVIDUAL and returning:
self.bg_times.append([])
super().resume(current_time, returning)
def finish(self, current_time):
"""Become inactive.
See :meth:`Thread.finish`.
"""
if self.module.parent is not None or self.tid != 0:
if LOG_INDIVIDUAL:
self.bg_times.append([])
else:
# in single timer scheduling the kernel is restarted
# but we already got a new list from resume() after the context switch
assert self.bg_times[-1] == []
super().finish(current_time)
def get_statistics(self, current_time):
"""Obtain statistics.
See :meth:`Thread.get_statistics`.
"""
stats = super().get_statistics(current_time)
stats['bg'] = self.bg_times
if stats['bg'][-1] == []:
stats['bg'].pop()
return stats
| 30.645161 | 99 | 0.603684 | 1,712 | 0.901053 | 0 | 0 | 0 | 0 | 0 | 0 | 666 | 0.350526 |
333fc7cf7e1820391a570f954460758928bb90e7 | 753 | py | Python | 069_totient_maximum.py | fbcom/project-euler | 3c2194f797d54a0cc04031cd0be153f6a6f849ad | [
"MIT"
] | null | null | null | 069_totient_maximum.py | fbcom/project-euler | 3c2194f797d54a0cc04031cd0be153f6a6f849ad | [
"MIT"
] | null | null | null | 069_totient_maximum.py | fbcom/project-euler | 3c2194f797d54a0cc04031cd0be153f6a6f849ad | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# A Solution to "Totient maximum" – Project Euler Problem No. 69
# by Florian Buetow
#
# Sourcecode: https://github.com/fbcom/project-euler
# Problem statement: https://projecteuler.net/problem=69
def is_prime(n):
if n < 2:
return False
if n == 2:
return True
if n % 2 == 0:
return False
for d in range(3, int(n**0.5)+1, 2):
if n % d == 0:
return False
return True
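# Why the primorial wins: Euler's totient is multiplicative, so
#   n / phi(n) = product over the distinct primes p dividing n of p / (p - 1).
# Every factor p/(p-1) exceeds 1 and shrinks as p grows, so the ratio is
# maximized by multiplying together the smallest primes while staying under
# the limit: 2*3*5*7*11*13*17 = 510510 < 1,000,000.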
# Solve
solution = n = i = 2 # starting with a prime
while n < 1000*1000:
i = i + 1
while not is_prime(i):
i += 1
solution = n
    n = n * i  # multiply in the next prime; keeping n a primorial maximizes n/phi(n)
print "Solution:", solution
| 22.818182 | 95 | 0.586985 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 360 | 0.476821 |
334183eb998b219f40fc5f9ed455577122ddc46b | 1,401 | py | Python | lectures/07-python-dictionaries/examples/gashlycrumb.py | mattmiller899/biosys-analytics | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | [
"MIT"
] | 4 | 2019-01-10T17:12:37.000Z | 2019-03-01T18:25:07.000Z | lectures/07-python-dictionaries/examples/gashlycrumb.py | mattmiller899/biosys-analytics | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | [
"MIT"
] | null | null | null | lectures/07-python-dictionaries/examples/gashlycrumb.py | mattmiller899/biosys-analytics | ab24a4c7206ed9a865e896daa57cee3c4e62df1f | [
"MIT"
] | 33 | 2019-01-05T17:03:47.000Z | 2019-11-11T20:48:24.000Z | #!/usr/bin/env python3
"""dictionary lookup"""
import os
import sys
args = sys.argv[1:]
if len(args) != 1:
print('Usage: {} LETTER'.format(os.path.basename(sys.argv[0])))
sys.exit(1)
letter = args[0].upper()
lines = """
A is for Amy who fell down the stairs.
B is for Basil assaulted by bears.
C is for Clara who wasted away.
D is for Desmond thrown out of a sleigh.
E is for Ernest who choked on a peach.
F is for Fanny sucked dry by a leech.
G is for George smothered under a rug.
H is for Hector done in by a thug.
I is for Ida who drowned in a lake.
J is for James who took lye by mistake.
K is for Kate who was struck with an axe.
L is for Leo who choked on some tacks.
M is for Maud who was swept out to sea.
N is for Neville who died of ennui.
O is for Olive run through with an awl.
P is for Prue trampled flat in a brawl.
Q is for Quentin who sank on a mire.
R is for Rhoda consumed by a fire.
S is for Susan who perished of fits.
T is for Titus who flew into bits.
U is for Una who slipped down a drain.
V is for Victor squashed under a train.
W is for Winnie embedded in ice.
X is for Xerxes devoured by mice.
Y is for Yorick whose head was bashed in.
Z is for Zillah who drank too much gin.
""".strip().splitlines()
lookup = {}
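# index each verse by its leading letter for O(1) lookup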
for line in lines:
lookup[line[0]] = line
if letter in lookup:
print(lookup[letter])
else:
print('I do not know "{}"'.format(letter))
| 26.942308 | 67 | 0.708779 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,073 | 0.765882 |
3341a45cd2bb6b6d1354a60132e865acaaafa2de | 762 | py | Python | to_nwb/extensions/buzsaki_meta/buzsaki_meta.py | mpompolas/to_nwb | 1317f0ee0f4d80dde451d60d8eb5c6a544e214fe | [
"BSD-3-Clause"
] | 1 | 2020-03-31T20:02:01.000Z | 2020-03-31T20:02:01.000Z | to_nwb/extensions/buzsaki_meta/buzsaki_meta.py | mpompolas/to_nwb | 1317f0ee0f4d80dde451d60d8eb5c6a544e214fe | [
"BSD-3-Clause"
] | 2 | 2020-08-27T18:16:04.000Z | 2020-09-08T18:43:34.000Z | to_nwb/extensions/buzsaki_meta/buzsaki_meta.py | mpompolas/to_nwb | 1317f0ee0f4d80dde451d60d8eb5c6a544e214fe | [
"BSD-3-Clause"
] | 5 | 2018-04-04T21:27:23.000Z | 2019-04-01T13:40:00.000Z | from pynwb import load_namespaces
from ..auto_class import get_class, get_multi_container
# load custom classes
namespace = 'buzsaki_meta'
ns_path = namespace + '.namespace.yaml'
ext_source = namespace + '.extensions.yaml'
load_namespaces(ns_path)
BuzSubject = get_class(namespace, 'BuzSubject')
Histology = get_class(namespace, 'Histology')
Probe = get_class(namespace, 'Probe')
VirusInjection = get_class(namespace, 'VirusInjection')
VirusInjections = get_multi_container(namespace, 'VirusInjections', VirusInjection)
Surgery = get_class(namespace, 'Surgery')
Surgeries = get_multi_container(namespace, 'Surgeries', Surgery)
Manipulation = get_class(namespace, 'Manipulation')
Manipulations = get_multi_container(namespace, 'Manipulations', Manipulation) | 34.636364 | 83 | 0.804462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.238845 |
33420b595ab8ea2d66e865aea4f9be309fdb9795 | 709 | py | Python | tests/search_test.py | krishna-kalavadia/computer-vision-python | 6b37aaa433153f82412d865cef8fce81ce8422c7 | [
"BSD-3-Clause"
] | 9 | 2021-03-23T17:19:11.000Z | 2022-01-25T20:45:34.000Z | tests/search_test.py | krishna-kalavadia/computer-vision-python | 6b37aaa433153f82412d865cef8fce81ce8422c7 | [
"BSD-3-Clause"
] | 31 | 2020-11-18T01:47:19.000Z | 2022-03-24T23:14:46.000Z | tests/search_test.py | krishna-kalavadia/computer-vision-python | 6b37aaa433153f82412d865cef8fce81ce8422c7 | [
"BSD-3-Clause"
] | 42 | 2020-11-18T01:19:14.000Z | 2022-03-22T19:38:12.000Z | from modules.search.Search import Search
from modules.search.searchWorker import searchWorker
mock_tent = {
"latitude": 51.083665,
"longitude": -114.114693
}
mock_plane = {
"latitude": 51.059971,
"longitude": -114.10714
}
def test_search_function():
search = Search()
command = search.perform_search(tentGPS=mock_tent, planeGPS=mock_plane)
assert 0.9 * 168 < command["heading"] < 1.1 * 168
assert command["latestDistance"] == 0
def test_search_worker():
mock_plane_data = {
"gpsCoordinates": mock_plane
}
command = searchWorker(mock_plane_data, mock_tent)
assert 0.9 * 168 < command["heading"] < 1.1 * 168
assert command["latestDistance"] == 0
| 25.321429 | 75 | 0.681241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 108 | 0.152327 |
3343300fe9fc63763059c54d0198c6efedcd6999 | 1,774 | py | Python | Lesson 12 Keras/Test_Keras.py | alchemz/Self-Driving-Car-Engineer-Nanodegree | 70d6ae9d741b6c53712e0099af04597dc0ba0291 | [
"MIT"
] | 1 | 2021-03-20T12:32:35.000Z | 2021-03-20T12:32:35.000Z | Lesson 12 Keras/Test_Keras.py | alchemz/Self-Driving-Car-Engineer-Nanodegree | 70d6ae9d741b6c53712e0099af04597dc0ba0291 | [
"MIT"
] | null | null | null | Lesson 12 Keras/Test_Keras.py | alchemz/Self-Driving-Car-Engineer-Nanodegree | 70d6ae9d741b6c53712e0099af04597dc0ba0291 | [
"MIT"
] | null | null | null | # Load pickled data
import pickle
import numpy as np
import tensorflow as tf
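# Compatibility shim: some older Keras releases reference
# tf.python.control_flow_ops, which newer TensorFlow builds no longer expose.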
tf.python.control_flow_ops = tf
with open('small_train_traffic.p', mode='rb') as f:
data = pickle.load(f)
X_train, y_train = data['features'], data['labels']
# Initial Setup for Keras
from keras.models import Sequential
from keras.layers.core import Dense, Activation, Flatten, Dropout
from keras.layers.convolutional import Convolution2D
from keras.layers.pooling import MaxPooling2D
# TODO: Build the Final Test Neural Network in Keras Here
model = Sequential()
model.add(Convolution2D(32, 3, 3, input_shape=(32, 32, 3)))
model.add(MaxPooling2D((2, 2)))
model.add(Dropout(0.5))
model.add(Activation('relu'))
model.add(Flatten())
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dense(5))
model.add(Activation('softmax'))
# preprocess data
X_normalized = np.array(X_train / 255.0 - 0.5 )
from sklearn.preprocessing import LabelBinarizer
label_binarizer = LabelBinarizer()
y_one_hot = label_binarizer.fit_transform(y_train)
model.compile('adam', 'categorical_crossentropy', ['accuracy'])
history = model.fit(X_normalized, y_one_hot, nb_epoch=10, validation_split=0.2)
with open('small_test_traffic.p', 'rb') as f:
data_test = pickle.load(f)
X_test = data_test['features']
y_test = data_test['labels']
# preprocess data
X_normalized_test = np.array(X_test / 255.0 - 0.5 )
y_one_hot_test = label_binarizer.fit_transform(y_test)
print("Testing")
# TODO: Evaluate the test data in Keras Here
metrics = model.evaluate(X_normalized_test, y_one_hot_test)
# TODO: UNCOMMENT CODE
for metric_i in range(len(model.metrics_names)):
metric_name = model.metrics_names[metric_i]
metric_value = metrics[metric_i]
print('{}: {}'.format(metric_name, metric_value))
| 30.067797 | 79 | 0.756483 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 370 | 0.208568 |
3344178b165547c21ead9ed9691b9fa23693168d | 2,360 | py | Python | crude/extrude_crude/take_05_model_run.py | i2mint/crude | 29fea0c7cd7cf58055b14bc47519bd527469f8d0 | [
"Apache-2.0"
] | null | null | null | crude/extrude_crude/take_05_model_run.py | i2mint/crude | 29fea0c7cd7cf58055b14bc47519bd527469f8d0 | [
"Apache-2.0"
] | null | null | null | crude/extrude_crude/take_05_model_run.py | i2mint/crude | 29fea0c7cd7cf58055b14bc47519bd527469f8d0 | [
"Apache-2.0"
] | null | null | null | """
Same as take_04_model_run, but where the dispatch is not as manual.
"""
from front.crude import KT, StoreName, Mall
from crude.extrude_crude.extrude_crude_util import mall, np, apply_model
# ---------------------------------------------------------------------------------------
# dispatchable function:
from front.crude import prepare_for_crude_dispatch
f = prepare_for_crude_dispatch(apply_model, param_to_mall_map=mall)
assert all(
f("fitted_model_1", "test_fvs") == np.array([[0.0], [1.0], [0.5], [2.25], [-1.5]])
)
def simple_mall_dispatch_core_func(
key: KT, action: str, store_name: StoreName, mall: Mall
):
if not store_name:
# if store_name empty, list the store names (i.e. the mall keys)
return list(mall)
else: # if not, get the store
store = mall[store_name]
if action == "list":
key = key.strip() # to handle some invisible whitespace that would screw things
return list(filter(lambda k: key in k, store))
elif action == "get":
return store[key]
# TODO: the function doesn't see updates made to mall. Fix.
# Just the partial (with mall bound), but with the mall arg hidden (otherwise it would be dispatched)
def explore_mall(key: KT, action: str, store_name: StoreName):
return simple_mall_dispatch_core_func(key, action, store_name, mall=mall)
# Attempt to do this wit i2.wrapper
# from functools import partial
# from i2.wrapper import rm_params_ingress_factory, wrap
#
# without_mall_param = partial(
# wrap, ingress=partial(rm_params_ingress_factory, params_to_remove="mall")
# )
# mall_exploration_func = without_mall_param(
# partial(simple_mall_dispatch_core_func, mall=mall)
# )
# mall_exploration_func.__name__ = "explore_mall"
if __name__ == "__main__":
from crude.util import ignore_import_problems
with ignore_import_problems:
from streamlitfront.base import dispatch_funcs
from functools import partial
dispatchable_apply_model = prepare_for_crude_dispatch(
apply_model, output_store="model_results"
)
# extra, to get some defaults in:
dispatchable_apply_model = partial(
dispatchable_apply_model,
fitted_model="fitted_model_1",
fvs="test_fvs",
)
app = dispatch_funcs([dispatchable_apply_model, explore_mall])
app()
| 34.202899 | 89 | 0.677542 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 993 | 0.420763 |
33442932bed34ae8a7cfa58a82b7cd61d3e5042f | 145 | py | Python | tests/conftest.py | Anishmourya/flask-restx-api | 6e64a22ce14dd97d69809c3b50cb5781918fd1d4 | [
"MIT"
] | 3 | 2020-04-01T15:10:59.000Z | 2021-09-30T13:00:20.000Z | tests/conftest.py | Anishmourya/flask-restx-api | 6e64a22ce14dd97d69809c3b50cb5781918fd1d4 | [
"MIT"
] | null | null | null | tests/conftest.py | Anishmourya/flask-restx-api | 6e64a22ce14dd97d69809c3b50cb5781918fd1d4 | [
"MIT"
] | null | null | null | import pytest
from app import create_app
@pytest.fixture
def client():
app = create_app()
client = app.test_client()
return client
| 14.5 | 30 | 0.703448 | 0 | 0 | 0 | 0 | 101 | 0.696552 | 0 | 0 | 0 | 0 |
3344e7fbcc20c1b88448cf2bc6b2cea4cf5be780 | 331 | py | Python | Part_3_advanced/m14_metaclass/register_cls/homework_1_solution/example_system/bike.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m14_metaclass/register_cls/homework_1_solution/example_system/bike.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | Part_3_advanced/m14_metaclass/register_cls/homework_1_solution/example_system/bike.py | Mikma03/InfoShareacademy_Python_Courses | 3df1008c8c92831bebf1625f960f25b39d6987e6 | [
"MIT"
] | null | null | null | from example_system.serializable import RegisteredSerializable
class Bike(RegisteredSerializable):
def __init__(self, brand: str, model: str) -> None:
super().__init__(brand, model)
self.brand = brand
self.model = model
def __str__(self) -> str:
return f"Bike: {self.brand} {self.model}"
| 27.583333 | 62 | 0.667674 | 265 | 0.800604 | 0 | 0 | 0 | 0 | 0 | 0 | 34 | 0.102719 |
334572d04bea77a18ada43a30928987b98727f24 | 107 | py | Python | authentication/authenticator.py | gabrielbazan/http_auth | 5d86669ead6beedd8d980031d09d7dedfc1033fd | [
"MIT"
] | null | null | null | authentication/authenticator.py | gabrielbazan/http_auth | 5d86669ead6beedd8d980031d09d7dedfc1033fd | [
"MIT"
] | null | null | null | authentication/authenticator.py | gabrielbazan/http_auth | 5d86669ead6beedd8d980031d09d7dedfc1033fd | [
"MIT"
] | null | null | null |
class Authenticator(object):
def authenticate(self, credentials):
raise NotImplementedError()
| 21.4 | 40 | 0.728972 | 105 | 0.981308 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
334618648cc53a79bbe36c64245af99769f241e5 | 12,502 | py | Python | spatial_two_mics/data_loaders/wham.py | etzinis/unsupervised_spatial_dc | 7330303d3c994b2abdbceedf2ee08660f94bd9ce | [
"MIT"
] | 21 | 2018-12-12T01:53:58.000Z | 2022-01-16T17:23:40.000Z | spatial_two_mics/data_loaders/wham.py | etzinis/unsupervised_spatial_dc | 7330303d3c994b2abdbceedf2ee08660f94bd9ce | [
"MIT"
] | 1 | 2019-04-25T07:14:31.000Z | 2020-04-22T00:59:30.000Z | spatial_two_mics/data_loaders/wham.py | etzinis/unsupervised_spatial_dc | 7330303d3c994b2abdbceedf2ee08660f94bd9ce | [
"MIT"
] | 4 | 2019-10-06T09:08:11.000Z | 2021-02-09T23:11:27.000Z | """!
@brief Pytorch dataloader for wham dataset for multiple gender combinations.
@author Efthymios Tzinis {etzinis2@illinois.edu}
@copyright University of illinois at Urbana Champaign
"""
import torch
import os
import numpy as np
import pickle
import glob2
import sys
current_dir = os.path.dirname(os.path.abspath('__file__'))
root_dir = os.path.abspath(os.path.join(current_dir, '../../'))
sys.path.append(root_dir)
import approx_ensembles.separation.dataset_loader.abstract_dataset as \
abstract_dataset
from scipy.io import wavfile
import warnings
from tqdm import tqdm
from time import time
EPS = 1e-8
enh_single = {'mixture': 'mix_single',
'sources': ['s1', 'noise'],
'n_sources': 1}
enh_single_white_noise = {
'mixture': 'source_with_white_noise',
'sources': ['s1', 'white_noise'],
'n_sources': 1}
enh_both = {'mixture': 'mix_both',
'sources': ['mix_clean', 'noise'],
'n_sources': 1}
sep_clean = {'mixture': 'mix_clean',
'sources': ['s1', 's2'],
'n_sources': 2}
sep_noisy = {'mixture': 'mix_both',
'sources': ['s1', 's2', 'noise'],
'n_sources': 2}
VALID_GENDER_COMBS = set(['ff', 'mm', 'fm', 'mf'])
WHAM_TASKS = {'enhance_single_white_noise': enh_single_white_noise,
'enhance_single': enh_single,
'enhance_both': enh_both,
'sep_clean': sep_clean,
'sep_noisy': sep_noisy}
WHAM_TASKS['enh_single'] = WHAM_TASKS['enhance_single']
WHAM_TASKS['enh_both'] = WHAM_TASKS['enhance_both']
def normalize_tensor_wav(wav_tensor, eps=1e-8, std=None):
mean = wav_tensor.mean(-1, keepdim=True)
if std is None:
std = wav_tensor.std(-1, keepdim=True)
return (wav_tensor - mean) / (std + eps)
class Dataset(torch.utils.data.Dataset, abstract_dataset.Dataset):
""" Dataset class for WHAM source separation and speech enhancement tasks.
Example of kwargs:
root_dirpath='/mnt/data/wham', task='enh_single',
split='tr', sample_rate=8000, timelength=4.0,
normalize_audio=False, n_samples=0, zero_pad=False
"""
def __init__(self, **kwargs):
super(Dataset, self).__init__()
warnings.filterwarnings("ignore")
self.kwargs = kwargs
self.task = self.get_arg_and_check_validness(
'task', known_type=str, choices=WHAM_TASKS.keys())
self.zero_pad = self.get_arg_and_check_validness(
'zero_pad', known_type=bool)
self.augment = self.get_arg_and_check_validness(
'augment', known_type=bool)
# Gender combination priors for combinations
# ff, mm, fm/mf
self.gender_combination_priors = self.get_arg_and_check_validness(
'gender_combination_priors', known_type=float,
dict_check={'ff': float, 'mm': float, 'fm': float, 'mf':float},
extra_lambda_checks=[lambda x: [0 <= y <= 1 for y in x.values()]])
self.normalize_audio = self.get_arg_and_check_validness(
'normalize_audio', known_type=bool)
self.min_or_max = self.get_arg_and_check_validness(
'min_or_max', known_type=str, choices=['min', 'max'])
self.split = self.get_arg_and_check_validness(
'split', known_type=str, choices=['cv', 'tr', 'tt'])
self.n_samples = self.get_arg_and_check_validness(
'n_samples', known_type=int, extra_lambda_checks=[lambda x: x >= 0])
self.sample_rate = self.get_arg_and_check_validness('sample_rate',
known_type=int)
self.root_path = self.get_arg_and_check_validness(
'root_dirpath', known_type=str,
extra_lambda_checks=[lambda y: os.path.lexists(y)])
self.dataset_dirpath = self.get_path()
self.mixtures_info_metadata_path = os.path.join(
self.dataset_dirpath, 'metadata_v2')
self.timelength = self.get_arg_and_check_validness(
'timelength', known_type=float)
self.time_samples = int(self.sample_rate * self.timelength)
# Create the indexing for the dataset
mix_folder_path = os.path.join(self.dataset_dirpath,
WHAM_TASKS[self.task]['mixture'])
self.file_names = []
self.available_mixtures = glob2.glob(mix_folder_path + '/*.wav')
self.mixtures_info = []
print('Parsing Dataset found at: {}...'.format(self.dataset_dirpath))
if not os.path.lexists(self.mixtures_info_metadata_path):
# Parse gender information.
gender_info_path = os.path.join(
os.path.dirname(os.path.abspath('__file__')),
'wham_speaker_info.txt')
gender_dic = {}
if os.path.lexists(gender_info_path):
with open(gender_info_path, 'rb') as filehandle:
gender_dic = dict([tuple([x.decode() for x in l.split()])
for l in filehandle.readlines()])
for file_path in tqdm(self.available_mixtures):
sample_rate, waveform = wavfile.read(file_path)
assert sample_rate == self.sample_rate
numpy_wav = np.array(waveform)
speaker_info = os.path.basename(file_path).split('.wav')[0]
speaker_info = [x[:3] for x in speaker_info.split('_')[::2]]
this_gender_comb = ''
for speaker in speaker_info:
if speaker not in gender_dic:
raise ValueError('Speaker with id: {} not '
'found!'.format(speaker))
else:
this_gender_comb += gender_dic[speaker].lower()
self.mixtures_info.append([os.path.basename(file_path),
numpy_wav.shape[0],
this_gender_comb])
print('Dumping metadata in: {}'.format(
self.mixtures_info_metadata_path))
with open(self.mixtures_info_metadata_path, 'wb') as filehandle:
pickle.dump(self.mixtures_info, filehandle)
if os.path.lexists(self.mixtures_info_metadata_path):
with open(self.mixtures_info_metadata_path, 'rb') as filehandle:
self.mixtures_info = pickle.load(filehandle)
print('Loaded metadata from: {}'.format(
self.mixtures_info_metadata_path))
self.file_names_g_comb = dict([(g, []) for g in VALID_GENDER_COMBS])
for path, n_samples, gender_comb in self.mixtures_info:
if (n_samples >= self.time_samples or self.zero_pad):
self.file_names_g_comb[gender_comb].append((path, n_samples))
self.file_names = []
# Apply the priors
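        # Each prior is the fraction of that gender combination's available
        # mixtures to keep, e.g. 0.5 keeps half of them.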
for gender_comb in self.file_names_g_comb:
percentage = self.gender_combination_priors[gender_comb]
length = len(self.file_names_g_comb[gender_comb])
n_requested = int(length * percentage)
self.file_names += self.file_names_g_comb[gender_comb][:n_requested]
if self.n_samples > 0:
self.file_names = self.file_names[:self.n_samples]
max_time_samples = max([n_s for (_, n_s) in self.file_names])
self.file_names = [x for (x, _) in self.file_names]
# for the case that we need the whole audio input
if self.time_samples <= 0.:
self.time_samples = max_time_samples
def get_path(self):
path = os.path.join(self.root_path,
'wav{}k'.format(int(self.sample_rate / 1000)),
self.min_or_max, self.split)
if os.path.lexists(path):
return path
else:
raise IOError('Dataset path: {} not found!'.format(path))
def safe_pad(self, tensor_wav):
if self.zero_pad and tensor_wav.shape[0] < self.time_samples:
appropriate_shape = tensor_wav.shape
padded_wav = torch.zeros(
list(appropriate_shape[:-1]) + [self.time_samples],
dtype=torch.float32)
padded_wav[:tensor_wav.shape[0]] = tensor_wav
return padded_wav[:self.time_samples]
else:
return tensor_wav[:self.time_samples]
def __len__(self):
return len(self.file_names)
def __getitem__(self, idx):
if self.augment:
the_time = int(np.modf(time())[0] * 100000000)
np.random.seed(the_time)
filename = self.file_names[idx]
mixture_path = os.path.join(self.dataset_dirpath,
WHAM_TASKS[self.task]['mixture'],
filename)
_, waveform = wavfile.read(mixture_path)
max_len = len(waveform)
rand_start = 0
if self.augment and max_len > self.time_samples:
rand_start = np.random.randint(0, max_len - self.time_samples)
waveform = waveform[rand_start:rand_start+self.time_samples]
mixture_wav = np.array(waveform)
mixture_wav = torch.tensor(mixture_wav, dtype=torch.float32)
# First normalize the mixture and then pad
if self.normalize_audio:
mixture_wav = normalize_tensor_wav(mixture_wav)
mixture_wav = self.safe_pad(mixture_wav)
sources_list = []
for source_name in WHAM_TASKS[self.task]['sources']:
source_path = os.path.join(self.dataset_dirpath,
source_name, filename)
try:
_, waveform = wavfile.read(source_path)
except Exception as e:
print(e)
raise IOError('could not load file from: {}'.format(source_path))
waveform = waveform[rand_start:rand_start + self.time_samples]
numpy_wav = np.array(waveform)
source_wav = torch.tensor(numpy_wav, dtype=torch.float32)
# First normalize the mixture and then pad
if self.normalize_audio:
source_wav = normalize_tensor_wav(source_wav)
source_wav = self.safe_pad(source_wav)
sources_list.append(source_wav)
if self.normalize_audio:
mix_std = mixture_wav.detach().cpu().numpy().std()
mixture_wav = normalize_tensor_wav(mixture_wav, std=mix_std)
sources_list = [normalize_tensor_wav(s, std=mix_std)
for s in sources_list]
sources_wavs = torch.stack(sources_list, dim=0)
return mixture_wav, sources_wavs
def get_generator(self, batch_size=4, shuffle=True, num_workers=4):
generator_params = {'batch_size': batch_size,
'shuffle': shuffle,
'num_workers': num_workers,
'drop_last': True}
return torch.utils.data.DataLoader(self, **generator_params,
pin_memory=True)
def test_generator():
wham_root_p = '/mnt/data/wham'
batch_size = 1
sample_rate = 8000
timelength = 4.0
gender_combination_priors = {
'ff': 0., 'mm': 0.05, 'fm': 0., 'mf': 0.02
}
time_samples = int(sample_rate * timelength)
data_loader = Dataset(
root_dirpath=wham_root_p, task='sep_clean',
gender_combination_priors=gender_combination_priors,
split='tt', sample_rate=sample_rate, timelength=timelength,
zero_pad=True, min_or_max='min', augment=True,
normalize_audio=False, n_samples=10)
generator = data_loader.get_generator(batch_size=batch_size, num_workers=1)
for mixture, sources in generator:
assert mixture.shape == (batch_size, time_samples)
assert sources.shape == (batch_size, 2, time_samples)
# test the testing set with batch size 1 only
data_loader = Dataset(
root_dirpath=wham_root_p, task='sep_clean',
gender_combination_priors=gender_combination_priors,
split='tt', sample_rate=sample_rate, timelength=-1.,
zero_pad=False, min_or_max='min', augment=False,
normalize_audio=False, n_samples=10)
generator = data_loader.get_generator(batch_size=1, num_workers=1)
for mixture, sources in generator:
assert mixture.shape[-1] == sources.shape[-1]
if __name__ == "__main__":
test_generator()
| 40.723127 | 81 | 0.607103 | 9,304 | 0.744201 | 0 | 0 | 0 | 0 | 0 | 0 | 1,833 | 0.146617 |
33483b8215659fc4a60059ae841163838e1448c1 | 3,779 | py | Python | dali/test/python/test_operator_input_promotion.py | lbhm/DALI | d478d768c55069351a78d6bdcebed7632ca21ecb | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-05-31T14:00:58.000Z | 2019-05-31T14:00:58.000Z | dali/test/python/test_operator_input_promotion.py | Pandinosaurus/DALI | 1031314b7857ec11d40e31496089579297a2e863 | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2021-06-11T17:05:37.000Z | 2021-06-23T03:45:04.000Z | dali/test/python/test_operator_input_promotion.py | Pandinosaurus/DALI | 1031314b7857ec11d40e31496089579297a2e863 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import nvidia.dali as dali
import nvidia.dali.fn as fn
import numpy as np
def test_cat_numpy_array():
pipe = dali.pipeline.Pipeline(1,1,None)
src = fn.external_source([[np.array([[10,11],[12,13]], dtype=np.float32)]])
pipe.set_outputs(fn.cat(src, np.array([[20],[21]], dtype=np.float32), axis=1))
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[10,11,20],[12,13,21]]))
def test_stack_numpy_scalar():
pipe = dali.pipeline.Pipeline(1,1,None)
src = fn.external_source([[np.array([[10,11],[12,13]], dtype=np.float32)]])
pipe.set_outputs(fn.cat(src, np.array([[20],[21]], dtype=np.float32), axis=1))
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[10,11,20],[12,13,21]]))
def test_slice_fn():
pipe = dali.pipeline.Pipeline(1,1,0)
src = fn.external_source([[np.array([[10,11,12],[13,14,15],[16,17,18]], dtype=np.float32)]])
out_cpu = fn.slice(src, np.array([1,1]), np.array([2,1]), axes=[0,1])
out_gpu = fn.slice(src.gpu(), np.array([1,1]), np.array([2,1]), axes=[0,1])
pipe.set_outputs(out_cpu, out_gpu)
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[14],[17]]))
assert np.array_equal(o[1].as_cpu().at(0), np.array([[14],[17]]))
def test_slice_ops():
pipe = dali.pipeline.Pipeline(1,1,0)
src = fn.external_source([[np.array([[10,11,12],[13,14,15],[16,17,18]], dtype=np.float32)]])
slice_cpu = dali.ops.Slice(axes=[0,1], device="cpu")
slice_gpu = dali.ops.Slice(axes=[0,1], device="gpu")
out_cpu = slice_cpu(src, np.array([1,1]), np.array([2,1]))
out_gpu = slice_gpu(src.gpu(), np.array([1,1]), np.array([2,1]))
pipe.set_outputs(out_cpu, out_gpu)
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[14],[17]]))
assert np.array_equal(o[1].as_cpu().at(0), np.array([[14],[17]]))
def test_python_function():
pipe = dali.pipeline.Pipeline(3,1,0, exec_async=False, exec_pipelined=False)
with pipe:
def func(inp):
ret = [x*x for x in inp]
return [ret]
out_cpu = fn.python_function(np.array([[1,2],[3,4]]), function=func, batch_processing=True)
pipe.set_outputs(out_cpu)
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[1,4],[9,16]]))
def test_arithm_ops():
pipe = dali.pipeline.Pipeline(1,1,None)
with pipe:
in1 = fn.external_source([[np.uint8([[1,2],[3,4]])]])
pipe.set_outputs(in1 + np.array([[10,20],[30,40]]), in1 + np.array(5), in1 + np.uint8(100))
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[11,22],[33,44]]))
assert np.array_equal(o[1].at(0), np.array([[6,7],[8,9]]))
assert np.array_equal(o[2].at(0), np.array([[101,102],[103,104]]))
def test_arg_input():
pipe = dali.pipeline.Pipeline(1,1,None)
with pipe:
in1 = fn.external_source([[np.float32([[1,2,3],[4,5,6]])]])
pipe.set_outputs(fn.transforms.translation(in1, offset=np.float32([10,20])))
pipe.build()
o = pipe.run()
assert np.array_equal(o[0].at(0), np.array([[1,2,13],[4,5,26]]))
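# The tests above check "input promotion": numpy arrays/scalars passed directly
# as operator inputs become constant pipeline inputs. They are normally
# collected by an external test runner (e.g. nose or pytest).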
| 40.634409 | 99 | 0.632442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 606 | 0.16036 |
3349ab80fd693215fc2ec86a1bdc3c3c49e9ea6e | 8,446 | py | Python | www/transwarp/orm.py | houxiao2011/webapp-python | c74e2585dd96bc6ca240c443b2d18a8b3ea011e0 | [
"Apache-2.0"
] | null | null | null | www/transwarp/orm.py | houxiao2011/webapp-python | c74e2585dd96bc6ca240c443b2d18a8b3ea011e0 | [
"Apache-2.0"
] | null | null | null | www/transwarp/orm.py | houxiao2011/webapp-python | c74e2585dd96bc6ca240c443b2d18a8b3ea011e0 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import time, logging
import db
class Field(object):
_count = 0
def __init__(self, **kw):
self.name = kw.get('name', None)
self._default = kw.get('default', None)
self.primary_key = kw.get('primary_key', False)
self.nullable = kw.get('nullable', False)
self.updatable = kw.get('updatable', True)
self.insertable = kw.get('insertable', True)
self.ddl = kw.get('ddl', '')
self._order = Field._count
        Field._count = Field._count + 1
@property
def default(self):
d = self._default
return d() if callable(d) else d
def __str__(self):
s = ['<%s : %s, %s, default(%s), ' % (self.__class__.__name__, self.name, self.ddl, self._default)]
self.nullable and s.append('N')
self.updatable and s.append('U')
self.insertable and s.append('I')
s.append('>')
return ''.join(s)
class IntegerField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = 0
if not 'ddl' in kw:
kw['ddl'] = 'bigint'
super(IntegerField, self).__init__(**kw)
class StringField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = ''
if not 'ddl' in kw:
kw['ddl'] = 'varchar(255)'
super(StringField, self).__init__(**kw)
class FloatField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = 0.0
if not 'ddl' in kw:
kw['ddl'] = 'real'
super(FloatField, self).__init__(**kw)
class BooleanField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = False
if not 'ddl' in kw:
kw['ddl'] = 'bool'
super(BooleanField, self).__init__(**kw)
class TextField(Field):
def __init__(self, **kw):
if not 'default' in kw:
kw['default'] = ''
if not 'ddl' in kw:
kw['ddl'] = 'text'
super(TextField, self).__init__(**kw)
class BlobField(Field):
def __init__(self, **kw):
if 'default' not in kw:
kw['default'] = ''
if not 'ddl' in kw:
kw['ddl'] = 'blob'
super(BlobField, self).__init__(**kw)
class VersionField(Field):
def __init__(self, name=None):
super(VersionField, self).__init__(name=name, default=0, ddl='bigint')
_triggers = frozenset(['pre_insert', 'pre_update', 'pre_delete'])
def _gen_sql(table_name, mappings):
pk = None
sql = ['-- generating SQL for %s:' % table_name, 'create table `%s` (' % table_name]
for f in sorted(mappings.values(), lambda x, y: cmp(x._order, y._order)):
if not hasattr(f, 'ddl'):
raise StandardError('no ddl in field "%s".' % f)
ddl = f.ddl
nullable = f.nullable
if f.primary_key:
pk = f.name
        sql.append(nullable and '  `%s` %s,' % (f.name, ddl) or '  `%s` %s not null,' % (f.name, ddl))
    sql.append('  primary key(`%s`)' % pk)
sql.append(');')
return '\n'.join(sql)
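# Illustrative output of _gen_sql (whitespace approximate) for a hypothetical
# model with a varchar(50) primary key `id` and a default StringField `name`:
#   -- generating SQL for user:
#   create table `user` (
#     `id` varchar(50) not null,
#     `name` varchar(255) not null,
#     primary key(`id`)
#   );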
class ModelMetaclass(type):
    def __new__(cls, name, bases, attrs):
        if name == 'Model':
            return type.__new__(cls, name, bases, attrs)
if not hasattr(cls, 'subclasses'):
cls.subclasses = {}
if not name in cls.subclasses:
cls.subclasses[name] = name
else:
logging.warning('Redefine class: %s' % name)
logging.info('Scan ORMapping %s...' % name)
mappings = dict()
primary_key = None
for k, v in attrs.iteritems():
if isinstance(v, Field):
if not v.name:
v.name = k
logging.info('Found mapping: %s => %s' % (k, v))
if v.primary_key:
if primary_key:
raise TypeError('Can not define more than 1 primary key in class: %s' % name)
if v.updatable:
logging.warning('NOTE: change primary key to non-updatable')
v.updatable = False
if v.nullable:
                        logging.warning('NOTE: change primary key to non-nullable')
v.nullable = False
primary_key = v
mappings[k] = v
if not primary_key:
raise TypeError('Primary key not defined in class: %s' % name)
for k in mappings.iterkeys():
attrs.pop(k)
if not '__table__' in attrs:
attrs['__table__'] = name.lower()
attrs['__mappings__'] = mappings
attrs['__primary_key__'] = primary_key
attrs['__sql__'] = lambda self: _gen_sql(attrs['__table__'], mappings)
for trigger in _triggers:
if not trigger in attrs:
attrs[trigger] = None
        return type.__new__(cls, name, bases, attrs)
class Model(dict):
__metaclass__ = ModelMetaclass
def __init__(self, **kw):
super(Model, self).__init__(**kw)
def __getattr__(self, key):
try:
return self[key]
except KeyError:
raise AttributeError(r"'Dict' Object has no attribute '%s'" % key)
def __setattr__(self, key, value):
self[key] = value
@classmethod
def get(cls, pk):
d = db.select_one('select * from %s where %s = ?' % (cls.__table__, cls.__primary_key__.name), pk)
return cls(**d) if d else None
@classmethod
def find_first(cls, where, *args):
d = db.select_one('select * from %s %s' % (cls.__table__, where), *args)
return cls(**d) if d else None
@classmethod
def find_all(cls):
l = db.select('select * from `%s`' % cls.__table__)
return [cls(**d) for d in l]
@classmethod
def find_by(cls, where, *args):
l = db.select('select * from `%s` %s' % (cls.__table__, where), *args)
return [cls(**d) for d in l]
@classmethod
def count_all(cls):
return db.select_int('select count(`%s`) from `%s`' % (cls.__primary_key__.name, cls.__table__))
@classmethod
def count_by(cls, where, *args):
return db.select_int('select count(`%s`) from `%s` %s' % (cls.__primary_key__.name, cls.__table__, where),
*args)
def update(self):
self.pre_update and self.pre_update()
l = []
args = []
for k, v in self.__mappings__.iteritems():
if v.updatable:
if hasattr(self, k):
arg = getattr(self, k)
else:
arg = v.default
setattr(self, k, arg)
l.append('`%s`=?' % k)
pk = self.__primary_key__.name
args.append(getattr(self, pk))
        db.update('update `%s` set %s where %s=?' % (self.__table__, ','.join(l), pk), *args)
return self
def delete(self):
self.pre_delete and self.pre_delete()
pk = self.__primary_key__.name
        args = (getattr(self, pk),)
db.update('delete from `%s` where `%s`=?' % (self.__table__, pk), *args)
def insert(self):
self.pre_insert and self.pre_insert()
params = {}
for k, v in self.__mappings__.iteritems():
if v.insertable:
if not hasattr(self, k):
setattr(self, k, v.default)
params[v.name] = getattr(self, k)
db.insert('%s' % self.__table__, **params)
return self
# class User(Model):
# __table__ = 'users'
#
# id = StringField(primary_key=True, default=db.next_id, ddl='varchar(50)')
# email = StringField(updatable=False, ddl='varchar(50)')
# password = StringField(ddl='varchar(50)')
# admin = BooleanField()
# name = StringField(ddl='varchar(50)')
# image = StringField(ddl='varchar(500)')
# created_at = FloatField(updatable=False, default=time.time)
#
#
# if __name__ == '__main__':
# db.create_engine('../../www-data')
# db.update('drop table if exists users')
# db.update('create table users (id int primary key, name text, email text, password text, admin bool, image text, '
# 'created_at real)')
# u = User(name='Test', email='test@example.com', password='1234567890', image='about:blank')
# u.insert()
# print 'new user id:', u.id
| 31.632959 | 120 | 0.543689 | 6,814 | 0.806772 | 0 | 0 | 1,087 | 0.1287 | 0 | 0 | 1,990 | 0.235614 |
3349c42d9a6787f22dabc4fb2936a9e909a361bb | 121 | py | Python | EX10 dolar carteira.py | RODRIGOKTK/Python-exercicios | f7985f2c277aae8b158bdeea4f2493febaaf06c5 | [
"Unlicense"
] | null | null | null | EX10 dolar carteira.py | RODRIGOKTK/Python-exercicios | f7985f2c277aae8b158bdeea4f2493febaaf06c5 | [
"Unlicense"
] | null | null | null | EX10 dolar carteira.py | RODRIGOKTK/Python-exercicios | f7985f2c277aae8b158bdeea4f2493febaaf06c5 | [
"Unlicense"
] | null | null | null | carteira=float(input('Quanto tem na carteira: '))
dolar=3.27
print('Equivalente em dolares: %.2f ' %(carteira/dolar))
| 30.25 | 57 | 0.702479 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 57 | 0.471074 |
334a98c4b9b9f9e53176aaf0e2f36f0be5032d0d | 1,757 | py | Python | code/libs/utils.py | shinebobo/Semantic-Line-SLNet | c9b8f9f0af8b74fdcd9f101d246ef8b4be96b8b2 | [
"MIT"
] | 1 | 2021-07-21T01:06:12.000Z | 2021-07-21T01:06:12.000Z | code/libs/utils.py | shinebobo/Semantic-Line-SLNet | c9b8f9f0af8b74fdcd9f101d246ef8b4be96b8b2 | [
"MIT"
] | null | null | null | code/libs/utils.py | shinebobo/Semantic-Line-SLNet | c9b8f9f0af8b74fdcd9f101d246ef8b4be96b8b2 | [
"MIT"
] | null | null | null | import os
import pickle
import numpy as np
import random
import torch
global global_seed
global_seed = 123
torch.manual_seed(global_seed)
torch.cuda.manual_seed(global_seed)
torch.cuda.manual_seed_all(global_seed)
np.random.seed(global_seed)
random.seed(global_seed)
def _init_fn(worker_id):
seed = global_seed + worker_id
np.random.seed(seed)
random.seed(seed)
torch.manual_seed(seed)
return
# convertor
def to_tensor(data):
return torch.from_numpy(data).cuda()
def to_np(data):
return data.cpu().numpy()
def to_np2(data):
return data.detach().cpu().numpy()
def logger(text, LOGGER_FILE): # write log
with open(LOGGER_FILE, 'a') as f:
f.write(text)
f.close()
# directory & file
def mkdir(path):
if os.path.exists(path) == False:
os.makedirs(path)
def rmfile(path):
if os.path.exists(path):
os.remove(path)
# pickle
def save_pickle(dir_name, file_name, data):
    '''
    Pickle `data` into <dir_name><file_name>.pickle, creating the
    directory first if it does not exist.
    :param dir_name: directory to save into (created if missing)
    :param file_name: file name without the '.pickle' extension
    :param data: picklable object to serialize
    '''
mkdir(dir_name)
with open(dir_name + file_name + '.pickle', 'wb') as f:
pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
def load_pickle(file_path):
with open(file_path + '.pickle', 'rb') as f:
data = pickle.load(f)
return data
# create dict
def create_test_dict():
out = {'cls': {},
'reg': {},
'left': {},
'middle': {},
'right': {}} # detected lines
# pred
out['cls'] = torch.FloatTensor([]).cuda()
out['reg'] = torch.FloatTensor([]).cuda()
return out
def create_forward_step(num, batch_size):
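    # e.g. create_forward_step(10, batch_size=4) -> [0, 4, 8, 10]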
step = [i for i in range(0, num, batch_size)]
if step[len(step) - 1] != num:
step.append(num)
return step | 19.307692 | 62 | 0.625498 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 220 | 0.125213 |
334ae316e44a87d9de4896827430dd6a339557e7 | 647 | py | Python | api/admin.py | Neoklosch/QuestionBasedServer | 7e690944355471d3a9507b0414cbaf1f800bab97 | [
"Apache-2.0"
] | null | null | null | api/admin.py | Neoklosch/QuestionBasedServer | 7e690944355471d3a9507b0414cbaf1f800bab97 | [
"Apache-2.0"
] | 7 | 2020-06-05T17:05:00.000Z | 2022-03-11T23:13:05.000Z | api/admin.py | Cookie-Monsters/uQu-Backend | 7e690944355471d3a9507b0414cbaf1f800bab97 | [
"Apache-2.0"
] | null | null | null | from django.contrib import admin
from api.models import Answer, Question, User
from django import forms
class AnswerAdmin(admin.ModelAdmin):
model = Answer
class QuestionAdmin(admin.ModelAdmin):
model = Question
# class UserForm(forms.ModelForm):
# password = forms.CharField(widget=forms.PasswordInput)
#
# def __init__(self, *args, **kwargs):
# super(UserForm, self).__init__(*args, **kwargs)
#
# class Meta:
# model = User
class UserAdmin(admin.ModelAdmin):
model = User
admin.site.register(Answer, AnswerAdmin)
admin.site.register(Question, QuestionAdmin)
admin.site.register(User, UserAdmin)
| 20.870968 | 60 | 0.718702 | 165 | 0.255023 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.361669 |
334afd08e2c0f5208e6a49dbf748437e970df29e | 20,153 | py | Python | Minesweeper_Python/src/MyAI.py | Thomas1728/AI-MineSweeper | 7d8baec928637c87b1b4cd67649eb2b4cd2227ba | [
"MIT"
] | null | null | null | Minesweeper_Python/src/MyAI.py | Thomas1728/AI-MineSweeper | 7d8baec928637c87b1b4cd67649eb2b4cd2227ba | [
"MIT"
] | null | null | null | Minesweeper_Python/src/MyAI.py | Thomas1728/AI-MineSweeper | 7d8baec928637c87b1b4cd67649eb2b4cd2227ba | [
"MIT"
] | null | null | null | # ==============================CS-171==================================
# FILE: MyAI.py
#
# AUTHOR: bugMaker
#
# DESCRIPTION: This file contains the MyAI class. You will implement your
# agent in this file. You will write the 'getAction' function,
# the constructor, and any additional helper functions.
#
# NOTES: - MyAI inherits from the abstract AI class in AI.py.
#
#               - DO NOT MAKE CHANGES TO THIS FILE.
# ==============================CS-171=================================
from itertools import combinations
from AI import AI
from Action import Action
from collections import defaultdict
import time
class MyAI(AI):
class Board:
def __init__(self, rowDimension, colDimension):
self.board = [['.' for i in range(colDimension)] for j in range(rowDimension)]
self.__rowDimension = rowDimension
self.__colDimension = colDimension
def isValid(self, coor):
return 0 <= coor[1] < self.__rowDimension and 0 <= coor[0] < self.__colDimension
def set(self, coor, number):
if self.isValid(coor):
self.board[coor[1]][coor[0]] = number
def get(self, coor):
if self.isValid(coor):
return self.board[coor[1]][coor[0]]
count = 1
def __init__(self, rowDimension, colDimension, totalMines, startX, startY):
self.__rowDimension = rowDimension
self.__colDimension = colDimension
self.__moveCount = 0
self.__totalMines = totalMines
self.__coveredTiles = rowDimension * colDimension
self.lastCoordinate = (startX, startY)
self.board = self.Board(rowDimension, colDimension)
self.safePoints = set()
self.frontier = set()
self.searchingPoints = dict()
self.bombs = set()
# self.partialCoveredFrontier = []
# self.partialUncoveredFrontier = []
# self.potentialBombs = [] # For model checking
# self.appliedBombs = [] # For model checking
# self.triedPointsStack = [] # For model checking
# self.partialUncoveredFrontierCopy = []
self.triggered = False
self.cluster = {}
print("Start New Agent in " + str(MyAI.count))
MyAI.count += 1
def hasWon(self) -> bool:
return self.__coveredTiles - 1 == self.__totalMines
def flag(self, l: list):
'''add bombs into self.bombs from a list'''
for i in l:
self.bombs.add(i)
def surroundPoints(self, coordinate) -> list:
"""
returns 8 surround points (tuples) of a coordinate
"""
surround_points = []
for dx in range(-1, 2):
for dy in range(-1, 2):
if not (dx == 0 and dy == 0) and self.board.isValid((coordinate[0] + dx, coordinate[1] + dy)):
surround_points.append((coordinate[0] + dx, coordinate[1] + dy))
return surround_points
def countCovered(self, coordinate: (int, int)) -> int:
count = 0
for i in self.surroundPoints(coordinate):
if self.board.get(i) == '.':
count += 1
return count
def getCovered(self, coordinate) -> list:
return [i for i in self.surroundPoints(coordinate) if self.board.get(i) == '.' and i not in self.bombs]
def getUncovered(self, coordinate) -> list:
return [i for i in self.surroundPoints(coordinate) if self.board.get(i) != '.']
def countUncovered(self, coordinate) -> list:
count = 0
for i in self.surroundPoints(coordinate):
if self.board.get(i) != '.':
count += 1
return count
def countBombs(self, coordinate: (int, int)) -> int:
count = 0
for i in self.surroundPoints(coordinate):
if i in self.bombs:
count += 1
return count
def deleteFrontier(self):
''' eliminate unneeded uncovered frontiers (bombs)'''
for i in self.bombs:
if i in self.frontier:
self.frontier.remove(i)
def deleteSearchingPoints(self):
for point in [i for i in self.searchingPoints.keys() if self.countCovered(i) - self.countBombs(i) == 0]:
del self.searchingPoints[point]
def uncover(self):
poping_point = self.safePoints.pop()
self.lastCoordinate = poping_point
self.__coveredTiles -= 1
if poping_point in self.frontier:
self.frontier.remove(poping_point)
return Action(AI.Action.UNCOVER, poping_point[0], poping_point[1])
# CSP methods
def findNext(self, pointDict):
# Use euclidean distance to find the next variable
# Ideas are from stackOverflow
point = None
num = -1
        dist = 2800  # sentinel: larger than any squared distance we expect to see
for i in pointDict:
surroundNum = len(
[x for x in self.surroundPoints(i) if self.board.get(x) != '.' and self.board.get(x) != 0])
minDist = min(abs(x[0] - i[0]) ** 2 + abs(x[1] - i[1]) ** 2 for x in pointDict if pointDict[x] != 0)
if pointDict[i] == None and minDist < dist:
point = i
num = surroundNum
dist = minDist
elif pointDict[i] == None and minDist == dist and surroundNum > num:
point = i
num = surroundNum
dist = minDist
return point
def backtrack(self, pointDict: dict, domain: set, output: list, constraint: 'function') -> bool:
# TO DO: time constraint here
if time.perf_counter() - self.time > 250:
return False
# check finish state
finish = True
for i in pointDict.values():
if i is None:
finish = False
break
if finish == True:
output.append(pointDict)
return True
# select point
point = self.findNext(pointDict)
# backtracking the point in both cases (safe or bomb)
for status in domain: # domain is {0, 1}, 1 stands for bomb
pointDict[point] = status
# check if it satisfy the constraints
if constraint(pointDict):
result = self.backtrack(pointDict.copy(), domain, output, constraint)
# remove assignment
pointDict[point] = None
if result == False:
return False
def constraint(self, frontier):
def build(pointDict) -> bool:
for point in frontier:
if not self.checkConstraint(pointDict, point):
return False
return True
return build
def checkConstraint(self, pointDict, point):
current = []
for i in self.surroundPoints(point):
if i in self.bombs:
current.append(1)
elif self.board.get(i) == '.':
current.append(pointDict[i])
if None in current:
return current.count(1) <= self.board.get(point)
elif current.count(1) == self.board.get(point):
return True
return False
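    # cspSetUp enumerates, via backtracking, every bomb assignment over a
    # cluster of covered frontier tiles that satisfies the uncovered-number
    # constraints. A tile that is a bomb in no configuration is safe, a tile
    # that is a bomb in every configuration is flagged; otherwise its per-tile
    # bomb frequency is kept as a probability estimate.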
def cspSetUp(self, uncoveredFrontiers, pointDict):
constraint = self.constraint(uncoveredFrontiers)
output = []
configurationResult = defaultdict(int)
backtrackResult = self.backtrack(pointDict, {0, 1}, output, constraint)
if backtrackResult == False and len(self.safePoints) == 0:
self.randomChoose()
for i in output:
for point in i:
configurationResult[point] += i[point]
canFindWithoutProbability = False
for point in configurationResult:
if configurationResult[point] == 0:
self.safePoints.add(point)
canFindWithoutProbability = True
elif configurationResult[point] == len(output):
self.bombs.add(point)
canFindWithoutProbability = True
if canFindWithoutProbability:
return configurationResult
for point in configurationResult:
# calculate the probability
configurationResult[point] = configurationResult[point] / len(output)
self.triggered = True
return configurationResult
def randomChoose(self):
for y in range(self.__rowDimension):
for x in range(self.__colDimension):
if self.board.get((x, y)) == '.' and (x, y) not in self.frontier and (x, y) not in self.bombs:
self.safePoints.add((x, y))
break
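    # findRoot/grouping/showCluster implement a minimal union-find over the
    # covered frontier so that independent constraint clusters can be solved
    # by separate CSP searches.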
def findRoot(self, point):
while self.cluster[point] != point:
point = self.cluster[point]
return point
def grouping(self, pivot: (int, int), other: (int, int)):
if pivot not in self.cluster:
self.cluster[pivot] = pivot
if other not in self.cluster:
self.cluster[other] = other
self.cluster[self.findRoot(pivot)] = self.findRoot(other)
def showCluster(self):
output = defaultdict(set)
for i in self.cluster:
output[self.findRoot(i)].add(i)
output[self.findRoot(i)].add(self.findRoot(i))
return output.values()
def getFrontier(self) -> (set, dict): # revised version
self.cluster = {}
for point in self.searchingPoints.keys():
coveredFrontiers = []
for i in self.getCovered(point):
coveredFrontiers.append(i)
pivot = coveredFrontiers[0]
for i in coveredFrontiers:
self.grouping(pivot, i)
result = []
for points in self.showCluster():
uncoveredFrontiers = set()
for i in points:
for p in self.getUncovered(i):
if self.board.get(p) > 0:
uncoveredFrontiers.add(p)
pointDict = {i: None for i in points}
result.append((uncoveredFrontiers, pointDict))
return result
def getAction(self, number: int) -> "Action Object":
# preprocessing begins, it only handles easiest cases (rules of thumbs)
self.board.set(self.lastCoordinate, number)
if not self.hasWon():
surround_points = self.surroundPoints(self.lastCoordinate)
if number == 0:
for i in surround_points:
if self.board.get(i) == '.':
self.safePoints.add(i)
if number != 0:
self.searchingPoints[self.lastCoordinate] = number
for i in surround_points:
if self.board.get(i) == '.' and i not in self.safePoints:
self.frontier.add(i)
for point in self.searchingPoints.keys():
surround = self.getCovered(point)
surroundNum = len(self.surroundPoints(point))
count = self.countUncovered(point)
if count == surroundNum - self.searchingPoints[point]:
self.flag(surround) # find the bombs
if self.searchingPoints[point] == self.countBombs(point):
for i in surround:
if i not in self.bombs and self.board.get(i) == '.':
self.safePoints.add(i)
# eliminate unneeded uncovered frontiers (bombs)
self.deleteFrontier()
# eliminate unneeded covered frontiers
self.deleteSearchingPoints()
# add secure uncovered tiles to safe points
for i in self.frontier:
if i not in self.bombs and self.board.get(i) == '.' and len(self.bombs) == self.__totalMines:
self.safePoints.add(i)
if len(self.safePoints) > 0:
return self.uncover()
# First layer ends here
# print(f'searchingPoints: {self.searchingPoints.keys()}')
# print(f'frontier: {self.frontier}')
# print(f'bombs: {self.bombs}')
# Second layer
if not self.hasWon():
for coveredPoint in self.frontier:
satisfiable = True
self.bombs.add(coveredPoint) #TO DO: logic here got problems!!!
# print(coveredPoint)
for uncoveredPoint in self.searchingPoints.keys():
if self.searchingPoints[uncoveredPoint] - self.countBombs(uncoveredPoint) != 0:
satisfiable = False
if len(self.bombs) == self.__totalMines: # we could find all mines that we could by now, but we can't
self.safePoints.add(coveredPoint) # determine safe points unless there's no mines left
if not satisfiable:
# print(f'removed {coveredPoint}')
self.bombs.remove(coveredPoint)
self.deleteFrontier()
self.deleteSearchingPoints()
if len(self.safePoints) > 0:
return self.uncover()
# Start find frontier
'''
if self.getFrontiers():
self.doModelChecking()
# End find frontier
'''
# csp begin
# try:
if not self.hasWon():
self.time = time.perf_counter()
result = {}
for uncoveredFrontiers, pointDict in sorted(self.getFrontier(), key=lambda x: len(x[1])):
# print(f'uncovered: {uncoveredFrontiers}')
# print(f'pointDict: {pointDict}')
# print(f'searching points: {self.searchingPoints.keys()}')
cspResult = self.cspSetUp(uncoveredFrontiers, pointDict)
if self.triggered:
result.update(cspResult)
# except KeyError as e:
# self.randomChoose()
# print(e)
self.deleteFrontier()
self.deleteSearchingPoints()
#print('csp model checking')
#print(self.bombs)
if len(self.safePoints) > 0:
return self.uncover()
# probability
if len(result) != 0:
min = 1
chosenPoint = None
for point, prob in result.items():
if prob < min:
min = prob
chosenPoint = point
randomProb = (self.__totalMines - len(self.bombs)) / self.__coveredTiles
if min <= randomProb:
self.safePoints.add(chosenPoint)
else:
self.randomChoose()
self.deleteFrontier()
self.deleteSearchingPoints()
#print('probability')
if len(self.safePoints) > 0:
return self.uncover()
# totally random
if len(self.safePoints) == 0:
#print('random')
self.randomChoose()
if len(self.safePoints) > 0:
return self.uncover()
# Second layer ends here
# print(self.bombs)
return Action(AI.Action.LEAVE)
# def getFrontiers(self): # False means did that failed, no new value add to frontier True is ...
# self.partialUncoveredFrontier = []
# self.partialCoveredFrontier = []
# if len(self.searchingPoints) == 0:
# return False
# #'''
# for i in self.searchingPoints.keys():
# if i not in self.partialUncoveredFrontierCopy:
# self.partialUncoveredFrontier.append(i)
# break
# #'''
# #self.partialUncoveredFrontier.append(list(self.searchingPoints.keys())[0])
# IsNewValueAdded = True
# while IsNewValueAdded:
# countUncovered = len(self.partialUncoveredFrontier)
# countCovered = len(self.partialCoveredFrontier)
# for i in self.partialUncoveredFrontier:
# # add covered tiles surrounding by i
# if self.countCovered(i) > 0:
# for j in self.getCovered(i):
# if j not in self.partialCoveredFrontier:
# self.partialCoveredFrontier.append(j)
#
# for i in self.partialCoveredFrontier:
# # add uncovered tiles surrounding by i
# if self.countUncovered(i) > 0:
# for j in self.getUncovered(i):
# if j not in self.partialUncoveredFrontier and j in self.searchingPoints:
# self.partialUncoveredFrontier.append(j)
# if len(self.partialUncoveredFrontier) == countUncovered and len(self.partialCoveredFrontier) == countCovered:
# IsNewValueAdded = False
# self.partialUncoveredFrontierCopy = self.partialUncoveredFrontier.copy()
# return True
#
# def doModelChecking(self):
# if time.perf_counter() - self.time > 5:
# return False
# if len(self.partialUncoveredFrontierCopy) == 0:
# return False
# checkPoint = self.partialUncoveredFrontierCopy.pop(0)
# if self.searchingPoints[checkPoint] - self.countBombs(checkPoint) < 0:
# return False
# potentialBombs = (checkPoint, [i for i in combinations(self.getCovered(checkPoint), self.searchingPoints[checkPoint] - self.countBombs(checkPoint))])
# potentialBombs[1].append((-1, -1))
#
# if len(self.partialUncoveredFrontierCopy) == 0:
# for bombs in potentialBombs[1]:
# self.appliedBombs.append([])
# if bombs == (-1, -1):
# if self.isValid():
# return True
# else:
# self.appliedBombs.pop(-1)
# else:
# for bomb in bombs:
# if bomb not in self.bombs:
# self.appliedBombs[-1].append(bomb)
# self.bombs.add(bomb)
# if self.isValid():
# return True
# else:
# for i in self.appliedBombs[-1]:
# self.bombs.discard(i)
# self.appliedBombs.pop(-1)
# self.partialUncoveredFrontierCopy.insert(0, checkPoint)
# return False
#
# for bombs in potentialBombs[1]:
# self.appliedBombs.append([])
# if bombs == (-1, -1):
# if self.doModelChecking():
# return True
# else:
# self.appliedBombs.pop(-1)
# break
# for bomb in bombs:
# if bomb in self.bombs:
# pass
# else:
# self.appliedBombs[-1].append(bomb)
# self.bombs.add(bomb)
# if self.doModelChecking():
# return True
# for i in self.appliedBombs[-1]:
# self.bombs.discard(i)
# self.appliedBombs.pop(-1)
# self.partialUncoveredFrontierCopy.insert(0, checkPoint)
# return False
#
#
# def isValid(self):
# for i in self.partialUncoveredFrontier:
# if self.searchingPoints[i] != self.countBombs(i):
# return False
# for j in self.partialCoveredFrontier:
# if j not in self.bombs:
# self.safePoints.add(j)
# return True
| 39.67126 | 160 | 0.52553 | 19,502 | 0.967697 | 0 | 0 | 0 | 0 | 0 | 0 | 6,561 | 0.325559 |
334b5d97c085709c9962cb03b981ba238c44a614 | 3,383 | py | Python | BinaryTree/Node.py | Pedro29152/binary-search-tree-python | e2457d0d2ce5db7044697ec34acd6195aafb71f6 | [
"MIT"
] | null | null | null | BinaryTree/Node.py | Pedro29152/binary-search-tree-python | e2457d0d2ce5db7044697ec34acd6195aafb71f6 | [
"MIT"
] | null | null | null | BinaryTree/Node.py | Pedro29152/binary-search-tree-python | e2457d0d2ce5db7044697ec34acd6195aafb71f6 | [
"MIT"
] | null | null | null | class Node():
def __init__(self, id: int, value = None, right: 'Node' = None, left: 'Node' = None):
self.id = id
self.value = value
self.right = right
self.left = left
self.parent: 'Node' = None
def add(self, node: 'Node'):
if not node:
raise ValueError('node value invalid')
if node.id == self.id:
            raise ValueError('The id sent is already on the tree')
if node.id > self.id:
if not self.right:
node.parent = self
self.right = node
else:
self.right.add(node)
if node.id < self.id:
if not self.left:
node.parent = self
self.left = node
else:
self.left.add(node)
def get_size(self):
size_l = self.left.get_size() if self.left else 0
size_r = self.right.get_size() if self.right else 0
return 1 + size_l + size_r
def get_height(self):
h_l = self.left.get_height() if self.left else 0
h_r = self.right.get_height() if self.right else 0
if h_r > h_l:
return 1 + h_r
return 1 + h_l
def get_node(self, id: int):
if self.id == id:
return self
if id > self.id:
if self.right:
return self.right.get_node(id)
if id < self.id:
if self.left:
return self.left.get_node(id)
return None
def get_min_node(self):
if not self.left:
return self
return self.left.get_min_node()
def get_max_node(self):
if not self.right:
return self
return self.right.get_max_node()
def get_sorted_list(self, max_size: int=None, ascending: bool=True):
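        """Return node ids in sorted order, truncated to max_size if given."""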
if max_size == None:
return self.__get_list(ascending)
return self.__get_list_by_size(max_size, ascending)
def __get_list(self, ascending: bool):
list_e = self.left.__get_list(ascending) if self.left else []
list_d = self.right.__get_list(ascending) if self.right else []
if ascending:
return list_e + [self.id] + list_d
return list_d + [self.id] + list_e
def __get_list_by_size(self, max_size: int, ascending: bool):
if ascending:
st = 'left'
fi = 'right'
else:
st = 'right'
fi = 'left'
list_st = self[st].__get_list_by_size(max_size=max_size, ascending=ascending) if self[st] else []
if max_size <= len(list_st):
return list_st
elif max_size <= len(list_st) + 1:
return list_st + [self.id]
else:
curr_size = len(list_st) + 1
list_fi = self[fi].__get_list_by_size(max_size=max_size-curr_size, ascending=ascending) if self[fi] else []
return list_st + [self.id] + list_fi
def __getitem__(self, name):
return getattr(self, name)
def __setitem__(self, name, value):
return setattr(self, name, value)
def __str__(self):
str_e = self.left.__str__() if self.left else None
str_d = self.right.__str__() if self.right else None
if not (str_e or str_d):
return f'[({self.id})]'
return f'[({self.id}) {str_e}, {str_d}]'
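# Example usage (illustrative):
#   root = Node(10)
#   root.add(Node(5))
#   root.add(Node(15))
#   root.get_sorted_list()                  # -> [5, 10, 15]
#   root.get_sorted_list(ascending=False)   # -> [15, 10, 5]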
| 30.754545 | 119 | 0.544487 | 3,374 | 0.99734 | 0 | 0 | 0 | 0 | 0 | 0 | 154 | 0.045522 |
334de2467a9aad2d75dceb60553a04a3e344347f | 2,895 | py | Python | src/moreos/parsing.py | sigmavirus24/moreos | a32056d89fa519499e704f978db32b737977e2d7 | [
"MIT"
] | 3 | 2020-12-16T16:43:57.000Z | 2021-06-03T10:54:55.000Z | src/moreos/parsing.py | sigmavirus24/moreos | a32056d89fa519499e704f978db32b737977e2d7 | [
"MIT"
] | null | null | null | src/moreos/parsing.py | sigmavirus24/moreos | a32056d89fa519499e704f978db32b737977e2d7 | [
"MIT"
] | null | null | null | """Parsing utilities for moreos."""
import re
import attr
@attr.s(frozen=True)
class ABNF:
"""Container of regular expressions both raw and compiled for parsing."""
# From https://tools.ietf.org/html/rfc2616#section-2.2
ctl = control_characters = "\x7f\x00-\x1f"
digit = "0-9"
separators = r"\[\]\(\)<>@,;:\\\"/?={}\s"
token = f"[^{ctl}{separators}]+"
# RFC1123 date components
wkday = "(?:Mon|Tue|Wed|Thu|Fri|Sat|Sun)"
month = "(?:Jan|Feb|Mar|Apr|May|Jun|Jul|Aug|Sep|Oct|Nov|Dec)"
time = f"[{digit}]{{2}}:[{digit}]{{2}}:[{digit}]{{2}}"
date1 = f"[{digit}]{{1,2}} {month} [{digit}]{{4}}"
# NOTE(sigmavirus24) This allows some nonsense but it's a decent
# high-level check
rfc1123_date = f"{wkday}, {date1} {time} GMT"
# From https://tools.ietf.org/html/rfc1034#section-3.5, enhanced by
# https://tools.ietf.org/html/rfc1123#section-2.1
letter = "A-Za-z"
let_dig = f"{letter}{digit}"
let_dig_hyp = f"{let_dig}-"
ldh_str = f"[{let_dig_hyp}]+"
# This is where the update from rfc1123#section2.1 is relevant
label = f"[{let_dig}](?:(?:{ldh_str})?[{let_dig}])?"
subdomain = f"\\.?(?:{label}\\.)*(?:{label})"
# From https://tools.ietf.org/html/rfc6265#section-3.1
# NOTE: \x5b = [, \x5d = ] so let's escape those directly
cookie_octet = "[\x21\x23-\x2b\\\x2d-\x3a\x3c-\\[\\]-\x7e]"
cookie_value = f'(?:{cookie_octet}*|"{cookie_octet}*")'
cookie_name = token
cookie_pair = f"(?P<name>{cookie_name})=(?P<value>{cookie_value})"
_any_char_except_ctls_or_semicolon = f"[^;{ctl}]+"
extension_av = _any_char_except_ctls_or_semicolon
httponly_av = "(?P<httponly>HttpOnly)"
secure_av = "(?P<secure>Secure)"
path_value = _any_char_except_ctls_or_semicolon
path_av = f"Path=(?P<path>{path_value})"
domain_value = subdomain
domain_av = f"Domain=(?P<domain>{domain_value})"
non_zero_digit = "1-9"
max_age_av = f"Max-Age=(?P<max_age>[{non_zero_digit}][{digit}]*)"
sane_cookie_date = rfc1123_date
expires_av = f"Expires=(?P<expires>{sane_cookie_date})"
samesite_value = "(?:Strict|Lax|None)"
samesite_av = f"SameSite=(?P<samesite>{samesite_value})"
cookie_av = (
f"(?:{expires_av}|{max_age_av}|{domain_av}|{path_av}|"
f"{secure_av}|{httponly_av}|{samesite_av}|{extension_av})"
)
set_cookie_string = f"{cookie_pair}(?:; {cookie_av})*"
# Not specified in either RFC
client_cookie_string = f"(?:({cookie_name})=({cookie_value}))(?:; )?"
# Pre-compiled version of the above abnf
separators_re = re.compile(f"[{separators}]+")
control_characters_re = re.compile(f"[{ctl}]+")
cookie_name_re = token_re = re.compile(token)
cookie_value_re = re.compile(cookie_value)
set_cookie_string_re = re.compile(set_cookie_string)
client_cookie_string_re = re.compile(client_cookie_string)
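# Quick sanity check (illustrative):
#   m = ABNF.set_cookie_string_re.match("SID=31d4d96e407aad42; Path=/; Secure")
#   (m.group('name'), m.group('path'), m.group('secure'))
#   # -> ('SID', '/', 'Secure')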
| 40.208333 | 77 | 0.638342 | 2,812 | 0.97133 | 0 | 0 | 2,833 | 0.978584 | 0 | 0 | 1,681 | 0.580656 |
334e95ed5c389e7fbe37935a81f7aca297e6dd30 | 2,367 | py | Python | tests/factorys.py | ireneontheway5/pymilvus | b812449a98602b4370b3b3430bdeb18b24035e53 | [
"Apache-2.0"
] | null | null | null | tests/factorys.py | ireneontheway5/pymilvus | b812449a98602b4370b3b3430bdeb18b24035e53 | [
"Apache-2.0"
] | null | null | null | tests/factorys.py | ireneontheway5/pymilvus | b812449a98602b4370b3b3430bdeb18b24035e53 | [
"Apache-2.0"
] | null | null | null | # STL imports
import random
import logging
import string
import time
import datetime
import struct
import sys
from functools import wraps
# Third party imports
import numpy as np
import faker
from faker.providers import BaseProvider
logging.getLogger('faker').setLevel(logging.ERROR)
sys.path.append('.')
def gen_vectors(num, dim):
return [[random.random() for _ in range(dim)] for _ in range(num)]
def gen_single_vector(dim):
return [[random.random() for _ in range(dim)]]
def gen_vector(nb, d, seed=np.random.RandomState(1234)):
xb = seed.rand(nb, d).astype("float32")
return xb.tolist()
def gen_unique_str(str=None):
prefix = "".join(random.choice(string.ascii_letters + string.digits) for _ in range(8))
return prefix if str is None else str + "_" + prefix
def get_current_day():
return time.strftime('%Y-%m-%d', time.localtime())
def get_last_day(day):
tmp = datetime.datetime.now() - datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def get_next_day(day):
tmp = datetime.datetime.now() + datetime.timedelta(days=day)
return tmp.strftime('%Y-%m-%d')
def gen_long_str(num):
    result = ''
    for _ in range(num):
        result += random.choice('tomorrow')
    return result
class FakerProvider(BaseProvider):
def collection_name(self):
return 'collection_names' + str(random.randint(1000, 9999))
def name(self):
return 'name' + str(random.randint(1000, 9999))
def dim(self):
return random.randint(0, 999)
fake = faker.Faker()
fake.add_provider(FakerProvider)
def collection_name_factory():
return fake.collection_name()
def records_factory(dimension, nq):
return [[random.random() for _ in range(dimension)] for _ in range(nq)]
def binary_records_factory(dim, nq):
# uint8 values range is [0, 256), so we specify the high range is 256.
xnb = np.random.randint(256, size=[nq, (dim // 8)], dtype="uint8")
xb = [bytes(b) for b in xnb]
return xb
def integer_factory(nq):
return [random.randint(0, 128) for _ in range(nq)]
def time_it(func):
@wraps(func)
def inner(*args, **kwrgs):
pref = time.perf_counter()
result = func(*args, **kwrgs)
delt = time.perf_counter() - pref
print(f"[{func.__name__}][{delt:.4}s]")
return result
return inner
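# Example (illustrative):
#   @time_it
#   def build_index():
#       time.sleep(0.1)
#   build_index()  # prints something like "[build_index][0.1002s]"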
| 22.330189 | 91 | 0.669624 | 269 | 0.113646 | 0 | 0 | 228 | 0.096324 | 0 | 0 | 233 | 0.098437 |
334f49d258643f6f1e499de6346d97b992cad8f0 | 604 | py | Python | tests/test_resources/fixtures/py3_tokens.py | cbillingham/docconvert | 2843f7446546ae90ba3f38e1246e69d208e0f053 | [
"BSD-3-Clause"
] | 8 | 2019-10-07T22:49:20.000Z | 2021-12-30T22:31:28.000Z | tests/test_resources/fixtures/py3_tokens.py | cbillingham/docconvert | 2843f7446546ae90ba3f38e1246e69d208e0f053 | [
"BSD-3-Clause"
] | 5 | 2019-09-17T21:03:38.000Z | 2020-07-23T04:47:21.000Z | tests/test_resources/fixtures/py3_tokens.py | cbillingham/docconvert | 2843f7446546ae90ba3f38e1246e69d208e0f053 | [
"BSD-3-Clause"
] | null | null | null | """Module docstring!"""
a = 1
b = 2
@bleh
@blah
def greet(
name: str,
age: int,
*args,
test='oh yeah',
**kwargs
) -> ({a: 1, b: 2}
):
"""Generic short description
Longer description of this function that does nothing
:param arg1: Desc for arg1
:type arg1: arg1_type
:returns: Desc for return
:rtype: `return_type`
:raises: RaisesError
"""
pass
def greet2(name: str, age: int, *args, test='oh yeah', **kwargs) -> {a: 1, b: 2}: return 1
"""This should not be considered a docstring.
:param arg1: Desc for arg1
:type arg1: arg1_type
""" | 17.764706 | 90 | 0.596026 | 0 | 0 | 0 | 0 | 373 | 0.61755 | 0 | 0 | 374 | 0.619205 |
334fb858f20d8c85bdcd5c7fdc799a281a724b78 | 3,116 | py | Python | src/editor/selection.py | rehmanx/PandaEditor | 125c79605fd46a045201e5ff6a88709764ac104f | [
"MIT"
] | null | null | null | src/editor/selection.py | rehmanx/PandaEditor | 125c79605fd46a045201e5ff6a88709764ac104f | [
"MIT"
] | null | null | null | src/editor/selection.py | rehmanx/PandaEditor | 125c79605fd46a045201e5ff6a88709764ac104f | [
"MIT"
] | null | null | null | import panda3d.core as pm
from editor.p3d.object import Object
from editor.p3d.marquee import Marquee
from editor.p3d.mousePicker import MousePicker
from editor.constants import TAG_IGNORE, TAG_PICKABLE
class Selection(Object):
BBOX_TAG = 'bbox'
def __init__(self, *args, **kwargs):
Object.__init__(self, *args, **kwargs)
self.rootNp.set_python_tag(self.rootNp, TAG_IGNORE)
self.append = False
self.selected_nps = []
# Create a marquee
self.marquee = Marquee('marquee', *args, **kwargs)
# Create node picker - set its collision mask to hit both geom nodes
# and collision nodes
bit_mask = pm.GeomNode.getDefaultCollideMask() | pm.CollisionNode.getDefaultCollideMask()
self.picker = MousePicker('picker', *args, fromCollideMask=bit_mask, **kwargs)
def get_nodepath_under_mouse(self):
"""
Returns the closest node under the mouse, or None if there isn't one.
"""
self.picker.on_update(None)
picked_np = self.picker.GetFirstNodePath()
return picked_np
def set_selected(self, nps, append=False):
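        """Select the given node paths and show their tight bounds; unless
        append is True, the previous selection is deselected first."""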
if type(nps) is not list:
print("Selection --> set_selected argument must be of type list")
return
if not append:
self.deselect_all()
self.selected_nps = nps
for np in self.selected_nps:
np.showTightBounds()
def deselect(self, nps):
pass
def deselect_all(self):
for np in self.selected_nps:
np.hideBounds()
self.selected_nps.clear()
def start_drag_select(self, append=False):
"""
Start the marquee and put the tool into append mode if specified.
"""
if self.marquee.mouseWatcherNode.hasMouse():
self.append = append
self.marquee.Start()
def stop_drag_select(self):
"""
Stop the marquee and get all the node paths under it with the correct
tag. Also append any node which was under the mouse at the end of the
operation.
"""
self.marquee.Stop()
new_selection = []
if self.append:
for np in self.selected_nps:
new_selection.append(np)
else:
self.deselect_all()
for pick_np in self.rootNp.findAllMatches('**'):
if pick_np is not None:
if self.marquee.IsNodePathInside(pick_np) and pick_np.hasNetPythonTag(TAG_PICKABLE):
np = pick_np.getNetPythonTag("PICKABLE")
if np not in new_selection:
new_selection.append(np)
# Add any node path which was under the mouse to the selection.
np = self.get_nodepath_under_mouse()
if np is not None and np.hasNetPythonTag(TAG_PICKABLE):
np = np.getNetPythonTag("PICKABLE")
if np not in new_selection:
new_selection.append(np)
for np in new_selection:
self.selected_nps.append(np)
return new_selection
def update(self):
pass
| 31.474747 | 100 | 0.614249 | 2,910 | 0.93389 | 0 | 0 | 0 | 0 | 0 | 0 | 647 | 0.207638 |
3350e65017c77b155a623adbb0c445784ce6a443 | 33,168 | py | Python | cinder/tests/unit/policies/test_volume.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 571 | 2015-01-01T17:47:26.000Z | 2022-03-23T07:46:36.000Z | cinder/tests/unit/policies/test_volume.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 37 | 2015-01-22T23:27:04.000Z | 2021-02-05T16:38:48.000Z | cinder/tests/unit/policies/test_volume.py | arunvinodqmco/cinder | 62cb72c6890e458427ba0601646b186b7b36dc01 | [
"Apache-2.0"
] | 841 | 2015-01-04T17:17:11.000Z | 2022-03-31T12:06:51.000Z | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from http import HTTPStatus
from unittest import mock
import ddt
from cinder.api.contrib import volume_encryption_metadata
from cinder.api.contrib import volume_tenant_attribute
from cinder.api.v3 import volumes
from cinder import exception
from cinder.policies import volumes as volume_policies
from cinder.tests.unit.api import fakes as fake_api
from cinder.tests.unit import fake_constants
from cinder.tests.unit.policies import base
from cinder.tests.unit.policies import test_base
from cinder.tests.unit import utils as test_utils
from cinder.volume import api as volume_api
# TODO(yikun): The below policy test cases should be added:
# * HOST_ATTRIBUTE_POLICY
# * MIG_ATTRIBUTE_POLICY
class VolumePolicyTests(test_base.CinderPolicyTests):
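    """Policy checks for the volumes API, exercised through fake requests.

    Each test issues a request as an admin, owner, or non-owner context and
    asserts the HTTP status that the policy layer should produce.
    """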
def test_admin_can_create_volume(self):
admin_context = self.admin_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': admin_context.project_id
}
body = {"volume": {"size": 1}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_nonadmin_user_can_create_volume(self):
user_context = self.user_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': user_context.project_id
}
body = {"volume": {"size": 1}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_admin_can_create_volume_from_image(self):
admin_context = self.admin_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': admin_context.project_id
}
body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
def test_nonadmin_user_can_create_volume_from_image(self):
user_context = self.user_context
path = '/v3/%(project_id)s/volumes' % {
'project_id': user_context.project_id
}
body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
@mock.patch.object(volume_api.API, 'get_volume')
def test_admin_can_show_volumes(self, mock_volume):
# Make sure administrators are authorized to list volumes
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
self.assertEqual(response.json_body['volume']['id'], volume.id)
@mock.patch.object(volume_api.API, 'get_volume')
def test_owner_can_show_volumes(self, mock_volume):
# Make sure owners are authorized to list their volumes
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
self.assertEqual(response.json_body['volume']['id'], volume.id)
@mock.patch.object(volume_api.API, 'get_volume')
def test_owner_cannot_show_volumes_for_others(self, mock_volume):
# Make sure volumes are only exposed to their owners
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path, 'GET')
# NOTE(lbragstad): Technically, this user isn't supposed to see this
# volume, because they didn't create it and it lives in a different
# project. Does cinder return a 404 in cases like this? Or is a 403
# expected?
self.assertEqual(HTTPStatus.NOT_FOUND, response.status_int)
def test_admin_can_get_all_volumes_detail(self):
# Make sure administrators are authorized to list volumes
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': admin_context.project_id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
self.assertEqual(volume.id, res_vol['id'])
def test_owner_can_get_all_volumes_detail(self):
# Make sure owners are authorized to list volumes
user_context = self.user_context
volume = self._create_fake_volume(user_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': user_context.project_id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
self.assertEqual(volume.id, res_vol['id'])
@mock.patch.object(volume_api.API, 'get')
def test_admin_can_update_volumes(self, mock_volume):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"volume": {"name": "update_name"}}
response = self._get_request_response(admin_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_can_update_volumes(self, mock_volume):
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"volume": {"name": "update_name"}}
response = self._get_request_response(user_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_update_volumes_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"volume": {"name": "update_name"}}
response = self._get_request_response(non_owner_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_can_delete_volumes(self, mock_volume):
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'DELETE')
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_admin_can_delete_volumes(self, mock_volume):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'DELETE')
self.assertEqual(HTTPStatus.ACCEPTED, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_delete_volumes_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path,
'DELETE')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get_volume')
def test_admin_can_show_tenant_id_in_volume(self, mock_volume):
# Make sure administrators are authorized to show tenant_id
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volume']
self.assertEqual(admin_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
@mock.patch.object(volume_api.API, 'get_volume')
def test_owner_can_show_tenant_id_in_volume(self, mock_volume):
# Make sure owners are authorized to show tenant_id in volume
user_context = self.user_context
volume = self._create_fake_volume(user_context)
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volume']
self.assertEqual(user_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
def test_admin_can_show_tenant_id_in_volume_detail(self):
# Make sure admins are authorized to show tenant_id in volume detail
admin_context = self.admin_context
self._create_fake_volume(admin_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': admin_context.project_id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
# Make sure owners are authorized to show tenant_id
self.assertEqual(admin_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
def test_owner_can_show_tenant_id_in_volume_detail(self):
# Make sure owners are authorized to show tenant_id in volume detail
user_context = self.user_context
self._create_fake_volume(user_context)
path = '/v3/%(project_id)s/volumes/detail' % {
'project_id': user_context.project_id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_vol = response.json_body['volumes'][0]
# Make sure owners are authorized to show tenant_id
self.assertEqual(user_context.project_id,
res_vol['os-vol-tenant-attr:tenant_id'])
def test_admin_can_create_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(admin_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_admin_can_get_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(admin_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
def test_admin_can_update_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': admin_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(admin_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v2', res_meta['k'])
def test_admin_can_delete_metadata(self):
admin_context = self.admin_context
volume = self._create_fake_volume(admin_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': admin_context.project_id, 'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(admin_context, path, 'DELETE')
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_owner_can_create_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(user_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
def test_owner_can_get_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(user_context, path, 'GET')
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v', res_meta['k'])
def test_owner_can_update_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': user_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(user_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.OK, response.status_int)
res_meta = response.json_body['metadata']
self.assertIn('k', res_meta)
self.assertEqual('v2', res_meta['k'])
def test_owner_can_delete_metadata(self):
user_context = self.user_context
volume = self._create_fake_volume(user_context, metadata={"k": "v"})
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': user_context.project_id, 'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(user_context, path, 'DELETE')
self.assertEqual(HTTPStatus.OK, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_create_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k1": "v1"}}
response = self._get_request_response(non_owner_context, path, 'POST',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_get_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
response = self._get_request_response(non_owner_context, path, 'GET')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_update_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata' % {
'project_id': non_owner_context.project_id, 'volume_id': volume.id
}
body = {"metadata": {"k": "v2"}}
response = self._get_request_response(non_owner_context, path, 'PUT',
body=body)
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@mock.patch.object(volume_api.API, 'get')
def test_owner_cannot_delete_metadata_for_others(self, mock_volume):
owner_context = self.user_context
non_owner_context = self.other_user_context
volume = self._create_fake_volume(owner_context, metadata={"k": "v"})
mock_volume.return_value = volume
path = '/v3/%(project_id)s/volumes/%(volume_id)s/metadata/%(key)s' % {
'project_id': non_owner_context.project_id,
'volume_id': volume.id,
'key': 'k'
}
response = self._get_request_response(non_owner_context, path,
'DELETE')
self.assertEqual(HTTPStatus.FORBIDDEN, response.status_int)
@ddt.ddt
class VolumesPolicyTest(base.BasePolicyTest):
authorized_readers = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_readers = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
authorized_members = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
]
unauthorized_members = [
'system_member',
'system_reader',
'system_foo',
'other_project_member',
'other_project_reader',
]
create_authorized_users = [
'legacy_admin',
'legacy_owner',
'system_admin',
'project_admin',
'project_member',
'project_reader',
'project_foo',
        # The other_* users are allowed because we don't have any check
        # mechanism in the code to validate this; these are validated at
        # the WSGI layer.
'other_project_member',
'other_project_reader',
]
create_unauthorized_users = [
'system_member',
'system_reader',
'system_foo',
]
    # The basic policy test runs without enforcing scope (which cinder
    # doesn't yet support) and with the deprecated rules enabled.
def setUp(self, enforce_scope=False, enforce_new_defaults=False,
*args, **kwargs):
super().setUp(enforce_scope, enforce_new_defaults, *args, **kwargs)
self.controller = volumes.VolumeController(mock.MagicMock())
self.api_path = '/v3/%s/volumes' % (self.project_id)
def _create_volume(self):
vol_type = test_utils.create_volume_type(self.project_admin_context,
name='fake_vol_type',
testcase_instance=self)
volume = test_utils.create_volume(self.project_member_context,
volume_type_id=vol_type.id,
testcase_instance=self)
return volume
@ddt.data(*base.all_users)
def test_create_volume_policy(self, user_id):
rule_name = volume_policies.CREATE_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
req.method = 'POST'
body = {"volume": {"size": 1}}
unauthorized_exceptions = []
self.common_policy_check(user_id, self.create_authorized_users,
self.create_unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
@mock.patch('cinder.api.v3.volumes.VolumeController._image_uuid_from_ref',
return_value=fake_constants.IMAGE_ID)
@mock.patch('cinder.api.v3.volumes.VolumeController._get_image_snapshot',
return_value=None)
@mock.patch('cinder.volume.flows.api.create_volume.'
'ExtractVolumeRequestTask._get_image_metadata',
return_value=None)
def test_create_volume_from_image_policy(
self, user_id, mock_image_from_ref, mock_image_snap,
mock_img_meta):
rule_name = volume_policies.CREATE_FROM_IMAGE_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
req.method = 'POST'
body = {"volume": {"size": 1, "image_id": fake_constants.IMAGE_ID}}
unauthorized_exceptions = []
self.common_policy_check(user_id, self.create_authorized_users,
self.create_unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
def test_create_multiattach_volume_policy(self, user_id):
vol_type = test_utils.create_volume_type(
self.project_admin_context, name='multiattach_type',
extra_specs={'multiattach': '<is> True'})
rule_name = volume_policies.MULTIATTACH_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
req.method = 'POST'
body = {"volume": {"size": 1, "volume_type": vol_type.id}}
# Relax the CREATE_POLICY in order to get past that check.
self.policy.set_rules({volume_policies.CREATE_POLICY: ""},
overwrite=False)
unauthorized_exceptions = []
self.common_policy_check(user_id, self.create_authorized_users,
self.create_unauthorized_users,
unauthorized_exceptions,
rule_name, self.controller.create, req,
body=body)
@ddt.data(*base.all_users)
def test_get_volume_policy(self, user_id):
volume = self._create_volume()
rule_name = volume_policies.GET_POLICY
url = '%s/%s' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url)
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(user_id,
self.authorized_readers,
self.unauthorized_readers,
unauthorized_exceptions,
rule_name, self.controller.show, req,
id=volume.id)
@ddt.data(*base.all_users)
def test_get_all_volumes_policy(self, user_id):
self._create_volume()
rule_name = volume_policies.GET_ALL_POLICY
url = self.api_path
req = fake_api.HTTPRequest.blank(url)
# Generally, any logged in user can list all volumes.
authorized_users = [user_id]
unauthorized_users = []
# The exception is when deprecated rules are disabled, in which case
# roles are enforced. Users without the 'reader' role should be
# blocked.
if self.enforce_new_defaults:
context = self.create_context(user_id)
if 'reader' not in context.roles:
authorized_users = []
unauthorized_users = [user_id]
response = self.common_policy_check(user_id, authorized_users,
unauthorized_users, [],
rule_name,
self.controller.index, req)
# For some users, even if they're authorized, the list of volumes
# will be empty if they are not in the volume's project.
empty_response_users = [
*self.unauthorized_readers,
# legacy_admin and system_admin do not have a project_id, and
# so the list of volumes returned will be empty.
'legacy_admin',
'system_admin',
]
volumes = response['volumes'] if response else []
volume_count = 0 if user_id in empty_response_users else 1
self.assertEqual(volume_count, len(volumes))
@ddt.data(*base.all_users)
@mock.patch('cinder.db.volume_encryption_metadata_get')
def test_get_volume_encryption_meta_policy(self, user_id,
mock_encrypt_meta):
encryption_key_id = fake_constants.ENCRYPTION_KEY_ID
mock_encrypt_meta.return_value = (
{'encryption_key_id': encryption_key_id})
controller = (
volume_encryption_metadata.VolumeEncryptionMetadataController())
volume = self._create_volume()
rule_name = volume_policies.ENCRYPTION_METADATA_POLICY
url = '%s/%s/encryption' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url)
unauthorized_exceptions = [
exception.VolumeNotFound,
]
resp = self.common_policy_check(
user_id, self.authorized_readers,
self.unauthorized_readers,
unauthorized_exceptions,
rule_name, controller.index, req,
volume.id)
if user_id in self.authorized_readers:
self.assertEqual(encryption_key_id, resp['encryption_key_id'])
@ddt.data(*base.all_users)
def test_get_volume_tenant_attr_policy(self, user_id):
controller = volume_tenant_attribute.VolumeTenantAttributeController()
volume = self._create_volume()
volume = volume.obj_to_primitive()['versioned_object.data']
rule_name = volume_policies.TENANT_ATTRIBUTE_POLICY
url = '%s/%s' % (self.api_path, volume['id'])
req = fake_api.HTTPRequest.blank(url)
req.get_db_volume = mock.MagicMock()
req.get_db_volume.return_value = volume
resp_obj = mock.MagicMock(obj={'volume': volume})
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.assertNotIn('os-vol-tenant-attr:tenant_id', volume.keys())
self.common_policy_check(
user_id, self.authorized_readers,
self.unauthorized_readers,
unauthorized_exceptions,
rule_name, controller.show, req,
resp_obj, volume['id'], fatal=False)
if user_id in self.authorized_readers:
self.assertIn('os-vol-tenant-attr:tenant_id', volume.keys())
@ddt.data(*base.all_users)
def test_update_volume_policy(self, user_id):
volume = self._create_volume()
rule_name = volume_policies.UPDATE_POLICY
url = '%s/%s' % (self.api_path, volume.id)
body = {"volume": {"name": "update_name"}}
req = fake_api.HTTPRequest.blank(url)
req.method = 'PUT'
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(
user_id, self.authorized_members,
self.unauthorized_members,
unauthorized_exceptions,
rule_name, self.controller.update, req,
id=volume.id, body=body)
@ddt.data(*base.all_users)
def test_delete_volume_policy(self, user_id):
volume = self._create_volume()
rule_name = volume_policies.DELETE_POLICY
url = '%s/%s' % (self.api_path, volume.id)
req = fake_api.HTTPRequest.blank(url)
req.method = 'DELETE'
unauthorized_exceptions = [
exception.VolumeNotFound,
]
self.common_policy_check(
user_id, self.authorized_members,
self.unauthorized_members,
unauthorized_exceptions,
rule_name, self.controller.delete, req,
id=volume.id)
class VolumesPolicySecureRbacTest(VolumesPolicyTest):
create_authorized_users = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
'other_project_member',
]
create_unauthorized_users = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'other_project_reader',
'project_foo',
'project_reader',
]
authorized_readers = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
'project_reader',
]
unauthorized_readers = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_foo',
'other_project_member',
'other_project_reader',
]
authorized_members = [
'legacy_admin',
'system_admin',
'project_admin',
'project_member',
]
unauthorized_members = [
'legacy_owner',
'system_member',
'system_reader',
'system_foo',
'project_reader',
'project_foo',
'other_project_member',
'other_project_reader',
]
def setUp(self, *args, **kwargs):
# Test secure RBAC by disabling deprecated policy rules (scope
# is still not enabled).
super().setUp(enforce_scope=False, enforce_new_defaults=True,
*args, **kwargs)
| 40.00965 | 78 | 0.630306 | 31,882 | 0.961228 | 0 | 0 | 21,231 | 0.640105 | 0 | 0 | 7,042 | 0.212313 |
33515a257e86ab2541599ce4633c5188ac0eb93e | 4,393 | py | Python | ddot/api.py | agary-ucsd/ddot | 6f3755843e11bcf308634f188caca7fc9d4e2da3 | [
"MIT"
] | null | null | null | ddot/api.py | agary-ucsd/ddot | 6f3755843e11bcf308634f188caca7fc9d4e2da3 | [
"MIT"
] | null | null | null | ddot/api.py | agary-ucsd/ddot | 6f3755843e11bcf308634f188caca7fc9d4e2da3 | [
"MIT"
] | null | null | null | import sys
import argparse
import bottle
import pandas as pd
from bottle import Bottle, HTTPError, request
from gevent.pywsgi import WSGIServer
from geventwebsocket.handler import WebSocketHandler
from ddot import Ontology
import tempfile
import os
import csv
path_this = os.path.dirname(os.path.abspath(__file__))
os.environ["PATH"] += os.pathsep + os.path.join(path_this, '..')
print(os.environ["PATH"])
from ddot import generate_clixo_file
bottle.BaseRequest.MEMFILE_MAX = 1024 * 1024
api = Bottle()
@api.get('/api/networks')
def get_networks():
return 'get_networks complete'
@api.post('/api/ontology')
def upload_file():
#=================
# default values
#=================
alpha = 0.05
beta = 0.5
try:
data = request.files.get('file')
except Exception as e:
raise HTTPError(500, e)
if data and data.file:
if (request.query.alpha):
alpha = request.query.alpha
if (request.query.beta):
beta = request.query.beta
with tempfile.NamedTemporaryFile('w', delete=False) as f:
f.write(data.file.read())
f_name = f.name
f.close()
try:
clixo_file = generate_clixo_file(f_name, alpha, beta)
return_json = {}
with open(clixo_file, 'r') as tsvfile:
reader = csv.DictReader(filter(lambda row: row[0] != '#', tsvfile), dialect='excel-tab', fieldnames=['a', 'b', 'c', 'd'])
counter = 0
for row in reader:
return_json[counter] = [row.get('a'), row.get('b'), row.get('c'), row.get('d')]
counter += 1
return return_json
except OverflowError as ofe:
print('Error with running clixo')
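# NOTE: the decorator below registers a second handler for the same
# '/api/ontology' route and reuses the name `upload_file`, so this later
# definition is the one bound to that module-level name.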
@api.post('/api/ontology')
def upload_file():
#=================
# default values
#=================
alpha = 0.05
beta = 0.5
try:
data = request.files.get('file')
except Exception as e:
raise HTTPError(500, e)
if data and data.file:
if (request.query.alpha):
alpha = request.query.alpha
if (request.query.beta):
beta = request.query.beta
with tempfile.NamedTemporaryFile('w', delete=False) as f:
f.write(data.file.read())
f_name = f.name
f.close()
try:
clixo_file = generate_clixo_file(f_name, alpha, beta)
with open(clixo_file, 'r') as f_saved:
df = pd.read_csv(f_saved, sep='\t', engine='python', header=None, comment='#')
print(df.columns)
ont1 = Ontology.from_table(df, clixo_format=True, parent=0, child=1)
ont_url, G = ont1.to_ndex(name='MODY',
ndex_server='http://test.ndexbio.org',
ndex_pass='scratch2',
ndex_user='scratch2',
layout='bubble-collect',
visibility='PUBLIC')
if ont_url is not None and len(ont_url) > 0 and 'http' in ont_url:
uuid = ont_url.split('/')[-1]
                    return 'File has been processed. UUID: %s \n' % uuid
else:
return 'File has been processed. UUID: %s \n' % ont_url
print('File has been processed: %s' % ont_url)
except OverflowError as ofe:
print('Error with running clixo')
else:
raise HTTPError(422, '**** FILE IS MISSING ****')
return "Unable to complete process. See stack message above."
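# Example client call (illustrative sketch, not part of the original module):
# the endpoint expects a multipart upload named 'file' plus optional 'alpha'
# and 'beta' query parameters, e.g. with the third-party `requests` package:
#
#   import requests
#   with open('interactions.txt', 'rb') as fh:  # hypothetical input file
#       r = requests.post(
#           'http://localhost:8383/api/ontology?alpha=0.05&beta=0.5',
#           files={'file': fh})
#   print(r.status_code, r.text)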
# run the web server
def main():
status = 0
parser = argparse.ArgumentParser()
parser.add_argument('port', nargs='?', type=int, help='HTTP port', default=8383)
args = parser.parse_args()
    print('starting web server on port %s' % args.port)
    print('press control-c to quit')
try:
server = WSGIServer(('0.0.0.0', args.port), api, handler_class=WebSocketHandler)
server.serve_forever()
except KeyboardInterrupt:
print('exiting main loop')
except Exception as e:
exit_str = 'could not start web server: %s' % e
print(exit_str)
status = 1
    print('exiting with status %d' % status)
return status
if __name__ == '__main__':
sys.exit(main())
| 28.712418 | 137 | 0.556795 | 0 | 0 | 0 | 0 | 3,149 | 0.716822 | 0 | 0 | 782 | 0.17801 |
335166caa2fb8bf7ef55ff8016ed6d28cfe4b088 | 4,302 | py | Python | testing/logging/test_formatter.py | christian-steinmeyer/pytest | 5cc295e74b81ca7a106e5a096834043738f14dc5 | [
"MIT"
] | 4 | 2018-03-10T16:59:59.000Z | 2019-12-17T09:16:09.000Z | testing/logging/test_formatter.py | christian-steinmeyer/pytest | 5cc295e74b81ca7a106e5a096834043738f14dc5 | [
"MIT"
] | 71 | 2015-10-28T08:10:14.000Z | 2021-12-06T03:02:07.000Z | testing/logging/test_formatter.py | christian-steinmeyer/pytest | 5cc295e74b81ca7a106e5a096834043738f14dc5 | [
"MIT"
] | 2 | 2020-08-01T22:09:38.000Z | 2020-10-13T08:17:24.000Z | import logging
from typing import Any
from _pytest._io import TerminalWriter
from _pytest.logging import ColoredLevelFormatter
def test_coloredlogformatter() -> None:
logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
record = logging.LogRecord(
name="dummy",
level=logging.INFO,
pathname="dummypath",
lineno=10,
msg="Test Message",
args=(),
exc_info=None,
)
class ColorConfig:
class option:
pass
tw = TerminalWriter()
tw.hasmarkup = True
formatter = ColoredLevelFormatter(tw, logfmt)
output = formatter.format(record)
assert output == (
"dummypath 10 \x1b[32mINFO \x1b[0m Test Message"
)
tw.hasmarkup = False
formatter = ColoredLevelFormatter(tw, logfmt)
output = formatter.format(record)
assert output == ("dummypath 10 INFO Test Message")
def test_multiline_message() -> None:
from _pytest.logging import PercentStyleMultiline
logfmt = "%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s"
record: Any = logging.LogRecord(
name="dummy",
level=logging.INFO,
pathname="dummypath",
lineno=10,
msg="Test Message line1\nline2",
args=(),
exc_info=None,
)
# this is called by logging.Formatter.format
record.message = record.getMessage()
ai_on_style = PercentStyleMultiline(logfmt, True)
output = ai_on_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\n"
" line2"
)
ai_off_style = PercentStyleMultiline(logfmt, False)
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\nline2"
)
ai_none_style = PercentStyleMultiline(logfmt, None)
output = ai_none_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\nline2"
)
record.auto_indent = False
output = ai_on_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\nline2"
)
record.auto_indent = True
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\n"
" line2"
)
record.auto_indent = "False"
output = ai_on_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\nline2"
)
record.auto_indent = "True"
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\n"
" line2"
)
# bad string values default to False
record.auto_indent = "junk"
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\nline2"
)
# anything other than string or int will default to False
record.auto_indent = dict()
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\nline2"
)
record.auto_indent = "5"
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\n line2"
)
record.auto_indent = 5
output = ai_off_style.format(record)
assert output == (
"dummypath 10 INFO Test Message line1\n line2"
)
def test_colored_short_level() -> None:
logfmt = "%(levelname).1s %(message)s"
record = logging.LogRecord(
name="dummy",
level=logging.INFO,
pathname="dummypath",
lineno=10,
msg="Test Message",
args=(),
exc_info=None,
)
class ColorConfig:
class option:
pass
tw = TerminalWriter()
tw.hasmarkup = True
formatter = ColoredLevelFormatter(tw, logfmt)
output = formatter.format(record)
# the I (of INFO) is colored
assert output == ("\x1b[32mI\x1b[0m Test Message")
| 28.490066 | 80 | 0.57113 | 114 | 0.026499 | 0 | 0 | 0 | 0 | 0 | 0 | 1,466 | 0.340772 |
3351e52b5194a9276589bc9c2f3638bd2c933566 | 12,356 | py | Python | crystaltoolgui/tabs/tabresmatcher.py | jingshenSN2/CrystalTool | 18f07963ff5f2a54ac2c93e2fa59fada51346232 | [
"MIT"
] | null | null | null | crystaltoolgui/tabs/tabresmatcher.py | jingshenSN2/CrystalTool | 18f07963ff5f2a54ac2c93e2fa59fada51346232 | [
"MIT"
] | null | null | null | crystaltoolgui/tabs/tabresmatcher.py | jingshenSN2/CrystalTool | 18f07963ff5f2a54ac2c93e2fa59fada51346232 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'tabresmatcher.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_tabresmatcher(object):
def setupUi(self, tabresmatcher):
tabresmatcher.setObjectName("tabresmatcher")
tabresmatcher.resize(697, 570)
self.horizontalLayout = QtWidgets.QHBoxLayout(tabresmatcher)
self.horizontalLayout.setObjectName("horizontalLayout")
self.vL_match_1 = QtWidgets.QVBoxLayout()
self.vL_match_1.setObjectName("vL_match_1")
self.l_match_res = QtWidgets.QLabel(tabresmatcher)
self.l_match_res.setObjectName("l_match_res")
self.vL_match_1.addWidget(self.l_match_res)
self.lV_match_res = QtWidgets.QListView(tabresmatcher)
self.lV_match_res.setSelectionMode(QtWidgets.QAbstractItemView.ExtendedSelection)
self.lV_match_res.setObjectName("lV_match_res")
self.vL_match_1.addWidget(self.lV_match_res)
self.hL_match_pB1 = QtWidgets.QHBoxLayout()
self.hL_match_pB1.setObjectName("hL_match_pB1")
self.pB_match_choose_res = QtWidgets.QPushButton(tabresmatcher)
self.pB_match_choose_res.setObjectName("pB_match_choose_res")
self.hL_match_pB1.addWidget(self.pB_match_choose_res)
self.l_solve_count = QtWidgets.QLabel(tabresmatcher)
self.l_solve_count.setObjectName("l_solve_count")
self.hL_match_pB1.addWidget(self.l_solve_count)
self.vL_match_1.addLayout(self.hL_match_pB1)
self.horizontalLayout.addLayout(self.vL_match_1)
self.line = QtWidgets.QFrame(tabresmatcher)
self.line.setFrameShape(QtWidgets.QFrame.VLine)
self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
self.line.setObjectName("line")
self.horizontalLayout.addWidget(self.line)
self.vL_match_2 = QtWidgets.QVBoxLayout()
self.vL_match_2.setContentsMargins(-1, 25, -1, -1)
self.vL_match_2.setObjectName("vL_match_2")
self.hL_match_pdb = QtWidgets.QHBoxLayout()
self.hL_match_pdb.setObjectName("hL_match_pdb")
self.pB_pdb = QtWidgets.QPushButton(tabresmatcher)
self.pB_pdb.setObjectName("pB_pdb")
self.hL_match_pdb.addWidget(self.pB_pdb)
self.l_pdb = QtWidgets.QLabel(tabresmatcher)
self.l_pdb.setObjectName("l_pdb")
self.hL_match_pdb.addWidget(self.l_pdb)
self.vL_match_2.addLayout(self.hL_match_pdb)
self.fL_match_1 = QtWidgets.QFormLayout()
self.fL_match_1.setObjectName("fL_match_1")
self.l_old_algorithm = QtWidgets.QLabel(tabresmatcher)
self.l_old_algorithm.setObjectName("l_old_algorithm")
self.fL_match_1.setWidget(0, QtWidgets.QFormLayout.LabelRole, self.l_old_algorithm)
self.l_loss_atom = QtWidgets.QLabel(tabresmatcher)
self.l_loss_atom.setObjectName("l_loss_atom")
self.fL_match_1.setWidget(1, QtWidgets.QFormLayout.LabelRole, self.l_loss_atom)
self.sB_loss_atom = QtWidgets.QSpinBox(tabresmatcher)
self.sB_loss_atom.setObjectName("sB_loss_atom")
self.fL_match_1.setWidget(1, QtWidgets.QFormLayout.FieldRole, self.sB_loss_atom)
self.l_threshold = QtWidgets.QLabel(tabresmatcher)
self.l_threshold.setObjectName("l_threshold")
self.fL_match_1.setWidget(2, QtWidgets.QFormLayout.LabelRole, self.l_threshold)
self.cB_threshold = QtWidgets.QComboBox(tabresmatcher)
self.cB_threshold.setObjectName("cB_threshold")
self.cB_threshold.addItem("")
self.cB_threshold.addItem("")
self.cB_threshold.addItem("")
self.cB_threshold.addItem("")
self.cB_threshold.addItem("")
self.cB_threshold.addItem("")
self.cB_threshold.addItem("")
self.fL_match_1.setWidget(2, QtWidgets.QFormLayout.FieldRole, self.cB_threshold)
self.l_dBS_threshold = QtWidgets.QLabel(tabresmatcher)
self.l_dBS_threshold.setObjectName("l_dBS_threshold")
self.fL_match_1.setWidget(3, QtWidgets.QFormLayout.LabelRole, self.l_dBS_threshold)
self.dSB_threshold = QtWidgets.QDoubleSpinBox(tabresmatcher)
self.dSB_threshold.setObjectName("dSB_threshold")
self.fL_match_1.setWidget(3, QtWidgets.QFormLayout.FieldRole, self.dSB_threshold)
self.rB_old_algorithm = QtWidgets.QRadioButton(tabresmatcher)
self.rB_old_algorithm.setText("")
self.rB_old_algorithm.setObjectName("rB_old_algorithm")
self.fL_match_1.setWidget(0, QtWidgets.QFormLayout.FieldRole, self.rB_old_algorithm)
self.vL_match_2.addLayout(self.fL_match_1)
self.hL_match_thickness = QtWidgets.QHBoxLayout()
self.hL_match_thickness.setObjectName("hL_match_thickness")
self.l_match_thick = QtWidgets.QLabel(tabresmatcher)
self.l_match_thick.setObjectName("l_match_thick")
self.hL_match_thickness.addWidget(self.l_match_thick)
self.cB_thick_x = QtWidgets.QCheckBox(tabresmatcher)
self.cB_thick_x.setObjectName("cB_thick_x")
self.hL_match_thickness.addWidget(self.cB_thick_x)
self.cB_thick_y = QtWidgets.QCheckBox(tabresmatcher)
self.cB_thick_y.setObjectName("cB_thick_y")
self.hL_match_thickness.addWidget(self.cB_thick_y)
self.cB_thick_z = QtWidgets.QCheckBox(tabresmatcher)
self.cB_thick_z.setObjectName("cB_thick_z")
self.hL_match_thickness.addWidget(self.cB_thick_z)
self.vL_match_2.addLayout(self.hL_match_thickness)
self.gL_match_output = QtWidgets.QGridLayout()
self.gL_match_output.setObjectName("gL_match_output")
self.cB_Ra = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Ra.setChecked(True)
self.cB_Ra.setObjectName("cB_Ra")
self.gL_match_output.addWidget(self.cB_Ra, 4, 1, 1, 1)
self.cB_Rb = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Rb.setChecked(True)
self.cB_Rb.setObjectName("cB_Rb")
self.gL_match_output.addWidget(self.cB_Rb, 4, 2, 1, 1)
self.l_match_output = QtWidgets.QLabel(tabresmatcher)
self.l_match_output.setObjectName("l_match_output")
self.gL_match_output.addWidget(self.l_match_output, 1, 0, 1, 1)
self.cB_Nm = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Nm.setChecked(True)
self.cB_Nm.setObjectName("cB_Nm")
self.gL_match_output.addWidget(self.cB_Nm, 1, 2, 1, 1)
self.cB_Tm = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Tm.setChecked(True)
self.cB_Tm.setObjectName("cB_Tm")
self.gL_match_output.addWidget(self.cB_Tm, 1, 1, 1, 1)
self.cB_R1 = QtWidgets.QCheckBox(tabresmatcher)
self.cB_R1.setChecked(True)
self.cB_R1.setObjectName("cB_R1")
self.gL_match_output.addWidget(self.cB_R1, 5, 1, 1, 1)
self.cB_Alpha = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Alpha.setChecked(True)
self.cB_Alpha.setObjectName("cB_Alpha")
self.gL_match_output.addWidget(self.cB_Alpha, 5, 3, 1, 1)
self.cB_Rweak = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Rweak.setChecked(True)
self.cB_Rweak.setObjectName("cB_Rweak")
self.gL_match_output.addWidget(self.cB_Rweak, 5, 2, 1, 1)
self.cB_Rw = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Rw.setChecked(True)
self.cB_Rw.setObjectName("cB_Rw")
self.gL_match_output.addWidget(self.cB_Rw, 1, 3, 1, 1)
self.cB_Rc = QtWidgets.QCheckBox(tabresmatcher)
self.cB_Rc.setChecked(True)
self.cB_Rc.setObjectName("cB_Rc")
self.gL_match_output.addWidget(self.cB_Rc, 4, 3, 1, 1)
self.vL_match_2.addLayout(self.gL_match_output)
self.hL_match_sort = QtWidgets.QHBoxLayout()
self.hL_match_sort.setObjectName("hL_match_sort")
self.l_match_sort = QtWidgets.QLabel(tabresmatcher)
self.l_match_sort.setObjectName("l_match_sort")
self.hL_match_sort.addWidget(self.l_match_sort)
self.lE_match_sort = QtWidgets.QLineEdit(tabresmatcher)
self.lE_match_sort.setInputMask("")
self.lE_match_sort.setMaxLength(32767)
self.lE_match_sort.setObjectName("lE_match_sort")
self.hL_match_sort.addWidget(self.lE_match_sort)
self.vL_match_2.addLayout(self.hL_match_sort)
spacerItem = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Expanding)
self.vL_match_2.addItem(spacerItem)
self.hL_match_start = QtWidgets.QHBoxLayout()
self.hL_match_start.setObjectName("hL_match_start")
self.pB_match_start = QtWidgets.QPushButton(tabresmatcher)
self.pB_match_start.setObjectName("pB_match_start")
self.hL_match_start.addWidget(self.pB_match_start)
self.l_match_start = QtWidgets.QLabel(tabresmatcher)
self.l_match_start.setObjectName("l_match_start")
self.hL_match_start.addWidget(self.l_match_start)
self.vL_match_2.addLayout(self.hL_match_start)
self.bar_match = QtWidgets.QProgressBar(tabresmatcher)
self.bar_match.setProperty("value", 0)
self.bar_match.setAlignment(QtCore.Qt.AlignLeading|QtCore.Qt.AlignLeft|QtCore.Qt.AlignVCenter)
self.bar_match.setObjectName("bar_match")
self.vL_match_2.addWidget(self.bar_match)
self.horizontalLayout.addLayout(self.vL_match_2)
self.retranslateUi(tabresmatcher)
self.cB_threshold.setCurrentIndex(0)
QtCore.QMetaObject.connectSlotsByName(tabresmatcher)
def retranslateUi(self, tabresmatcher):
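        # Rough English equivalents for the Chinese UI strings set below
        # (illustrative notes, not part of the generated file): "RES文件" =
        # "RES files", "选择RES文件" = "choose RES files", "已选0个" =
        # "0 selected", "选择待搜索结构(pdb)" = "choose structure to search
        # for (pdb)", "未选择" = "none selected", "使用旧算法" = "use legacy
        # algorithm", "可损失原子数" = "allowed lost atoms", "汇报阈值基于" =
        # "report threshold based on", "无" = "none", "汇报阈值" = "report
        # threshold", "晶胞加层" = "add unit-cell layers", "输出指标" =
        # "output metrics", "排序规则" = "sort rule", "开始匹配" = "start
        # matching", "未开始匹配" = "matching not started".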
_translate = QtCore.QCoreApplication.translate
tabresmatcher.setWindowTitle(_translate("tabresmatcher", "Form"))
self.l_match_res.setText(_translate("tabresmatcher", "RES文件"))
self.pB_match_choose_res.setText(_translate("tabresmatcher", "选择RES文件"))
self.l_solve_count.setText(_translate("tabresmatcher", "已选0个"))
self.pB_pdb.setText(_translate("tabresmatcher", "选择待搜索结构(pdb)"))
self.l_pdb.setText(_translate("tabresmatcher", "未选择"))
self.l_old_algorithm.setText(_translate("tabresmatcher", "使用旧算法"))
self.l_loss_atom.setText(_translate("tabresmatcher", "可损失原子数"))
self.l_threshold.setText(_translate("tabresmatcher", "汇报阈值基于"))
self.cB_threshold.setCurrentText(_translate("tabresmatcher", "无"))
self.cB_threshold.setItemText(0, _translate("tabresmatcher", "无"))
self.cB_threshold.setItemText(1, _translate("tabresmatcher", "Tm(匹配上次数)"))
self.cB_threshold.setItemText(2, _translate("tabresmatcher", "Nm(匹配上原子数)"))
self.cB_threshold.setItemText(3, _translate("tabresmatcher", "Rwm(质量加权匹配比例)"))
self.cB_threshold.setItemText(4, _translate("tabresmatcher", "Rwe2(电子加权匹配比例)"))
self.cB_threshold.setItemText(5, _translate("tabresmatcher", "Ram(元素匹配相似度)"))
self.cB_threshold.setItemText(6, _translate("tabresmatcher", "Rc(坐标匹配相似度)"))
self.l_dBS_threshold.setText(_translate("tabresmatcher", "汇报阈值"))
self.l_match_thick.setText(_translate("tabresmatcher", "晶胞加层"))
self.cB_thick_x.setText(_translate("tabresmatcher", "x"))
self.cB_thick_y.setText(_translate("tabresmatcher", "y"))
self.cB_thick_z.setText(_translate("tabresmatcher", "z"))
self.cB_Ra.setText(_translate("tabresmatcher", "Ra"))
self.cB_Rb.setText(_translate("tabresmatcher", "Rb"))
self.l_match_output.setText(_translate("tabresmatcher", "输出指标"))
self.cB_Nm.setText(_translate("tabresmatcher", "Nm"))
self.cB_Tm.setText(_translate("tabresmatcher", "Tm"))
self.cB_R1.setText(_translate("tabresmatcher", "R1"))
self.cB_Alpha.setText(_translate("tabresmatcher", "Alpha"))
self.cB_Rweak.setText(_translate("tabresmatcher", "Rweak"))
self.cB_Rw.setText(_translate("tabresmatcher", "Rw"))
self.cB_Rc.setText(_translate("tabresmatcher", "Rc"))
self.l_match_sort.setText(_translate("tabresmatcher", "排序规则"))
self.lE_match_sort.setText(_translate("tabresmatcher", "-Tm,-Nm"))
self.pB_match_start.setText(_translate("tabresmatcher", "开始匹配"))
self.l_match_start.setText(_translate("tabresmatcher", "未开始匹配"))
| 56.420091 | 114 | 0.717627 | 12,222 | 0.972779 | 0 | 0 | 0 | 0 | 0 | 0 | 1,890 | 0.15043 |
3353952224984b77381aeb9e737b1658da3774db | 727 | py | Python | src/bioregistry/export/cli.py | biopragmatics/bioregistry | 1c994b97f5bfe7151e01edb8e6a26edbaa30f33c | [
"MIT"
] | 17 | 2021-09-14T17:58:16.000Z | 2022-03-11T06:22:11.000Z | src/bioregistry/export/cli.py | biopragmatics/bioregistry | 1c994b97f5bfe7151e01edb8e6a26edbaa30f33c | [
"MIT"
] | 148 | 2021-09-18T08:52:46.000Z | 2022-03-23T15:57:35.000Z | src/bioregistry/export/cli.py | biopragmatics/bioregistry | 1c994b97f5bfe7151e01edb8e6a26edbaa30f33c | [
"MIT"
] | 9 | 2021-09-28T20:18:53.000Z | 2022-03-14T17:23:35.000Z | # -*- coding: utf-8 -*-
"""Export the Bioregistry."""
import click
@click.command()
@click.pass_context
def export(ctx: click.Context):
"""Export the Bioregistry."""
from .prefix_maps import generate_contexts
from .rdf_export import export_rdf
from .sssom_export import export_sssom
from .tables_export import export_tables
from .tsv_export import export_tsv
from .warnings_export import export_warnings
from .yaml_export import export_yaml
ctx.invoke(export_warnings)
ctx.invoke(export_rdf)
ctx.invoke(export_tsv)
ctx.invoke(export_yaml)
ctx.invoke(export_sssom)
ctx.invoke(export_tables)
ctx.invoke(generate_contexts)
if __name__ == "__main__":
export()
| 23.451613 | 48 | 0.727648 | 0 | 0 | 0 | 0 | 613 | 0.843191 | 0 | 0 | 91 | 0.125172 |
335499578c54e600ef5b6cd7a2e1c167305703ec | 1,327 | py | Python | tests/test_main.py | brettcannon/release-often | f28034d5525faf1953927d321d162e42943361f6 | [
"MIT"
] | 8 | 2020-04-06T00:32:45.000Z | 2020-06-09T22:45:02.000Z | tests/test_main.py | brettcannon/release-often | f28034d5525faf1953927d321d162e42943361f6 | [
"MIT"
] | 1 | 2020-05-14T18:32:20.000Z | 2020-05-14T18:32:20.000Z | tests/test_main.py | brettcannon/release-often | f28034d5525faf1953927d321d162e42943361f6 | [
"MIT"
] | null | null | null | import json
from unittest import mock
import gidgethub.abc
import pytest
from release_often import __main__ as main
class TestMatchingPR:
@pytest.mark.asyncio
async def test_pr_found(self, data_path):
"""Test when a PR number is specified in the initial commit's message."""
gh_mock = mock.AsyncMock(gidgethub.abc.GitHubAPI)
push_data = data_path / "push.json"
push_event = json.loads(push_data.read_text(encoding="utf-8"))
pr_data = data_path / "PR.json"
pr_event = json.loads(pr_data.read_text(encoding="utf-8"))
gh_mock.getitem.return_value = pr_event
result = await main.matching_pr(gh_mock, push_event)
assert result == pr_event
gh_mock.getitem.assert_called_with(
push_event["repository"]["pulls_url"], {"number": "108"}
)
@pytest.mark.asyncio
async def test_no_pr_number(self, data_path):
"""Test when no PR number is specified in the initial commit message."""
gh_mock = mock.AsyncMock(gidgethub.abc.GitHubAPI)
push_data = data_path / "push.json"
push_event = json.loads(push_data.read_text(encoding="utf-8"))
push_event["commits"][0]["message"] = "No PR to see here!"
result = await main.matching_pr(gh_mock, push_event)
assert not result
| 35.864865 | 81 | 0.6737 | 1,206 | 0.908817 | 0 | 0 | 1,174 | 0.884702 | 1,124 | 0.847023 | 271 | 0.20422 |
335622d55b7a6b99e8e7667af2ca6f6644e1aaee | 23,330 | py | Python | src/ezdxf/path/tools.py | dmtvanzanten/ezdxf | 6fe9d0aa961e011c87768aa6511256de21a662dd | [
"MIT"
] | null | null | null | src/ezdxf/path/tools.py | dmtvanzanten/ezdxf | 6fe9d0aa961e011c87768aa6511256de21a662dd | [
"MIT"
] | null | null | null | src/ezdxf/path/tools.py | dmtvanzanten/ezdxf | 6fe9d0aa961e011c87768aa6511256de21a662dd | [
"MIT"
] | null | null | null | # Copyright (c) 2020-2021, Manfred Moitzi
# License: MIT License
from typing import (
TYPE_CHECKING,
List,
Iterable,
Tuple,
Optional,
Dict,
Sequence,
)
import math
import itertools
from ezdxf.math import (
Vec3,
Z_AXIS,
OCS,
Matrix44,
BoundingBox,
ConstructionEllipse,
cubic_bezier_from_ellipse,
Bezier4P,
Bezier3P,
BSpline,
reverse_bezier_curves,
bulge_to_arc,
)
from ezdxf.query import EntityQuery
from .path import Path
from .commands import Command
from . import converter
if TYPE_CHECKING:
from ezdxf.eztypes import Vertex, Layout, EntityQuery
__all__ = [
"bbox",
"fit_paths_into_box",
"transform_paths",
"transform_paths_to_ocs",
"render_lwpolylines",
"render_polylines2d",
"render_polylines3d",
"render_lines",
"render_hatches",
"render_mpolygons",
"render_splines_and_polylines",
"add_bezier4p",
"add_bezier3p",
"add_ellipse",
"add_2d_polyline",
"add_spline",
"to_multi_path",
"single_paths",
]
MAX_DISTANCE = 0.01
MIN_SEGMENTS = 4
G1_TOL = 1e-4
IS_CLOSE_TOL = 1e-10
def to_multi_path(paths: Iterable[Path]) -> Path:
"""Returns a multi-path object from all given paths and their sub-paths.
.. versionadded:: 0.17
"""
multi_path = Path()
for p in paths:
multi_path.extend_multi_path(p)
return multi_path
def single_paths(paths: Iterable[Path]) -> Iterable[Path]:
"""Yields all given paths and their sub-paths as single path objects.
.. versionadded:: 0.17
"""
for p in paths:
if p.has_sub_paths:
yield from p.sub_paths()
else:
yield p
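# Illustrative sketch (not part of the original module): combining two single
# paths into one multi-path object and splitting it back again.
def _example_multi_path_roundtrip():
    a = Path((0, 0))
    a.line_to((1, 0))
    b = Path((0, 1))
    b.line_to((1, 1))
    combined = to_multi_path([a, b])  # "b" is appended via a MOVE_TO command
    return list(single_paths([combined]))  # -> two single Path objects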
def transform_paths(paths: Iterable[Path], m: Matrix44) -> List[Path]:
"""Transform multiple :class:`Path` objects at once by transformation
matrix `m`. Returns a list of the transformed :class:`Path` objects.
Args:
paths: iterable of :class:`Path` objects
m: transformation matrix of type :class:`~ezdxf.math.Matrix44`
"""
def decompose(path: Path):
vertices.append(path.start)
commands.append(Command.START_PATH)
for cmd in path:
commands.extend(itertools.repeat(cmd.type, len(cmd)))
vertices.extend(cmd)
def rebuild(vertices):
# localize variables:
start_path, line_to, curve3_to, curve4_to, move_to = Command
path = None
collect = []
for vertex, cmd in zip(vertices, commands):
if cmd == start_path:
if path is not None:
transformed_paths.append(path)
path = Path(vertex)
elif cmd == line_to:
path.line_to(vertex)
elif cmd == curve3_to:
collect.append(vertex)
if len(collect) == 2:
path.curve3_to(collect[0], collect[1])
collect.clear()
elif cmd == curve4_to:
collect.append(vertex)
if len(collect) == 3:
path.curve4_to(collect[0], collect[1], collect[2])
collect.clear()
elif cmd == move_to:
path.move_to(vertex)
if path is not None:
transformed_paths.append(path)
vertices = []
commands = []
transformed_paths = []
for path in paths:
decompose(path)
if len(commands):
rebuild(m.transform_vertices(vertices))
return transformed_paths
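# Illustrative sketch (not part of the original module): transform_paths()
# batches the vertices of all given paths through a single matrix call.
def _example_transform_paths():
    p1 = Path((0, 0))
    p1.line_to((1, 0))
    p2 = Path((0, 1))
    p2.line_to((1, 1))
    moved = transform_paths([p1, p2], Matrix44.translate(10, 0, 0))
    assert moved[0].start.isclose((10, 0, 0))
    return moved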
def transform_paths_to_ocs(paths: Iterable[Path], ocs: OCS) -> List[Path]:
"""Transform multiple :class:`Path` objects at once from WCS to OCS.
Returns a list of the transformed :class:`Path` objects.
Args:
paths: iterable of :class:`Path` objects
ocs: OCS transformation of type :class:`~ezdxf.math.OCS`
"""
t = ocs.matrix.copy()
t.transpose()
return transform_paths(paths, t)
def bbox(
paths: Iterable[Path], flatten=0.01, segments: int = 16
) -> BoundingBox:
"""Returns the :class:`~ezdxf.math.BoundingBox` for the given paths.
Args:
paths: iterable of :class:`~ezdxf.path.Path` objects
flatten: value != 0 for bounding box calculation from the flattened
path and value == 0 for bounding box from the control vertices.
Default value is 0.01 as max flattening distance.
segments: minimal segment count for flattening
"""
box = BoundingBox()
for p in paths:
if flatten:
box.extend(p.flattening(distance=abs(flatten), segments=segments))
else:
box.extend(p.control_vertices())
return box
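# Illustrative sketch (not part of the original module): with flatten=0 the
# box is taken from the control vertices, which can overestimate curved paths.
def _example_bbox():
    p = Path((0, 0))
    p.curve4_to((4, 0), (1, 3), (3, 3))  # control points reach y=3
    control_box = bbox([p], flatten=0)  # y-size 3.0 from control points
    curve_box = bbox([p])  # y-size ~2.25 from the flattened curve
    return control_box, curve_box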
def fit_paths_into_box(
paths: Iterable[Path],
size: Tuple[float, float, float],
uniform: bool = True,
source_box: BoundingBox = None,
) -> List[Path]:
"""Scale the given `paths` to fit into a box of the given `size`,
so that all path vertices are inside this borders.
If `source_box` is ``None`` the default source bounding box is calculated
from the control points of the `paths`.
    `Note:` if the target size has a z-size of 0, the `paths` are
    projected into the xy-plane; likewise an x-size of 0 projects into
    the yz-plane and a y-size of 0 projects into the xz-plane.
Args:
paths: iterable of :class:`~ezdxf.path.Path` objects
size: target box size as tuple of x-, y- ond z-size values
uniform: ``True`` for uniform scaling
source_box: pass precalculated source bounding box, or ``None`` to
calculate the default source bounding box from the control vertices
"""
paths = list(paths)
if len(paths) == 0:
return paths
if source_box is None:
current_box = bbox(paths, flatten=0)
else:
current_box = source_box
if not current_box.has_data or current_box.size == (0, 0, 0):
return paths
target_size = Vec3(size)
if target_size == (0, 0, 0) or min(target_size) < 0:
raise ValueError("invalid target size")
if uniform:
sx, sy, sz = _get_uniform_scaling(current_box.size, target_size)
else:
sx, sy, sz = _get_non_uniform_scaling(current_box.size, target_size)
m = Matrix44.scale(sx, sy, sz)
return transform_paths(paths, m)
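# Illustrative sketch (not part of the original module): uniform fitting keeps
# the aspect ratio, and a target z-size of 0 projects into the xy-plane.
def _example_fit_paths_into_box():
    p = Path((0, 0))
    p.line_to((2, 1, 1))
    fitted = fit_paths_into_box([p], size=(1, 1, 0))
    return bbox(fitted, flatten=0).size  # -> (1.0, 0.5, 0.0)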
def _get_uniform_scaling(current_size: Vec3, target_size: Vec3):
TOL = 1e-6
scale_x = math.inf
if current_size.x > TOL and target_size.x > TOL:
scale_x = target_size.x / current_size.x
scale_y = math.inf
if current_size.y > TOL and target_size.y > TOL:
scale_y = target_size.y / current_size.y
scale_z = math.inf
if current_size.z > TOL and target_size.z > TOL:
scale_z = target_size.z / current_size.z
uniform_scale = min(scale_x, scale_y, scale_z)
if uniform_scale is math.inf:
raise ArithmeticError("internal error")
scale_x = uniform_scale if target_size.x > TOL else 0
scale_y = uniform_scale if target_size.y > TOL else 0
scale_z = uniform_scale if target_size.z > TOL else 0
return scale_x, scale_y, scale_z
def _get_non_uniform_scaling(current_size: Vec3, target_size: Vec3):
TOL = 1e-6
scale_x = 1.0
if current_size.x > TOL:
scale_x = target_size.x / current_size.x
scale_y = 1.0
if current_size.y > TOL:
scale_y = target_size.y / current_size.y
scale_z = 1.0
if current_size.z > TOL:
scale_z = target_size.z / current_size.z
return scale_x, scale_y, scale_z
# Path to entity converter and render utilities:
def render_lwpolylines(
layout: "Layout",
paths: Iterable[Path],
*,
distance: float = MAX_DISTANCE,
segments: int = MIN_SEGMENTS,
extrusion: "Vertex" = Z_AXIS,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as
:class:`~ezdxf.entities.LWPolyline` entities.
The `extrusion` vector is applied to all paths, all vertices are projected
onto the plane normal to this extrusion vector. The default extrusion vector
is the WCS z-axis. The plane elevation is the distance from the WCS origin
to the start point of the first path.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
distance: maximum distance, see :meth:`Path.flattening`
segments: minimum segment count per Bézier curve
extrusion: extrusion vector for all paths
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.16
"""
lwpolylines = list(
converter.to_lwpolylines(
paths,
distance=distance,
segments=segments,
extrusion=extrusion,
dxfattribs=dxfattribs,
)
)
for lwpolyline in lwpolylines:
layout.add_entity(lwpolyline)
return EntityQuery(lwpolylines)
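# Illustrative usage sketch (not part of the original module); the other
# render_*() helpers below follow the same calling pattern:
#
#   import ezdxf
#   doc = ezdxf.new()
#   msp = doc.modelspace()
#   p = Path((0, 0))
#   p.curve4_to((4, 0), (1, 2), (3, 2))
#   render_lwpolylines(msp, [p], distance=0.01, dxfattribs={"layer": "FLAT"})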
def render_polylines2d(
layout: "Layout",
paths: Iterable[Path],
*,
distance: float = 0.01,
segments: int = 4,
extrusion: "Vertex" = Z_AXIS,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as 2D
:class:`~ezdxf.entities.Polyline` entities.
The `extrusion` vector is applied to all paths, all vertices are projected
    onto the plane normal to this extrusion vector. The default extrusion vector
is the WCS z-axis. The plane elevation is the distance from the WCS origin
to the start point of the first path.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
distance: maximum distance, see :meth:`Path.flattening`
segments: minimum segment count per Bézier curve
extrusion: extrusion vector for all paths
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.16
"""
polylines2d = list(
converter.to_polylines2d(
paths,
distance=distance,
segments=segments,
extrusion=extrusion,
dxfattribs=dxfattribs,
)
)
for polyline2d in polylines2d:
layout.add_entity(polyline2d)
return EntityQuery(polylines2d)
def render_hatches(
layout: "Layout",
paths: Iterable[Path],
*,
edge_path: bool = True,
distance: float = MAX_DISTANCE,
segments: int = MIN_SEGMENTS,
g1_tol: float = G1_TOL,
extrusion: "Vertex" = Z_AXIS,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as
:class:`~ezdxf.entities.Hatch` entities.
The `extrusion` vector is applied to all paths, all vertices are projected
onto the plane normal to this extrusion vector. The default extrusion vector
is the WCS z-axis. The plane elevation is the distance from the WCS origin
to the start point of the first path.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
edge_path: ``True`` for edge paths build of LINE and SPLINE edges,
``False`` for only LWPOLYLINE paths as boundary paths
distance: maximum distance, see :meth:`Path.flattening`
segments: minimum segment count per Bézier curve to flatten polyline paths
g1_tol: tolerance for G1 continuity check to separate SPLINE edges
extrusion: extrusion vector for all paths
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.16
"""
hatches = list(
converter.to_hatches(
paths,
edge_path=edge_path,
distance=distance,
segments=segments,
g1_tol=g1_tol,
extrusion=extrusion,
dxfattribs=dxfattribs,
)
)
for hatch in hatches:
layout.add_entity(hatch)
return EntityQuery(hatches)
def render_mpolygons(
layout: "Layout",
paths: Iterable[Path],
*,
distance: float = MAX_DISTANCE,
segments: int = MIN_SEGMENTS,
extrusion: "Vertex" = Z_AXIS,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as
:class:`~ezdxf.entities.MPolygon` entities. The MPOLYGON entity supports
only polyline boundary paths. All curves will be approximated.
The `extrusion` vector is applied to all paths, all vertices are projected
onto the plane normal to this extrusion vector. The default extrusion vector
is the WCS z-axis. The plane elevation is the distance from the WCS origin
to the start point of the first path.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
distance: maximum distance, see :meth:`Path.flattening`
segments: minimum segment count per Bézier curve to flatten polyline paths
extrusion: extrusion vector for all paths
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.17
"""
polygons = list(
converter.to_mpolygons(
paths,
distance=distance,
segments=segments,
extrusion=extrusion,
dxfattribs=dxfattribs,
)
)
for polygon in polygons:
layout.add_entity(polygon)
return EntityQuery(polygons)
def render_polylines3d(
layout: "Layout",
paths: Iterable[Path],
*,
distance: float = MAX_DISTANCE,
segments: int = MIN_SEGMENTS,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as 3D
:class:`~ezdxf.entities.Polyline` entities.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
distance: maximum distance, see :meth:`Path.flattening`
segments: minimum segment count per Bézier curve
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.16
"""
polylines3d = list(
converter.to_polylines3d(
paths,
distance=distance,
segments=segments,
dxfattribs=dxfattribs,
)
)
for polyline3d in polylines3d:
layout.add_entity(polyline3d)
return EntityQuery(polylines3d)
def render_lines(
layout: "Layout",
paths: Iterable[Path],
*,
distance: float = MAX_DISTANCE,
segments: int = MIN_SEGMENTS,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as
:class:`~ezdxf.entities.Line` entities.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
distance: maximum distance, see :meth:`Path.flattening`
segments: minimum segment count per Bézier curve
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.16
"""
lines = list(
converter.to_lines(
paths,
distance=distance,
segments=segments,
dxfattribs=dxfattribs,
)
)
for line in lines:
layout.add_entity(line)
return EntityQuery(lines)
def render_splines_and_polylines(
layout: "Layout",
paths: Iterable[Path],
*,
g1_tol: float = G1_TOL,
dxfattribs: Optional[Dict] = None
) -> EntityQuery:
"""Render the given `paths` into `layout` as :class:`~ezdxf.entities.Spline`
and 3D :class:`~ezdxf.entities.Polyline` entities.
Args:
layout: the modelspace, a paperspace layout or a block definition
paths: iterable of :class:`Path` objects
g1_tol: tolerance for G1 continuity check
dxfattribs: additional DXF attribs
Returns:
created entities in an :class:`~ezdxf.query.EntityQuery` object
.. versionadded:: 0.16
"""
entities = list(
converter.to_splines_and_polylines(
paths,
g1_tol=g1_tol,
dxfattribs=dxfattribs,
)
)
for entity in entities:
layout.add_entity(entity)
return EntityQuery(entities)
def add_ellipse(
path: Path, ellipse: ConstructionEllipse, segments=1, reset=True
) -> None:
"""Add an elliptical arc as multiple cubic Bèzier-curves to the given
`path`, use :meth:`~ezdxf.math.ConstructionEllipse.from_arc` constructor
of class :class:`~ezdxf.math.ConstructionEllipse` to add circular arcs.
Auto-detect the connection point to the given `path`, if neither the start-
nor the end point of the ellipse is close to the path end point, a line from
the path end point to the ellipse start point will be added automatically
(see :func:`add_bezier4p`).
By default the start of an **empty** path is set to the start point of
the ellipse, setting argument `reset` to ``False`` prevents this
behavior.
Args:
path: :class:`~ezdxf.path.Path` object
ellipse: ellipse parameters as :class:`~ezdxf.math.ConstructionEllipse`
object
        segments: count of Bézier-curve segments, at least one segment for
each quarter (pi/2), ``1`` for as few as possible.
reset: set start point to start of ellipse if path is empty
"""
if abs(ellipse.param_span) < 1e-9:
return
if len(path) == 0 and reset:
path.start = ellipse.start_point
add_bezier4p(path, cubic_bezier_from_ellipse(ellipse, segments))
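# Illustrative sketch (not part of the original module): a circular arc is an
# ellipse with ratio 1; the start of the empty path is reset to its start
# point.
def _example_add_ellipse():
    p = Path()
    arc = ConstructionEllipse(center=(0, 0), major_axis=(1, 0), ratio=1,
                              start_param=0, end_param=math.pi)
    add_ellipse(p, arc)
    return p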
def add_bezier4p(path: Path, curves: Iterable[Bezier4P]) -> None:
"""Add multiple cubic Bèzier-curves to the given `path`.
Auto-detect the connection point to the given `path`, if neither the start-
nor the end point of the curves is close to the path end point, a line from
the path end point to the start point of the first curve will be added
automatically.
.. versionchanged:: 0.16.2
add linear Bézier curve segments as LINE_TO commands
"""
curves = list(curves)
if not len(curves):
return
end = curves[-1].control_points[-1]
if path.end.isclose(end):
# connect to new curves end point
curves = reverse_bezier_curves(curves)
for curve in curves:
start, ctrl1, ctrl2, end = curve.control_points
if not start.isclose(path.end):
path.line_to(start)
# add linear bezier segments as LINE_TO commands
if start.isclose(ctrl1) and end.isclose(ctrl2):
path.line_to(end)
else:
path.curve4_to(end, ctrl1, ctrl2)
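# Illustrative sketch (not part of the original module): when the first curve
# does not start at the current path end, the connecting line is added
# automatically.
def _example_add_bezier4p():
    p = Path((0, 0))
    curve = Bezier4P([(5, 0), (6, 1), (7, -1), (8, 0)])
    add_bezier4p(p, [curve])  # inserts LINE_TO (5, 0) before the curve
    return p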
def add_bezier3p(path: Path, curves: Iterable[Bezier3P]) -> None:
"""Add multiple quadratic Bèzier-curves to the given `path`.
Auto-detect the connection point to the given `path`, if neither the start-
nor the end point of the curves is close to the path end point, a line from
the path end point to the start point of the first curve will be added
automatically.
.. versionchanged:: 0.16.2
add linear Bézier curve segments as LINE_TO commands
"""
curves = list(curves)
if not len(curves):
return
end = curves[-1].control_points[-1]
if path.end.isclose(end):
# connect to new curves end point
curves = reverse_bezier_curves(curves)
for curve in curves:
start, ctrl, end = curve.control_points
if not start.isclose(path.end, abs_tol=0): # only rel_tol=1e-9
path.line_to(start)
# add linear bezier segments as LINE_TO commands, use only rel_tol=1e-9
if start.isclose(ctrl, abs_tol=0) or end.isclose(ctrl, abs_tol=0):
path.line_to(end)
else:
path.curve3_to(end, ctrl)
def add_2d_polyline(
path: Path,
points: Iterable[Sequence[float]],
close: bool,
ocs: OCS,
elevation: float,
) -> None:
"""Internal API to add 2D polylines which may include bulges to an
**empty** path.
"""
def bulge_to(p1: Vec3, p2: Vec3, bulge: float):
if p1.isclose(p2, rel_tol=IS_CLOSE_TOL, abs_tol=0):
return
center, start_angle, end_angle, radius = bulge_to_arc(p1, p2, bulge)
ellipse = ConstructionEllipse.from_arc(
center,
radius,
Z_AXIS,
math.degrees(start_angle),
math.degrees(end_angle),
)
curves = list(cubic_bezier_from_ellipse(ellipse))
curve0 = curves[0]
cp0 = curve0.control_points[0]
if cp0.isclose(p2, rel_tol=IS_CLOSE_TOL, abs_tol=0):
curves = reverse_bezier_curves(curves)
add_bezier4p(path, curves)
if len(path):
raise ValueError("Requires an empty path.")
prev_point = None
prev_bulge = 0
for x, y, bulge in points:
# Bulge values near 0 but != 0 cause crashes! #329
if abs(bulge) < 1e-6:
bulge = 0
point = Vec3(x, y)
if prev_point is None:
path.start = point
prev_point = point
prev_bulge = bulge
continue
if prev_bulge:
bulge_to(prev_point, point, prev_bulge)
else:
path.line_to(point)
prev_point = point
prev_bulge = bulge
if close and not path.start.isclose(
path.end, rel_tol=IS_CLOSE_TOL, abs_tol=0
):
if prev_bulge:
bulge_to(path.end, path.start, prev_bulge)
else:
path.line_to(path.start)
if ocs.transform or elevation:
path.to_wcs(ocs, elevation)
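# Illustrative sketch (not part of the original module): one (x, y, bulge)
# segment with bulge=1 expands into a half circle from (0, 0) to (2, 0).
def _example_add_2d_polyline():
    p = Path()
    add_2d_polyline(p, [(0, 0, 1), (2, 0, 0)], close=False, ocs=OCS(),
                    elevation=0)
    return p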
def add_spline(path: Path, spline: BSpline, level=4, reset=True) -> None:
"""Add a B-spline as multiple cubic Bèzier-curves.
Non-rational B-splines of 3rd degree gets a perfect conversion to
cubic bezier curves with a minimal count of curve segments, all other
B-spline require much more curve segments for approximation.
    Auto-detects the connection point to the given `path`: if neither the start
    nor the end point of the B-spline is close to the path end point, a line
    from the path end point to the start point of the B-spline will be added
    automatically (see :meth:`add_bezier4p`).
    By default the start of an **empty** path is set to the start point of
    the spline; setting argument `reset` to ``False`` prevents this
    behavior.
Args:
path: :class:`~ezdxf.path.Path` object
spline: B-spline parameters as :class:`~ezdxf.math.BSpline` object
level: subdivision level of approximation segments
reset: set start point to start of spline if path is empty
"""
if len(path) == 0 and reset:
path.start = spline.point(0)
if spline.degree == 3 and not spline.is_rational and spline.is_clamped:
curves = [Bezier4P(points) for points in spline.bezier_decomposition()]
else:
curves = spline.cubic_bezier_approximation(level=level)
add_bezier4p(path, curves)
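# Illustrative usage sketch (control points are made up; assumes BSpline's
# default degree-3 clamped knots, so the exact decomposition branch runs):
#
#     spline = BSpline([(0, 0), (1, 2), (3, 1), (5, 3)])
#     p = Path()
#     add_spline(p, spline)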
| 30.982736 | 82 | 0.642992 | 0 | 0 | 288 | 0.012338 | 0 | 0 | 0 | 0 | 11,001 | 0.471276 |
335691f1a44d761858600316cd4fa61528733a39 | 466 | py | Python | LeetCodeSolutions/python/322_Coin_Change.py | ChuanleiGuo/AlgorithmsPlayground | 90b6287b742c8bfd3797540c408d679be2821a40 | [
"MIT"
] | 1 | 2017-03-27T13:38:37.000Z | 2017-03-27T13:38:37.000Z | LeetCodeSolutions/python/322_Coin_Change.py | ChuanleiGuo/AlgorithmsPlayground | 90b6287b742c8bfd3797540c408d679be2821a40 | [
"MIT"
] | null | null | null | LeetCodeSolutions/python/322_Coin_Change.py | ChuanleiGuo/AlgorithmsPlayground | 90b6287b742c8bfd3797540c408d679be2821a40 | [
"MIT"
] | null | null | null | class Solution(object):
def coinChange(self, coins, amount):
"""
:type coins: List[int]
:type amount: int
:rtype: int
"""
dp = [0] + [2 ** 31 - 1] * amount
for i in xrange(1, amount + 1):
for coin in coins:
if i >= coin and dp[i - coin] != (2 ** 31 - 1):
dp[i] = min(dp[i], dp[i - coin] + 1)
return dp[amount] if dp[amount] != (2 ** 31 - 1) else -1
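    # Worked example (added for clarity, not part of the original solution):
    #   coins = [1, 2, 5], amount = 11 -> dp[11] == 3 (5 + 5 + 1).
    #   Recurrence: dp[i] = min(dp[i], dp[i - coin] + 1) for each coin <= i;
    #   amounts left at the 2**31 - 1 sentinel are unreachable and map to -1.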
| 31.066667 | 64 | 0.433476 | 465 | 0.997854 | 0 | 0 | 0 | 0 | 0 | 0 | 92 | 0.197425 |
335a3079554e04318e100ccbe95efe67d12785a4 | 367 | py | Python | 14_TokenAuthentication/userpost/serializer.py | LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 28 | 2019-10-15T13:15:26.000Z | 2021-11-08T08:23:45.000Z | 14_TokenAuthentication/userpost/serializer.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | null | null | null | 14_TokenAuthentication/userpost/serializer.py | jhleed/LikeLion_Django_Study_Summary | c788182af5bcfd16bdd4b57235a48659758e494b | [
"MIT"
] | 17 | 2019-09-09T00:15:36.000Z | 2021-01-28T13:08:51.000Z | from .models import UserPost
from rest_framework import serializers
class UserPostSerializer(serializers.ModelSerializer):
author_name = serializers.ReadOnlyField(
source='author.username'
)
class Meta:
model = UserPost
fields = [
'pk',
'author_name',
'title',
'body',
]
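# Example serialized output for a UserPost instance (field values are
# illustrative, not taken from real data):
#   {"pk": 1, "author_name": "alice", "title": "Hello", "body": "First post"}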
| 19.315789 | 54 | 0.580381 | 296 | 0.80654 | 0 | 0 | 0 | 0 | 0 | 0 | 47 | 0.128065 |
335dbfd8eeada3150621b5e132ff38b941943c70 | 153 | py | Python | app/cogs/base/__init__.py | fossabot/Starboard-2 | 798e2d04995ae7d920e76708b9ea8fae6f4af319 | [
"MIT"
] | 16 | 2021-01-19T19:12:00.000Z | 2021-12-21T12:00:04.000Z | app/cogs/base/__init__.py | Davi-the-Mudkip/Starboard-2 | 4de3c689ffef007e4f4a279251d107d890b69b15 | [
"MIT"
] | 15 | 2021-04-02T16:58:48.000Z | 2022-03-28T06:09:49.000Z | app/cogs/base/__init__.py | Davi-the-Mudkip/Starboard-2 | 4de3c689ffef007e4f4a279251d107d890b69b15 | [
"MIT"
] | 13 | 2021-01-21T14:26:00.000Z | 2021-09-29T18:55:17.000Z | from app.classes.bot import Bot
from . import base_commands, base_events
def setup(bot: Bot):
base_commands.setup(bot)
base_events.setup(bot)
| 17 | 40 | 0.745098 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3360e8ecae40c8b015c6b7e2a1cd267c16fb7712 | 502 | py | Python | model/combination_file.py | dokzai/WholeFoodsFrugality | 0239dca601353524af0e1b317def40cdd51e4ea3 | [
"CC0-1.0"
] | 42 | 2021-06-05T01:16:23.000Z | 2021-07-05T02:51:54.000Z | model/combination_file.py | dokzai/WholeFoodsFrugality | 0239dca601353524af0e1b317def40cdd51e4ea3 | [
"CC0-1.0"
] | 2 | 2021-06-05T19:29:06.000Z | 2021-09-01T19:10:53.000Z | model/combination_file.py | dokzai/WholeFoodsFrugality | 0239dca601353524af0e1b317def40cdd51e4ea3 | [
"CC0-1.0"
] | 6 | 2021-06-05T02:44:16.000Z | 2021-08-06T14:54:59.000Z | import model
from model import whole_foods_sale
from model import aldis_au_sale
from model import aldis_us_sale
from model import aldis_uk_sale
def go(inputs, store_name):
if store_name == 'WholeFoods':
final_df = whole_foods_sale.items_on_sale()
elif store_name == 'Aldi AU':
final_df = aldis_au_sale.items_on_sale()
elif store_name == 'Aldi US':
final_df = aldis_us_sale.items_on_sale()
elif store_name == 'Aldi UK':
final_df = aldis_uk_sale.items_on_sale()
    else:
        raise ValueError('unknown store_name: %s' % store_name)
    return final_df.to_html()
| 27.888889 | 45 | 0.7749 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 39 | 0.077689 |
3362692f46d8ce99fe607f99e82c59552aa25f04 | 8,926 | py | Python | generalRunFiles/tideCompare.py | wesleybowman/karsten | ef4b2d6debae605902d76cd0484e71c0ba74fdd1 | [
"MIT"
] | 1 | 2015-05-04T17:48:56.000Z | 2015-05-04T17:48:56.000Z | generalRunFiles/tideCompare.py | wesleybowman/karsten | ef4b2d6debae605902d76cd0484e71c0ba74fdd1 | [
"MIT"
] | null | null | null | generalRunFiles/tideCompare.py | wesleybowman/karsten | ef4b2d6debae605902d76cd0484e71c0ba74fdd1 | [
"MIT"
] | 1 | 2021-11-15T17:53:19.000Z | 2021-11-15T17:53:19.000Z | from __future__ import division
import numpy as np
import pandas as pd
import netCDF4 as nc
from datetime import datetime, timedelta
import cPickle as pickle
import sys
sys.path.append('/home/wesley/github/UTide/')
from utide import ut_solv
import scipy.io as sio
from stationClass import station
def mjd2num(x):
y = x + 678942
return y
def closest_point(points, lon, lat):
point_list = np.array([lon,lat]).T
closest_dist = ((point_list[:, 0] - points[:, 0, None])**2 +
(point_list[:, 1] - points[:, 1, None])**2)
closest_point_indexes = np.argmin(closest_dist, axis=1)
return closest_point_indexes
def datetime2matlabdn(dt):
# ordinal = dt.toordinal()
mdn = dt + timedelta(days=366)
frac = (dt-datetime(dt.year, dt.month, dt.day, 0, 0, 0)).seconds / \
(24.0 * 60.0 * 60.0)
return mdn.toordinal() + frac
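# Worked example (sanity check added here, not in the original script):
#   datetime(2014, 1, 1, 12) -> toordinal() of 2014-01-01 is 735234,
#   +366 days gives MATLAB's 735600 for that date, and the 12h fraction
#   adds 0.5, so the function returns the MATLAB datenum 735600.5.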
def tideGauge(datafiles, Struct):
dgFilename = '/array/home/rkarsten/common_tidal_files/data/observed/DG/TideGauge/DigbyWharf_015893_20140115_2221_Z.mat'
gpFilename = '/array/home/rkarsten/common_tidal_files/data/observed/GP/TideGauge/Westport_015892_20140325_1212_Z.mat'
dgtg = sio.loadmat(dgFilename, struct_as_record=False, squeeze_me=True)
gptg = sio.loadmat(gpFilename, struct_as_record=False, squeeze_me=True)
ut_constits = ['M2','S2','N2','K2','K1','O1','P1','Q1']
print 'Westport TideGauge'
coef_gptg = ut_solv(gptg['RBR'].date_num_Z,
(gptg['RBR'].data-np.mean(gptg['RBR'].data)), [],
gptg['RBR'].lat, cnstit=ut_constits, notrend=True,
rmin=0.95, method='ols', nodiagn=True, linci=True,
ordercnstit='frq')
print 'DigbyWharf TideGauge'
coef_dgtg = ut_solv(dgtg['RBR'].date_num_Z,
(dgtg['RBR'].data-np.mean(dgtg['RBR'].data)), [],
dgtg['RBR'].lat, cnstit=ut_constits, notrend=True,
rmin=0.95, method='ols', nodiagn=True, linci=True,
ordercnstit='frq')
struct = np.array([])
for filename in datafiles:
print filename
data = nc.Dataset(filename, 'r')
lat = data.variables['lat'][:]
lon = data.variables['lon'][:]
time = data.variables['time'][:]
time = mjd2num(time)
tg_gp_id = np.argmin(np.sqrt((lon-gptg['RBR'].lon)**2+(lat-gptg['RBR'].lat)**2))
tg_dg_id = np.argmin(np.sqrt((lon-dgtg['RBR'].lon)**2+(lat-dgtg['RBR'].lat)**2))
#elgp = data.variables['zeta'][tg_gp_id, :]
#eldg = data.variables['zeta'][tg_dg_id, :]
elgp = data.variables['zeta'][:, tg_gp_id]
eldg = data.variables['zeta'][:, tg_dg_id]
coef_dg = ut_solv(time, eldg, [], dgtg['RBR'].lat, cnstit=ut_constits,
notrend=True, rmin=0.95, method='ols', nodiagn=True,
linci=True, ordercnstit='frq')
coef_gp = ut_solv(time, elgp, [], gptg['RBR'].lat, cnstit=ut_constits,
notrend=True, rmin=0.95, method='ols', nodiagn=True,
linci=True, ordercnstit='frq')
Name = filename.split('/')[-3]
Name = '2012_run'
print Name
obs_loc = {'name':Name, 'type':'TideGauge',
'mod_time':time, 'dg_time':dgtg['RBR'].date_num_Z,
'gp_time':gptg['RBR'].date_num_Z,
'lon':lon, 'lat':lat,
'dg_tidegauge_harmonics': coef_dgtg,
'gp_tidegauge_harmonics':coef_gptg,
'dg_mod_harmonics': coef_dg,
'gp_mod_harmonics': coef_gp,
'dg_tg_data':dgtg['RBR'].data,
'gp_tg_data':gptg['RBR'].data,
'eldg':eldg, 'elgp':elgp}
struct = np.hstack((struct, obs_loc))
Struct[Name] = np.hstack((Struct[Name], struct))
#pickle.dump(struct, open("structADCP.p", "wb"))
return Struct
def adcp(datafiles, debug=False):
if debug:
adcpFilename = '/home/wesley/github/karsten/adcp/testADCP.txt'
else:
adcpFilename = '/array/home/107002b/github/karsten/adcp/acadia_dngrid_adcp_2012.txt'
#adcpFilename = '/home/wesleyb/github/karsten/adcp/dngrid_adcp_2012.txt'
adcp = pd.read_csv(adcpFilename)
for i,v in enumerate(adcp['Latitude']):
path = adcp.iloc[i, -1]
if path != 'None':
print adcp.iloc[i, 0]
#print lonlat[i,1], uvnodell[ii,1]
ADCP = pd.read_csv(path, index_col=0)
ADCP.index = pd.to_datetime(ADCP.index)
adcpTime = np.empty(ADCP.index.shape)
for j, jj in enumerate(ADCP.index):
adcpTime[j] = datetime2matlabdn(jj)
adcpCoef = ut_solv(adcpTime, ADCP['u'].values,
ADCP['v'].values, v,
cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True,
conf_int=True)
adcpData = adcpCoef
obs = pd.DataFrame({'u':ADCP['u'].values, 'v':ADCP['v'].values})
Struct = {}
for filename in datafiles:
print filename
data = nc.Dataset(filename, 'r')
#x = data.variables['x'][:]
#y = data.variables['y'][:]
lon = data.variables['lon'][:]
lat = data.variables['lat'][:]
lonc = data.variables['lonc'][:]
latc = data.variables['latc'][:]
ua = data.variables['ua']
va = data.variables['va']
time = data.variables['time'][:]
#trinodes = data.variables['nv'][:]
time = mjd2num(time)
lonlat = np.array([adcp['Longitude'], adcp['Latitude']]).T
#index = closest_point(lonlat, lon, lat)
index = closest_point(lonlat, lonc, latc)
adcpData = pd.DataFrame()
runData = pd.DataFrame()
Name = filename.split('/')[-3]
Name = '2012_run'
print Name
struct = np.array([])
for i, ii in enumerate(index):
path = adcp.iloc[i, -1]
if path != 'None':
print adcp.iloc[i, 0]
coef = ut_solv(time, ua[:, ii], va[:, ii], lonlat[i, 1],
cnstit='auto', rmin=0.95, notrend=True,
method='ols', nodiagn=True, linci=True,
conf_int=True)
runData = coef
mod = pd.DataFrame({'ua':ua[:, ii], 'va':va[:, ii]})
obs_loc = {'name':adcp.iloc[i,0], 'type':'ADCP', 'lat':lonlat[i,-1],
'lon':lonlat[0,0], 'obs_timeseries':obs,
'mod_timeseries':mod, 'obs_time':adcpTime,
'mod_time':time,'speed_obs_harmonics':adcpData,
'speed_mod_harmonics':runData}
struct = np.hstack((struct, obs_loc))
Struct[Name] = struct
return Struct
def main(debug=False):
if debug:
datafiles = ['/array/data1/rkarsten/dncoarse_bctest_old/output/dn_coarse_0001.nc',
'/array/data1/rkarsten/dncoarse_bctest/output/dn_coarse_0001.nc']
#datafiles = ['/home/wesley/ncfiles/smallcape_force_0001.nc']
else:
# datafiles = ['/array/data1/rkarsten/dncoarse_bctest_old/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest2/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest_all/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest_EC/output/dn_coarse_0001.nc',
# '/array/data1/rkarsten/dncoarse_bctest_timeseries/output/dn_coarse_0001.nc']
#datafiles = ['/array2/data3/rkarsten/dncoarse_3D/output2/dn_coarse_station_timeseries.nc']
# datafiles = ['/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0015/output/dngrid_0001.nc',
# '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0020/output/dngrid_0001.nc',
# '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0025/output/dngrid_0001.nc',
# '/EcoII/EcoEII_server_data_tree/data/simulated/FVCOM/dncoarse/calibration/bottom_roughness/0.0030/output/dngrid_0001.nc']
#
datafiles = ['/array/home/116822s/2012_run.nc']
#'/array/data1/rkarsten/dncoarse_stationtest/output/dn_coarse_0001.nc']
saveName = 'struct2012_run.p'
Struct = adcp(datafiles, debug=False)
if debug:
pickle.dump(Struct, open("structADCP.p", "wb"))
Struct = tideGauge(datafiles, Struct)
pickle.dump(Struct, open(saveName, "wb"))
return Struct
if __name__ == '__main__':
main()
| 36.284553 | 143 | 0.578871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,955 | 0.331055 |
336617ee14351211c66aecbca46f7869d9b1ed6c | 2,158 | py | Python | gff/Scripts/gff/gff_to_genbank.py | bgruening/bcbb | dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027 | [
"MIT"
] | 339 | 2015-01-04T13:23:04.000Z | 2022-03-25T23:09:09.000Z | gff/Scripts/gff/gff_to_genbank.py | bgruening/bcbb | dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027 | [
"MIT"
] | 39 | 2015-01-14T21:31:09.000Z | 2021-11-18T15:15:33.000Z | gff/Scripts/gff/gff_to_genbank.py | bgruening/bcbb | dbfb52711f0bfcc1d26c5a5b53c9ff4f50dc0027 | [
"MIT"
] | 176 | 2015-01-10T17:40:44.000Z | 2022-03-25T05:14:21.000Z | #!/usr/bin/env python
"""Convert a GFF and associated FASTA file into GenBank format.
Usage:
gff_to_genbank.py <GFF annotation file> [<FASTA sequence file> <molecule type>]
FASTA sequence file: input sequences matching records in GFF. Optional if sequences
are in the GFF
molecule type: type of molecule in the GFF file. Defaults to DNA, the most common case.
"""
from __future__ import print_function
import sys
import os
from Bio import SeqIO
from BCBio import GFF
def main(gff_file, fasta_file=None, molecule_type="DNA"):
out_file = "%s.gb" % os.path.splitext(gff_file)[0]
if fasta_file:
fasta_input = SeqIO.to_dict(SeqIO.parse(fasta_file, "fasta"))
else:
fasta_input = {}
gff_iter = GFF.parse(gff_file, fasta_input)
SeqIO.write(_check_gff(_fix_ncbi_id(gff_iter), molecule_type), out_file, "genbank")
def _fix_ncbi_id(fasta_iter):
"""GenBank identifiers can only be 16 characters; try to shorten NCBI.
"""
for rec in fasta_iter:
if len(rec.name) > 16 and rec.name.find("|") > 0:
new_id = [x for x in rec.name.split("|") if x][-1]
print("Warning: shortening NCBI name %s to %s" % (rec.id, new_id))
rec.id = new_id
rec.name = new_id
yield rec
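# Example of the shortening above (identifier is illustrative):
#   "gi|11111|ref|NC_000913.3|" (25 chars) -> last non-empty pipe field
#   "NC_000913.3", which fits GenBank's 16-character limit.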
def _check_gff(gff_iterator, molecule_type):
"""Check GFF files before feeding to SeqIO to be sure they have sequences.
"""
for rec in gff_iterator:
if "molecule_type" not in rec.annotations:
rec.annotations["molecule_type"] = molecule_type
yield _flatten_features(rec)
def _flatten_features(rec):
"""Make sub_features in an input rec flat for output.
GenBank does not handle nested features, so we want to make
everything top level.
"""
out = []
for f in rec.features:
cur = [f]
while len(cur) > 0:
nextf = []
for curf in cur:
out.append(curf)
if len(curf.sub_features) > 0:
nextf.extend(curf.sub_features)
cur = nextf
rec.features = out
return rec
if __name__ == "__main__":
main(*sys.argv[1:])
| 29.162162 | 88 | 0.639018 | 0 | 0 | 724 | 0.335496 | 0 | 0 | 0 | 0 | 799 | 0.37025 |
336659ff363b0054a9eac553468e5da25a44ed50 | 762 | py | Python | hard-gists/5267494/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 21 | 2019-07-08T08:26:45.000Z | 2022-01-24T23:53:25.000Z | hard-gists/5267494/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 5 | 2019-06-15T14:47:47.000Z | 2022-02-26T05:02:56.000Z | hard-gists/5267494/snippet.py | jjhenkel/dockerizeme | eaa4fe5366f6b9adf74399eab01c712cacaeb279 | [
"Apache-2.0"
] | 17 | 2019-05-16T03:50:34.000Z | 2021-01-14T14:35:12.000Z | import webapp2
from twilio import twiml
from twilio.rest import TwilioRestClient
class SendSMS(webapp2.RequestHandler):
def get(self):
# replace with your credentials from: https://www.twilio.com/user/account
account_sid = "ACxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"
auth_token = "xxxxxxxxxxxxxxxxxxxxxxxxxx"
client = TwilioRestClient(account_sid, auth_token)
# replace "to" and "from_" with real numbers
rv = client.sms.messages.create(to="+14155551212",
from_="+14085551212",
body="Hello Monkey!")
self.response.write(str(rv))
app = webapp2.WSGIApplication([('/send_sms', SendSMS)],
debug=True) | 42.333333 | 81 | 0.611549 | 579 | 0.759843 | 0 | 0 | 0 | 0 | 0 | 0 | 234 | 0.307087 |
3366d6f4ce4dfb447093519d06275a1e825d7959 | 555 | py | Python | app/calc/permissions.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | [
"MIT"
] | null | null | null | app/calc/permissions.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | [
"MIT"
] | null | null | null | app/calc/permissions.py | sajeeshen/WebCalculatorAPI | d951e688e84741cc594877914d292fbddb4e9542 | [
"MIT"
] | null | null | null | from rest_framework import permissions
class IsSuperUser(permissions.IsAdminUser):
def has_permission(self, request, view):
is_admin = super().has_permission(request, view)
return request.method in permissions.SAFE_METHODS or is_admin
class IsUser(permissions.BasePermission):
def has_object_permission(self, request, view, obj):
if request.user:
if request.user.is_superuser:
return True
else:
return obj == request.user
else:
return False
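# Hypothetical DRF usage (view name and setup are made up; composing
# permissions with "|" requires DRF >= 3.9):
#
#     class AccountViewSet(viewsets.ModelViewSet):
#         permission_classes = [IsSuperUser | IsUser]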
| 26.428571 | 69 | 0.652252 | 510 | 0.918919 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
3367ac8d7631adea65a66b8a1b9f9c5b430f34ba | 335 | py | Python | src/eduid_graphdb/exceptions.py | SUNET/eduid-groupdb | c9b4631ff944211eab82595f005bc7e707142216 | [
"BSD-3-Clause"
] | 1 | 2020-09-05T03:14:37.000Z | 2020-09-05T03:14:37.000Z | src/eduid_graphdb/exceptions.py | SUNET/eduid-graphdb | c9b4631ff944211eab82595f005bc7e707142216 | [
"BSD-3-Clause"
] | null | null | null | src/eduid_graphdb/exceptions.py | SUNET/eduid-graphdb | c9b4631ff944211eab82595f005bc7e707142216 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
__author__ = 'lundberg'
class EduIDGroupDBError(Exception):
pass
class VersionMismatch(EduIDGroupDBError):
pass
class MultipleReturnedError(EduIDGroupDBError):
pass
class MultipleUsersReturned(MultipleReturnedError):
pass
class MultipleGroupsReturned(MultipleReturnedError):
pass
| 13.958333 | 52 | 0.758209 | 271 | 0.808955 | 0 | 0 | 0 | 0 | 0 | 0 | 33 | 0.098507 |
3368e67a6dc113ec2b5e093fd23dd2116d31dc6a | 1,485 | py | Python | pyledserver/mqtt/client.py | oct0f1sh/PyLEDServer | a41ef37bf4564c02d0439a89f13f1bb7d841cc1d | [
"MIT"
] | 1 | 2018-10-15T18:14:55.000Z | 2018-10-15T18:14:55.000Z | pyledserver/mqtt/client.py | oct0f1sh/PyLEDServer | a41ef37bf4564c02d0439a89f13f1bb7d841cc1d | [
"MIT"
] | null | null | null | pyledserver/mqtt/client.py | oct0f1sh/PyLEDServer | a41ef37bf4564c02d0439a89f13f1bb7d841cc1d | [
"MIT"
] | null | null | null | import json
import logging
import mqtt.callbacks as mqtt_util
import paho.mqtt.client as mqtt
logger = logging.getLogger('pyledserver.PyLEDClient')
logger.setLevel(logging.DEBUG)
class PyLEDClient(mqtt.Client):
def __init__(self, client_id, credentials, mqtt_topic, led_strip):
logger.debug('Creating client: {}'.format(client_id))
# create and associate callbacks
super().__init__(client_id=client_id, clean_session=False)
self.callback = mqtt_util.CallbackContainer(led_strip)
self.on_message = self.callback.on_message
self.on_publish = self.callback.on_publish
self.on_subscribe = self.callback.on_subscribe
self.on_connect = self.callback.on_connect
self.on_disconnect = self.callback.on_disconnect
# assign user credentials to client
self.username_pw_set(credentials.mqtt_username, credentials.mqtt_password)
# connect to MQTT server and subscribe to topic
logger.info('Connecting to server {}:{}'.format(credentials.mqtt_url, credentials.mqtt_port))
self.connect(credentials.mqtt_url, int(credentials.mqtt_port))
self.subscribe(mqtt_topic, 0)
success = {'message': 'gradient',
'args': {}}
# publish connection message to ensure successful connection
self.publish(mqtt_topic, json.dumps(success, ensure_ascii=True))
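    # Minimal usage sketch (credentials object, topic, and strip names are
    # assumptions based on the constructor signature above):
    #
    #     client = PyLEDClient('led-client', creds, 'home/leds', led_strip)
    #     client.loop_forever()  # paho-mqtt's blocking network loop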
@property
def is_connected(self):
return self.callback.is_connected | 38.076923 | 101 | 0.709764 | 1,303 | 0.877441 | 0 | 0 | 79 | 0.053199 | 0 | 0 | 273 | 0.183838 |
3368ecf59d5565e94b4557cb0eb66a2c6e950c80 | 1,406 | py | Python | pilco/policies/transformed_policy.py | sbrml/pilco | 77b6d8b9033ffdb23cae4936b028f42144f37846 | [
"MIT"
] | null | null | null | pilco/policies/transformed_policy.py | sbrml/pilco | 77b6d8b9033ffdb23cae4936b028f42144f37846 | [
"MIT"
] | 4 | 2020-11-13T18:43:28.000Z | 2022-02-10T01:17:03.000Z | pilco/policies/transformed_policy.py | sbrml/pilco | 77b6d8b9033ffdb23cae4936b028f42144f37846 | [
"MIT"
] | 1 | 2020-03-22T10:14:21.000Z | 2020-03-22T10:14:21.000Z | from pilco.policies.policy import Policy
import tensorflow as tf
class TransformedPolicy(Policy):
def __init__(self,
policy,
transform,
name="sine_bounded_action_policy",
**kwargs):
super().__init__(state_dim=policy.state_dim,
action_dim=policy.action_dim,
name=name,
dtype=policy.dtype,
**kwargs)
self.policy = policy
self.transform = transform
@property
def parameters(self):
return self.policy.parameters
@property
def action_indices(self):
return tf.range(self.state_dim, self.state_dim + self.action_dim)
def reset(self):
self.policy.reset()
def match_moments(self, state_loc, state_cov, joint_result=True):
# We first match the moments through the base policy
loc, cov = self.policy.match_moments(state_loc, state_cov)
loc, cov = self.transform.match_moments(loc=loc,
cov=cov,
indices=self.action_indices)
return loc, cov
def call(self, state):
full_vec = tf.concat([state, [self.policy(state)]], axis=0)
return self.transform(full_vec, indices=self.action_indices)[self.state_dim:]
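# Illustrative wrapping sketch (policy/transform objects are assumptions;
# any transform exposing match_moments() and __call__() should fit):
#
#     bounded = TransformedPolicy(policy=rbf_policy, transform=sine_bound)
#     action = bounded(state)  # action passed through the bounding transform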
| 29.914894 | 85 | 0.559033 | 1,337 | 0.950925 | 0 | 0 | 186 | 0.13229 | 0 | 0 | 80 | 0.056899 |
33691ce250a8717c15ad812e29257b13be0864ea | 33 | py | Python | src/petronia/defimpl/configuration/file/defs.py | groboclown/petronia | 486338023d19cee989e92f0c5692680f1a37811f | [
"MIT"
] | 19 | 2017-06-21T10:28:24.000Z | 2021-12-31T11:49:28.000Z | src/petronia/defimpl/configuration/file/defs.py | groboclown/petronia | 486338023d19cee989e92f0c5692680f1a37811f | [
"MIT"
] | 10 | 2016-11-11T18:57:57.000Z | 2021-02-01T15:33:43.000Z | src/petronia/defimpl/configuration/file/defs.py | groboclown/petronia | 486338023d19cee989e92f0c5692680f1a37811f | [
"MIT"
] | 3 | 2017-09-17T03:29:35.000Z | 2019-06-03T10:43:08.000Z |
"""
Basic type definitions.
"""
| 6.6 | 23 | 0.606061 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 31 | 0.939394 |
336a6fab7ed0883fc7d394502eed5edc3d498110 | 7,986 | py | Python | clone-zadara-volume.py | harvard-dce/mh-backup | 78a6f987759de22eb27b3b7e29943c7aba70cbac | [
"Apache-2.0"
] | null | null | null | clone-zadara-volume.py | harvard-dce/mh-backup | 78a6f987759de22eb27b3b7e29943c7aba70cbac | [
"Apache-2.0"
] | null | null | null | clone-zadara-volume.py | harvard-dce/mh-backup | 78a6f987759de22eb27b3b7e29943c7aba70cbac | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import time
import logging
import logging.config
import logging.handlers
import yaml
from zadarest import ZConsoleClient
from zadarest import ZVpsaClient
logger = None
def setup_logging( log_conf=None ):
if log_conf is None:
logging.basicConfig( level=logging.DEBUG )
else:
logging.config.dictConfig( dict( log_conf ) )
logging.info( 'start logging for %s at %s' %
( __name__, time.strftime( "%y%m%d-%H%M", time.localtime() ) ) )
def read_config( config_file_path ):
with open( config_file_path, 'r' ) as ymlfile:
config = yaml.load( ymlfile )
# some validation of config
if 'zadara_cloud_console' not in config.keys() or 'url' not in config['zadara_cloud_console'].keys():
logger.critical('missing zadara CLOUD CONSOLE URL config')
exit( 1 )
if 'zadara_vpsa' not in config.keys() or 'volume_export_path' not in config['zadara_vpsa'].keys():
logger.critical('missing zadara volume EXPORT PATH config')
exit( 1 )
if 'logging' not in config.keys():
config['logging'] = None
return config
def get_value_from_env_or_user_input( env_var_name, msg="enter your value: " ):
value = None
if env_var_name in os.environ:
value = os.environ[ env_var_name ]
while not value:
value = str( raw_input( msg ) )
return value
def setup_zadara_console_client():
token = get_value_from_env_or_user_input(
'ZADARA_CONSOLE_ACCESS_TOKEN',
'enter your zadara CONSOLE access token: ' )
zcon = ZConsoleClient( cfg['zadara_cloud_console']['url'], token )
logger.debug('set zconsole for url(%s)' % cfg['zadara_cloud_console']['url'] )
logger.debug('zconsole object is (%s)' % zcon )
return zcon
def setup_zadara_vpsa_client( z_console_client, vpsa_id ):
token = get_value_from_env_or_user_input(
'ZADARA_VPSA_ACCESS_TOKEN',
'enter your zadara VPSA token: ' )
zvpsa = ZVpsaClient( z_console_client, vpsa_token=token, vpsa_id=vpsa_id )
logger.debug('set zvpsa for id (%d)' % vpsa_id )
logger.debug('zvpsa object is (%s)' % zvpsa )
return zvpsa
def setup_zadara_client():
zcon = setup_zadara_console_client()
vpsa_token = get_value_from_env_or_user_input(
'ZADARA_VPSA_ACCESS_TOKEN',
'enter your zadara VPSA token: ' )
os.environ['ZADARA_VPSA_ACCESS_TOKEN'] = vpsa_token
vpsa = zcon.vpsa_by_export_path( cfg['zadara_vpsa']['volume_export_path'], vpsa_token )
if vpsa is None:
logger.critical(
'vpsa with export_path(%s) not found; maybe it is hibernated?' %
cfg['zadara_vpsa']['volume_export_path'] )
exit( 1 )
logger.debug('found vpsa with export_path (%s)! it has id (%d)' % (
cfg['zadara_vpsa']['volume_export_path'], vpsa['id']) )
zcli = setup_zadara_vpsa_client( zcon, vpsa['id'] )
return zcli
def print_snapshot_list_from_volume( cli, volume ):
snapshots = {}
snap_list = cli.get_snapshots_for_cgroup( volume['cg_name'] )
if snap_list is None or 0 == len( snap_list ):
logger.critical(
'no snapshots available for volume with export_path(%s)' %
volume['nfs_export_path'] )
exit( 1 )
logger.debug('return from snapshot list has (%d) elements' % len( snap_list ) )
i = 1
print 'available snapshots for volume with export_path(%s):' % volume['nfs_export_path']
for s in snap_list:
print '%d: %s [%s]' % ( i, s['modified_at'], s['display_name'] )
snapshots[i] = s
i += 1
return snapshots
def clone_from_snapshot( cli, volume, snapshot_id ):
timestamp = time.strftime( "%y%m%d_%H%M", time.localtime() )
#clone_volume_display_name = 'clone_snap_%s_on_%s' % ( snapshot_id.replace('-', '_'), timestamp )
clone_volume_display_name = 'clone_on_%s' % timestamp
logger.debug( 'cloning volume (%s) with display_name (%s), from snapshot_id (%s)' %
( volume['cg_name'], clone_volume_display_name, snapshot_id ) )
clone = cli.clone_volume(
cgroup=volume['cg_name'],
clone_name=clone_volume_display_name,
snap_id=snapshot_id )
timeout_in_sec = 5
max_checks = 5
i = 0
while clone is None and i < max_checks:
time.sleep( timeout_in_sec )
clone = cli.get_volume_by_display_name( clone_volume_display_name )
i += 1
if i == max_checks and clone is None:
logger.critical('error cloning volume')
exit( 1 )
logger.debug( 'cloned volume object is (%s)' % clone )
return clone
def shift_export_paths( cli, source_volume, clone_volume ):
timestamp = time.strftime( "%y%m%d-%H%M", time.localtime() )
de_facto_export_path = source_volume['nfs_export_path']
inactive_export_path = '%s_%s' % ( source_volume['nfs_export_path'], timestamp )
logger.debug('preparing to shift export paths: (%s)-->(%s)-->X(%s)' %
( inactive_export_path, de_facto_export_path, clone_volume['nfs_export_path'] ) )
src_servers = cli.detach_volume_from_all_servers( source_volume['name'] )
src_volume_name = cli.update_export_name_for_volume(
source_volume['name'],
os.path.basename( inactive_export_path ) )
logger.debug('detached source_volume from all servers (%s)' % src_servers )
clone_volume_name = cli.update_export_name_for_volume(
clone_volume['name'],
os.path.basename( de_facto_export_path ) )
clone_servers = cli.attach_volume_to_servers( clone_volume['name'], src_servers )
logger.debug('attached all servers to clone volume (%s)' % clone_servers )
logger.debug('src_volume_name(%s) and clone_volume_name(%s)' % ( src_volume_name,
clone_volume_name ) )
return ( src_volume_name, clone_volume_name )
def copy_snapshot_policies( cli, source_volume, clone_volume ):
src_policies = cli.get_snapshot_policies_for_cgroup( source_volume['cg_name'] )
logger.debug('policies from src_volume (%s)' % src_policies )
for p in src_policies:
cli.attach_snapshot_policy_to_cgroup( clone_volume['cg_name'], p['name'] )
logger.debug('policies now attached to clone_volume as well...')
return src_policies
if __name__ == '__main__':
cfg = read_config( 'config.yml' )
setup_logging( cfg['logging'] )
logger = logging.getLogger( __name__ )
logger.info('STEP 1. logging configured!')
logger.info('STEP 2. setting up zadara client...')
zcli = setup_zadara_client()
logger.info('STEP 3. finding volume to be clone by export_path (%s)' %
cfg['zadara_vpsa']['volume_export_path'])
volume_to_clone_info = zcli.get_volume_by_export_path( cfg['zadara_vpsa']['volume_export_path'] )
logger.info('STEP 4. volume found (%s); printing snapshots available',
volume_to_clone_info['display_name'] )
snapshots = print_snapshot_list_from_volume( zcli, volume_to_clone_info )
s_index = None
    while s_index not in snapshots:
        s_index = int( raw_input('which snapshot to clone? [1..%d]: ' % len( snapshots ) ) )
logger.info('STEP 5. snapshot picked (%s), cloning...' % snapshots[ s_index
]['display_name'] )
clone_info = clone_from_snapshot(
zcli,
volume_to_clone_info,
snapshots[ s_index ]['name'] )
logger.info('STEP 6. cloned as volume (%s); changing export_paths...' %
clone_info['display_name'] )
( src_path, clone_path ) = shift_export_paths(
zcli,
volume_to_clone_info,
clone_info )
logger.info('STEP 7. attaching snapshot policies...')
p_list = copy_snapshot_policies(
zcli,
volume_to_clone_info,
clone_info )
logger.info('STEP 8. remount shared storage in mh nodes and we are done.')
| 33 | 105 | 0.660406 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,361 | 0.295642 |
336e88674a45678661a794705e119393a26a094e | 862 | py | Python | env/Lib/site-packages/bidict/_typing.py | NXPY123/gsoc-tagger | c85602ae9f5dbb9bec45cc5e9e50ec62177d46a5 | [
"MIT"
] | 9 | 2021-02-15T05:53:17.000Z | 2022-02-25T01:47:09.000Z | env/Lib/site-packages/bidict/_typing.py | NXPY123/gsoc-tagger | c85602ae9f5dbb9bec45cc5e9e50ec62177d46a5 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | env/Lib/site-packages/bidict/_typing.py | NXPY123/gsoc-tagger | c85602ae9f5dbb9bec45cc5e9e50ec62177d46a5 | [
"MIT"
] | 7 | 2022-02-05T20:29:14.000Z | 2022-03-26T13:16:44.000Z | # -*- coding: utf-8 -*-
# Copyright 2009-2020 Joshua Bronson. All Rights Reserved.
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""Provide typing-related objects."""
import typing as _t
KT = _t.TypeVar('KT')
VT = _t.TypeVar('VT')
IterItems = _t.Iterable[_t.Tuple[KT, VT]]
MapOrIterItems = _t.Union[_t.Mapping[KT, VT], IterItems[KT, VT]]
DT = _t.TypeVar('DT') #: for default arguments
VDT = _t.Union[VT, DT]
class _BareReprMeta(type):
def __repr__(cls) -> str:
return f'<{cls.__name__}>'
class _NONE(metaclass=_BareReprMeta):
"""Sentinel type used to represent 'missing'."""
OKT = _t.Union[KT, _NONE] #: optional key type
OVT = _t.Union[VT, _NONE] #: optional value type
| 25.352941 | 69 | 0.676334 | 181 | 0.209977 | 0 | 0 | 0 | 0 | 0 | 0 | 460 | 0.533643 |
33718dd21d032cfa041834f0670c883a14f8767a | 5,032 | py | Python | tests/util/test_mwi_validators.py | chimmy-changa/d-mdp | 2ffbb4e41c980a71d95bcdb56fe47b2d7a5893a1 | [
"BSD-2-Clause"
] | null | null | null | tests/util/test_mwi_validators.py | chimmy-changa/d-mdp | 2ffbb4e41c980a71d95bcdb56fe47b2d7a5893a1 | [
"BSD-2-Clause"
] | null | null | null | tests/util/test_mwi_validators.py | chimmy-changa/d-mdp | 2ffbb4e41c980a71d95bcdb56fe47b2d7a5893a1 | [
"BSD-2-Clause"
] | null | null | null | # Copyright 2021 The MathWorks, Inc.
"""Tests for functions in matlab_desktop_proxy/util/mwi_validators.py
"""
import pytest, os, tempfile, socket, random
import matlab_desktop_proxy
from matlab_desktop_proxy.util import mwi_validators
from matlab_desktop_proxy import mwi_environment_variables as mwi_env
from matlab_desktop_proxy.util.mwi_exceptions import NetworkLicensingError
def test_validate_mlm_license_file_for_invalid_string(monkeypatch):
"""Check if validator raises expected exception"""
# Delete the environment variables if they do exist
env_name = mwi_env.get_env_name_network_license_manager()
invalid_string = "/Invalid/String/"
monkeypatch.setenv(env_name, invalid_string)
nlm_conn_str = os.getenv(env_name)
with pytest.raises(NetworkLicensingError) as e_info:
conn_str = mwi_validators.validate_mlm_license_file(nlm_conn_str)
assert invalid_string in str(e_info.value)
def test_validate_mlm_license_file_for_valid_server_syntax(monkeypatch):
"""Check if port@hostname passes validation"""
env_name = mwi_env.get_env_name_network_license_manager()
license_manager_address = "1234@1.2_any-alphanumeric"
monkeypatch.setenv(env_name, license_manager_address)
conn_str = mwi_validators.validate_mlm_license_file(os.getenv(env_name))
assert conn_str == license_manager_address
def test_validate_mlm_license_file_for_valid_server_triad_syntax(monkeypatch):
"""Check if port@hostname passes validation"""
env_name = mwi_env.get_env_name_network_license_manager()
license_manager_address = (
"1234@1.2_any-alphanumeric,1234@1.2_any-alphanumeric,1234@1.2_any-alphanumeric"
)
monkeypatch.setenv(env_name, license_manager_address)
conn_str = mwi_validators.validate_mlm_license_file(os.getenv(env_name))
assert conn_str == license_manager_address
def test_validate_mlm_license_file_None():
"""Test to check if validate_mlm_license_file() returns None when nlm_conn_str is None."""
assert mwi_validators.validate_mlm_license_file(None) is None
def test_get_with_environment_variables(monkeypatch):
"""Check if path to license file passes validation"""
env_name = mwi_env.get_env_name_network_license_manager()
fd, path = tempfile.mkstemp()
monkeypatch.setenv(env_name, path)
try:
conn_str = mwi_validators.validate_mlm_license_file(os.getenv(env_name))
assert conn_str == str(path)
finally:
os.remove(path)
def test_validate_app_port_is_free_false():
"""Test to validate if supplied app port is free"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
with pytest.raises(SystemExit) as e:
mwi_validators.validate_app_port_is_free(port)
assert e.value.code == 1
s.close()
def test_validate_app_port_is_free_true():
"""Test to validate if supplied app port is free"""
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.bind(("", 0))
port = s.getsockname()[1]
s.close()
assert mwi_validators.validate_app_port_is_free(port) == port
def test_validate_app_port_None():
"""Tests if validated app port is None when MWI_APP_PORT env variable is not set.
    A validated app port of None implies that a random free port will be used at launch.
"""
assert mwi_validators.validate_app_port_is_free(None) is None
def test_validate_env_config_true():
"""Validate the default config which is used in this package."""
config = mwi_validators.validate_env_config(
matlab_desktop_proxy.get_default_config_name()
)
assert isinstance(config, dict)
def test_validate_env_config_false():
"""Passing a non existent config should raise SystemExit exception"""
with pytest.raises(SystemExit) as e:
config = mwi_validators.validate_env_config(str(random.randint(10, 100)))
assert e.value.code == 1
def test_get_configs():
"""Test to check if atleast 1 env config is discovered.
When this package is installed, we will have a default config.
"""
configs = mwi_validators.__get_configs()
assert len(configs.keys()) >= 1
@pytest.mark.parametrize(
"base_url, validated_base_url",
[
("", ""),
("/bla", "/bla"),
("/bla/", "/bla"),
],
ids=[
"Launch integration at root",
"Launch at custom path",
"Launch at custom with suffix: /",
],
)
def test_validate_base_url(base_url, validated_base_url):
"""Tests multiple base_urls which will beparsed and validated successfully.
Args:
base_url (str): base_url
validated_base_url (str): validated base_url
"""
assert mwi_validators.validate_base_url(base_url) == validated_base_url
def test_validate_base_url_no_prefix_error():
"""Test to check base_url will throw error when a prefix / is not present in it.[summary]"""
with pytest.raises(SystemExit) as e:
mwi_validators.validate_base_url("matlab/")
assert e.value.code == 1
| 35.43662 | 96 | 0.738275 | 0 | 0 | 0 | 0 | 597 | 0.118641 | 0 | 0 | 1,536 | 0.305246 |
3371e72f879cbac52d993d514cd9197823e924d2 | 1,758 | py | Python | pub/models.py | DASTUDIO/MyVHost | b9eda56a67c2df9236b7866087bc7f465542f951 | [
"MIT"
] | 2 | 2021-07-27T10:38:57.000Z | 2021-10-10T20:42:56.000Z | pub/models.py | DASTUDIO/MyVHost | b9eda56a67c2df9236b7866087bc7f465542f951 | [
"MIT"
] | null | null | null | pub/models.py | DASTUDIO/MyVHost | b9eda56a67c2df9236b7866087bc7f465542f951 | [
"MIT"
] | null | null | null | # coding=utf-8
from pub.tables.resources import *
from pub.tables.map.domain import *
from pub.tables.cache.token import *
from pub.tables.map.files import *
from pub.tables.user import *
from pub.tables.cache.suspend import *
from pub.tables.notice import *
from pub.tables.template import *
from pub.tables.comments import *
reg = admin.site.register
# resource
reg(resource_to_user,resource_to_user_decoration)
reg(resource_type, resource_type_decoration)
reg(resource_state,resource_state_decoration)
reg(resource_customed,resource_customed_decoration)
reg(resource_templated,resource_templated_decoration)
reg(resource_iframe,resource_iframe_decoration)
reg(resource_link,resource_link_decoration)
reg(resource_restful,resource_restful_decoration)
reg(resource_restful_item, resource_restful_item_decoration)
# domain
reg(domain_to_key,domain_to_key_decoration)
# token
reg(token,token_decoration)
# files
reg(file_key_to_path,file_key_to_path_decoration)
# auth_user
reg(auth_user,auth_user_decoration)
# supend
reg(cache_suspend,cache_suspend_decoration)
# user permission
reg(user_permission,user_permission_decoration)
# template
reg(template,template_decoration)
# resource info
reg(resource_info,resource_info_decoration)
# domain user
reg(domain_to_user,domain_to_user_decoration)
# file
reg(file_hash_to_key,file_hash_to_key_decoration)
reg(file_key_to_user,file_key_to_user_decoration)
# resource permission
reg(resource_permission,resource_permission_decoration)
# user info
reg(user_info,user_info_decoration)
# template info
reg(template_info,template_decoration)
# notices
reg(notice,notice_decoration)
# comments
reg(user_comments,user_comments_decoration)
reg(user_comments_likes_map,user_comments_likes_map_decoration) | 24.416667 | 63 | 0.85438 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 192 | 0.109215 |
33748e69536633ebb01ec7a16665fe6f91b383bc | 1,169 | py | Python | editor/templates/tests/test_review.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null | editor/templates/tests/test_review.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | null | null | null | editor/templates/tests/test_review.py | gocept/alphaflow | 4b797cb12fb52254b1884159fd9a8b899c739f7c | [
"ZPL-2.1",
"ZPL-2.0"
] | 1 | 2021-11-01T07:58:18.000Z | 2021-11-01T07:58:18.000Z | # Copyright (c) 2005-2006 gocept gmbh & co. kg
# See also LICENSE.txt
# $Id: test_processmanager.py 4614 2007-03-26 20:12:22Z ctheune $
import unittest
from Products.AlphaFlow.tests.AlphaFlowTestCase import AlphaFlowTestCase
from Products.AlphaFlow.process import Process, ProcessVersion
class ParallelReviewTest(AlphaFlowTestCase):
interfaces_to_test = [ ]
def test_use_parallel_review_template(self):
wftool = self.portal.workflow_manager
wftool.processes['dummy'] = Process('dummy')
wftool.processes["dummy"].editable(ProcessVersion())
wftool.processes["dummy"].update()
process = wftool.processes['dummy'].current()
# Load form
self.assertPublish("@@template-parallelreview", process)
# Submit form
self.assertEquals(0, len(process.objectIds()))
self.assertPublish("@@template-parallelreview?form.actions.apply=Save&form.reviews=3&form.title=Review",
process)
self.assertEquals(6, len(process.objectIds()))
def test_suite():
suite = unittest.TestSuite()
suite.addTest(unittest.makeSuite(ParallelReviewTest))
return suite
| 33.4 | 112 | 0.700599 | 749 | 0.640719 | 0 | 0 | 0 | 0 | 0 | 0 | 303 | 0.259196 |
681bea3ce9c8135cbe81134242ec4563bc113cbf | 353 | py | Python | tools/find-broken-edition-parm.py | ctheune/assembly-cms | 20e000373fc30d9a14cb5dc882499b5eed1d86ee | [
"ZPL-2.1"
] | null | null | null | tools/find-broken-edition-parm.py | ctheune/assembly-cms | 20e000373fc30d9a14cb5dc882499b5eed1d86ee | [
"ZPL-2.1"
] | null | null | null | tools/find-broken-edition-parm.py | ctheune/assembly-cms | 20e000373fc30d9a14cb5dc882499b5eed1d86ee | [
"ZPL-2.1"
] | null | null | null | # Copyright (c) 2010 gocept gmbh & co. kg
# See also LICENSE.txt
import zope.traversing.api
stack = [root['summer10']]
while stack:
page = stack.pop()
for edition in page.editions:
for tag in edition.parameters:
if not ':' in tag:
print zope.traversing.api.getPath(edition)
stack.extend(page.subpages)
| 23.533333 | 58 | 0.637394 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 76 | 0.215297 |
681c71e1cbd6fe545a1017f7ab45742ad74b1e3f | 3,926 | py | Python | storm_summaries.py | uva-hydroinformatics-lab/precipitation_processing | 54ef1673900b6bb2ee38daec3aac33748a8402cd | [
"MIT"
] | 1 | 2019-01-08T03:57:49.000Z | 2019-01-08T03:57:49.000Z | storm_summaries.py | uva-hydroinformatics/precipitation_processing | 54ef1673900b6bb2ee38daec3aac33748a8402cd | [
"MIT"
] | null | null | null | storm_summaries.py | uva-hydroinformatics/precipitation_processing | 54ef1673900b6bb2ee38daec3aac33748a8402cd | [
"MIT"
] | null | null | null | from storm_stats_functions import *
########################################################################################################################
# Prepare Data##########################################################################################################
########################################################################################################################
combined_df = combine_data_frames()
date_range = get_date_range()
outline_poly = get_outline_polygon()
flavor = 'all data'
base_dir = 'C:/Users/jeff_dsktp/Box Sync/Sadler_1stPaper/rainfall/'
fig_dir = '{}figures/python/{}/'.format(base_dir, flavor)
data_dir = '{}data/{}/'.format(base_dir, flavor)
########################################################################################################################
# Plot/summarize data ##################################################################################################
########################################################################################################################
# date_range = ['2014-07-10']
# get daily summary ##
daily_tots_df = get_daily_tots_df(combined_df, date_range)  # needed by plot_sum_by_day below
#
# get storm durations ##
# durations = get_storm_durations(combined_df, date_range, 0.025)
#
# get subdaily summary ##
# timestep = "15T"
# t = get_subdaily_df(combined_df, date_range, timestep)
# t = combine_sub_daily_dfs(combined_df, t)
# for a in t:
# a[1].to_csv("{}{}-{}.csv".format(check_dir(data_dir), a[0], 'fifteen_min'))
# plot subdaily data ##
# plot_subdaily_scatter(t,
# False,
# timestep,
# units="Precip (mm)",
# type="subdaily",
# dty=fig_dir,
# marker_scale=5,
# threshold=-1,
# ply=outline_poly,
# label=True)
# #
# # for d in t:
# # d[1].to_csv("{}{}_{}.csv".format(check_dir(data_dir), timestep, d[0]))
#
# plot a single sub-daily timestep ##
timestep = "4/15/2014 11:15:00"
df = read_sub_daily('fif')
df = df.ix[:, ['x', 'y', 'src', timestep]]
plot_scatter_subplots(df,
units="Rainfall (mm)",
type=flavor,
dty=fig_dir,
title="sub_day",
marker_scale=15,
threshold=0.01,
ply=outline_poly)
#
# get and plot intensity data at 15 min step ##
# max_daily_intensities_fifteen = get_daily_max_intensities(combined_df, date_range, "15T")
# plot_scatter_subplots(max_daily_intensities_fifteen,
# units="Max daily intensity (mm/15 min)",
# type=flavor,
# dty=fig_dir,
# title="max_daily_intensities_15_min",
# marker_scale=1.3,
# threshold=1,
# ply=outline_poly)
# get and plot intensity data at hour step ##
# max_daily_intensities_hour = get_daily_max_intensities(combined_df, date_range, "H")
# plot_scatter_subplots(max_daily_intensities_hour,
# units="Max daily intensity (mm/hour)",
# type=flavor,
# dty=fig_dir,
# title="max_daily_intensities_hour",
# marker_scale=1.1,
# threshold=1,
# ply=outline_poly)
# plot summary scatter + bars ##
# plot_sum_by_station_bars(daily_tots_df, fig_dir, flavor, outline_poly)
# bar graph for mean rainfall for each day ##
plot_sum_by_day(daily_tots_df, 'summary.png')
# compile overall storm summary table ##
# create_summary_table(daily_tots_df,
# max_daily_intensities_hour,
# max_daily_intensities_fifteen,
# data_dir,
# "overall_storm_summary")
# | 40.474227 | 120 | 0.464595 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,244 | 0.826286 |
681ce2b89477a5b6b497d174c63e24c3840af009 | 573 | py | Python | jqi/completion.py | jan-g/jqi | f304f9fda33ac9b9eae98848d2a64acbe0893131 | [
"CC-BY-3.0",
"Apache-2.0"
] | 3 | 2020-04-15T13:40:59.000Z | 2021-06-30T10:09:33.000Z | jqi/completion.py | jan-g/jqi | f304f9fda33ac9b9eae98848d2a64acbe0893131 | [
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null | jqi/completion.py | jan-g/jqi | f304f9fda33ac9b9eae98848d2a64acbe0893131 | [
"CC-BY-3.0",
"Apache-2.0"
] | null | null | null | from .lexer import lex
from .parser import top_level
from .completer import *
from .eval import make_env, splice
def completer(s, offset, start=top_level):
evaluator = start.parse(lex(s, offset))
def complete(stream="", env=None):
if env is None:
env = {}
env = make_env().update(env) # Install standard bindings
try:
_ = evaluator(splice(env, stream))
return []
except Completion as c:
return c.completions, c.pos if c.pos is not None else (offset, offset)
return complete
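# Hedged usage sketch (the jq program, offset, and stream shapes are
# assumptions based on this package's other modules):
#
#     complete = completer('.foo.b', offset=6)
#     suggestions, (start, end) = complete(stream={"foo": {"bar": 1, "baz": 2}})
#     # -> suggestions like ["bar", "baz"] plus the span of text to replace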
| 27.285714 | 82 | 0.616056 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 29 | 0.050611 |
681d29014f10010799276f1e756503fc734bd148 | 21 | py | Python | portfolio/Python/scrapy/loi/__init__.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/loi/__init__.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | null | null | null | portfolio/Python/scrapy/loi/__init__.py | 0--key/lib | ba7a85dda2b208adc290508ca617bdc55a5ded22 | [
"Apache-2.0"
] | 5 | 2016-03-22T07:40:46.000Z | 2021-05-30T16:12:21.000Z | ACCOUNT_NAME = 'Loi'
| 10.5 | 20 | 0.714286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 5 | 0.238095 |
681d2bfe163a3550e1484324ef1588560df68dbc | 5,948 | py | Python | conda-store-server/conda_store_server/server/views/api.py | saulshanabrook/conda-store | 4d2c442d92cc92c236f52d138a403a693eff1eb9 | [
"BSD-3-Clause"
] | null | null | null | conda-store-server/conda_store_server/server/views/api.py | saulshanabrook/conda-store | 4d2c442d92cc92c236f52d138a403a693eff1eb9 | [
"BSD-3-Clause"
] | null | null | null | conda-store-server/conda_store_server/server/views/api.py | saulshanabrook/conda-store | 4d2c442d92cc92c236f52d138a403a693eff1eb9 | [
"BSD-3-Clause"
] | null | null | null | from flask import Blueprint, jsonify, redirect, request
import pydantic
from conda_store_server import api, schema, utils
from conda_store_server.server.utils import get_conda_store, get_auth
from conda_store_server.server.auth import Permissions
app_api = Blueprint("api", __name__)
@app_api.route("/api/v1/")
def api_status():
return jsonify({"status": "ok"})
@app_api.route("/api/v1/namespace/")
def api_list_namespaces():
conda_store = get_conda_store()
auth = get_auth()
orm_environments = auth.filter_namespaces(api.list_namespaces(conda_store.db))
namespaces = [schema.Namespace.from_orm(_).dict() for _ in orm_environments.all()]
return jsonify(namespaces)
@app_api.route("/api/v1/environment/")
def api_list_environments():
conda_store = get_conda_store()
auth = get_auth()
orm_environments = auth.filter_environments(api.list_environments(conda_store.db))
environments = [
schema.Environment.from_orm(_).dict(exclude={"build"})
for _ in orm_environments.all()
]
return jsonify(environments)
@app_api.route("/api/v1/environment/<namespace>/<name>/", methods=["GET"])
def api_get_environment(namespace, name):
conda_store = get_conda_store()
auth = get_auth()
auth.authorize_request(
f"{namespace}/{name}", {Permissions.ENVIRONMENT_READ}, require=True
)
environment = api.get_environment(conda_store.db, namespace=namespace, name=name)
if environment is None:
return jsonify({"status": "error", "error": "environment does not exist"}), 404
return jsonify(schema.Environment.from_orm(environment).dict())
@app_api.route("/api/v1/environment/<namespace>/<name>/", methods=["PUT"])
def api_update_environment_build(namespace, name):
conda_store = get_conda_store()
auth = get_auth()
auth.authorize_request(
f"{namespace}/{name}", {Permissions.ENVIRONMENT_UPDATE}, require=True
)
data = request.json
if "buildId" not in data:
return jsonify({"status": "error", "message": "build id not specificated"}), 400
try:
build_id = data["buildId"]
conda_store.update_environment_build(namespace, name, build_id)
except utils.CondaStoreError as e:
return e.response
return jsonify({"status": "ok"})
@app_api.route("/api/v1/specification/", methods=["POST"])
def api_post_specification():
conda_store = get_conda_store()
try:
specification = schema.CondaSpecification.parse_obj(request.json)
api.post_specification(conda_store, specification)
return jsonify({"status": "ok"})
except pydantic.ValidationError as e:
return jsonify({"status": "error", "error": e.errors()}), 400
@app_api.route("/api/v1/build/", methods=["GET"])
def api_list_builds():
conda_store = get_conda_store()
auth = get_auth()
orm_builds = auth.filter_builds(api.list_builds(conda_store.db))
builds = [
schema.Build.from_orm(build).dict(exclude={"specification", "packages"})
for build in orm_builds.all()
]
return jsonify(builds)
@app_api.route("/api/v1/build/<build_id>/", methods=["GET"])
def api_get_build(build_id):
conda_store = get_conda_store()
auth = get_auth()
build = api.get_build(conda_store.db, build_id)
if build is None:
return jsonify({"status": "error", "error": "build id does not exist"}), 404
auth.authorize_request(
f"{build.namespace.name}/{build.specification.name}",
{Permissions.ENVIRONMENT_READ},
require=True,
)
return jsonify(schema.Build.from_orm(build).dict())
@app_api.route("/api/v1/build/<build_id>/", methods=["PUT"])
def api_put_build(build_id):
conda_store = get_conda_store()
auth = get_auth()
build = api.get_build(conda_store.db, build_id)
if build is None:
return jsonify({"status": "error", "error": "build id does not exist"}), 404
auth.authorize_request(
f"{build.namespace.name}/{build.specification.name}",
{Permissions.ENVIRONMENT_READ},
require=True,
)
conda_store.create_build(build.namespace_id, build.specification.sha256)
return jsonify({"status": "ok", "message": "rebuild triggered"})
@app_api.route("/api/v1/build/<build_id>/", methods=["DELETE"])
def api_delete_build(build_id):
conda_store = get_conda_store()
auth = get_auth()
build = api.get_build(conda_store.db, build_id)
if build is None:
return jsonify({"status": "error", "error": "build id does not exist"}), 404
auth.authorize_request(
f"{build.namespace.name}/{build.specification.name}",
{Permissions.ENVIRONMENT_DELETE},
require=True,
)
conda_store.delete_build(build_id)
return jsonify({"status": "ok"})
@app_api.route("/api/v1/build/<build_id>/logs/", methods=["GET"])
def api_get_build_logs(build_id):
conda_store = get_conda_store()
auth = get_auth()
build = api.get_build(conda_store.db, build_id)
if build is None:
return jsonify({"status": "error", "error": "build id does not exist"}), 404
auth.authorize_request(
f"{build.namespace.name}/{build.specification.name}",
{Permissions.ENVIRONMENT_DELETE},
require=True,
)
return redirect(conda_store.storage.get_url(build.log_key))
@app_api.route("/api/v1/channel/", methods=["GET"])
def api_list_channels():
conda_store = get_conda_store()
orm_channels = api.list_conda_channels(conda_store.db)
channels = [
schema.CondaChannel.from_orm(channel).dict() for channel in orm_channels
]
return jsonify(channels)
@app_api.route("/api/v1/package/", methods=["GET"])
def api_list_packages():
conda_store = get_conda_store()
orm_packages = api.list_conda_packages(conda_store.db)
packages = [
schema.CondaPackage.from_orm(package).dict() for package in orm_packages
]
return jsonify(packages)
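# Illustrative request against this blueprint (assumes the blueprint is
# registered on a Flask app named `app`; names and payload are made up):
#
#     client = app.test_client()
#     resp = client.put('/api/v1/environment/default/myenv/', json={'buildId': 3})
#     assert resp.get_json()['status'] == 'ok'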
| 30.502564 | 88 | 0.686785 | 0 | 0 | 0 | 0 | 5,622 | 0.945192 | 0 | 0 | 1,081 | 0.181742 |
681dc97becea3bc42ad4fd6d5283822462ab98cd | 6,781 | py | Python | wrangle.py | brandonjbryant/regression-project | 1c7dabece335c28931bcca3c002cb3cfa733bcbd | [
"MIT"
] | null | null | null | wrangle.py | brandonjbryant/regression-project | 1c7dabece335c28931bcca3c002cb3cfa733bcbd | [
"MIT"
] | null | null | null | wrangle.py | brandonjbryant/regression-project | 1c7dabece335c28931bcca3c002cb3cfa733bcbd | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
from env import host, user, password
import os
from sklearn.model_selection import train_test_split
import sklearn.preprocessing
############################# Acquire Zillow #############################
# defines function to create a sql url using personal credentials
def get_connection(db, user=user, host=host, password=password):
'''
This function uses my info from my env file to
create a connection url to access the Codeup db.
'''
return f'mysql+pymysql://{user}:{password}@{host}/{db}'
# defines function to get zillow data from MySQL and return as a pandas DataFrame
def get_zillow_data():
'''
This function reads in the zillow data from the Codeup db,
selects all columns from the properties_2017 table,
joins predictions_2017 table,
and acquires single unit properties with transactions during May 2017 - August 2017
and returns a pandas DataFrame with all columns.
'''
#create SQL query
sql_query = '''
SELECT *
FROM properties_2017
JOIN predictions_2017 USING(parcelid)
WHERE propertylandusetypeid IN (260, 261, 263, 264, 265, 266, 273, 275, 276, 279)
AND transactiondate >= "2017-05-01" AND transactiondate <= "2017-08-31";
'''
#read in dataframe from Codeup db
df = pd.read_sql(sql_query, get_connection('zillow'))
return df
# adds caching to get_zillow_data and checks for local filename (zillow_df.csv)
# if file exists, uses the .csv file
# if file doesn't exist, then produces SQL & pandas necessary to create a df, then write the df to a .csv file
def cached_zillow(cached=False):
'''
    This function reads in zillow data from the Codeup database and writes it to
    a csv file if cached == False; if cached == True, it reads the zillow df in
    from the csv file instead. Returns df.
'''
if cached == False or os.path.isfile('zillow_df.csv') == False:
# Read fresh data from db into a DataFrame.
df = get_zillow_data()
# Write DataFrame to a csv file.
df.to_csv('zillow_df.csv')
else:
# If csv file exists or cached == True, read in data from csv.
df = pd.read_csv('zillow_df.csv', index_col=0)
return df
############################# Prepare Zillow #############################
# defines function to clean zillow data and return as a cleaned pandas DataFrame
def clean_zillow(df):
'''
clean_zillow will take one argument df, a pandas dataframe and will:
grab the features needed for estimating home value and confirming property location,
set parcelid as new index,
rename columns for readability,
calculate age of home,
drop null values,
convert data types to integers,
remove outliers from square_feet and tax_value,
and calculate tax rate
return: a single pandas dataframe with the above operations performed
'''
#select only certain features needed for project
features = ['parcelid',
'bedroomcnt',
'bathroomcnt',
'calculatedfinishedsquarefeet',
'fips',
'yearbuilt',
'taxvaluedollarcnt',
'taxamount']
df = df[features]
#set parcelid as index
df = df.set_index("parcelid")
#rename columns
df = df.rename(columns={"parcelid": "parcel_id",
"bedroomcnt": "bedrooms",
"bathroomcnt": "bathrooms",
"calculatedfinishedsquarefeet":"square_feet",
"fips": "county_fips",
"taxamount": "taxes",
"taxvaluedollarcnt": "tax_value",
"yearbuilt": "age"})
#convert year built to get the property age
df.age = 2017 - df.age
#drop the nulls
df = df.dropna(subset=['square_feet', 'age', 'tax_value', 'taxes'])
df = df.fillna(0)
#convert dtypes to integers
df.bedrooms = df.bedrooms.astype('int64')
df.square_feet = df.square_feet.astype('int64')
df.county_fips = df.county_fips.astype('int64')
df.age = df.age.astype('int64')
df.tax_value = df.tax_value.astype('int64')
#remove outliers from square_feet
#calculate IQR
q1sf, q3sf = df.square_feet.quantile([.25, .75])
iqrsf = q3sf - q1sf
#calculate upper and lower bounds, outlier if above or below these
uppersf = q3sf + (1.5 * iqrsf)
lowersf = q1sf - (1.5 * iqrsf)
#filter out the lower and upper outliers
df = df[df.square_feet > lowersf]
df = df[df.square_feet < uppersf]
#remove outliers from tax_value
    #calculate IQR
q1tv, q3tv = df.tax_value.quantile([.25, .75])
iqrtv = q3tv - q1tv
#calculate upper and lower bounds, outlier if above or below these
uppertv = q3tv + (1.5 * iqrtv)
lowertv = q1tv - (1.5 * iqrtv)
#filter out the lower and upper outliers
df = df[df.tax_value > lowertv]
df = df[df.tax_value < uppertv]
#calculate tax rate using property's assessed value and the amount paid each year
#tax paid / tax value * 100 = tax rate%
df['tax_rate'] = round(((df.taxes / df.tax_value) * 100), 2)
return df
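# Editor's sketch: a standalone illustration of the 1.5*IQR fence applied
# twice above (assumes pandas is imported as pd, as elsewhere in this module).
def _iqr_filter_demo():
    s = pd.Series([800, 1200, 1800, 2400, 9000])
    q1, q3 = s.quantile([.25, .75])
    iqr = q3 - q1
    # fences: q1 - 1.5*iqr = -600 and q3 + 1.5*iqr = 4200, so 9000 is dropped
    return s[(s > q1 - 1.5 * iqr) & (s < q3 + 1.5 * iqr)]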
# splits a dataframe into train, validate, test
def split(df):
    '''
    Takes in a DataFrame and splits it into train, validate, and test
    (roughly 56% / 24% / 20% of the rows).
    Returns train, validate, test DataFrames.
    '''
train_validate, test = train_test_split(df, test_size=.2, random_state=123)
train, validate = train_test_split(train_validate,
test_size=.3,
random_state=123)
return train, validate, test
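# Editor's sanity-check sketch: chaining test_size=.2 and then .3 yields
# roughly 56% train, 24% validate, 20% test of the original rows.
def _split_demo(df):
    train, validate, test = split(df)
    assert len(train) + len(validate) + len(test) == len(df)
    return len(train), len(validate), len(test)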
# defines MinMaxScaler() and returns scaled data
def Min_Max_Scaler(X_train, X_validate, X_test):
"""
Takes in X_train, X_validate and X_test dfs with numeric values only
makes, fits, and uses/transforms the data,
Returns X_train_scaled, X_validate_scaled, X_test_scaled dfs
"""
#make and fit
scaler = sklearn.preprocessing.MinMaxScaler().fit(X_train)
#use and turn numpy arrays into dataframes
X_train_scaled = pd.DataFrame(scaler.transform(X_train), index = X_train.index, columns = X_train.columns)
X_validate_scaled = pd.DataFrame(scaler.transform(X_validate), index = X_validate.index, columns = X_validate.columns)
X_test_scaled = pd.DataFrame(scaler.transform(X_test), index = X_test.index, columns = X_test.columns)
    return X_train_scaled, X_validate_scaled, X_test_scaled
 | 33.078049 | 122 | 0.621442 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 3,788 | 0.55862 |
681e1d5a6599b7cdcc0f82c8c9f48fcd4cf5f272 | 5,107 | py | Python | server/shserver/GetCategory.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | ["Apache-2.0"] | 1 | 2017-05-02T10:02:28.000Z | 2017-05-02T10:02:28.000Z | server/shserver/GetCategory.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | ["Apache-2.0"] | null | null | null | server/shserver/GetCategory.py | AsherYang/ThreeLine | 351dc8bfd1c0a536ffbf36ce8b1af953cc71f93a | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Author: AsherYang
Email: 1181830457@qq.com
Date: 2017/7/24
Desc: get weidian token
@see https://wiki.open.weidian.com/api#94
url = https://api.vdian.com/api?param={"showNoCate":"0"}&public={"method":"weidian.cate.get.list","access_token":"9882ff6e635aac4740646cf93f2389320007487713","version":"1.0"}
必须为get 请求
"""
import json
import time
import DbUtil
import OpenRequest
import TokenConstant
from Category import Category
from ShJsonDecode import categoryDecode
import GetToken
"""
从微店获取商品分类
url = https://api.vdian.com/api?param={"showNoCate":"0"}&public={"method":"weidian.cate.get.list","access_token":"9882ff6e635aac4740646cf93f2389320007487713","version":"1.0"}
"""
def getCategoryFromNet(showNoCate="0", version="1.0", path="api"):
header = {
'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36',
}
params = {"showNoCate": showNoCate}
# GetToken.doGetToken()
pub = {"method": "weidian.cate.get.list", "access_token": GetToken.doGetToken(),
"version": version,
"lang": "python",
"sdkversion": TokenConstant.version}
url = "%s%s?param=%s&public=%s" % (TokenConstant.domain, path, params, pub)
body = OpenRequest.http_get(url, header=header)
categoryList = json.loads(body, cls=categoryDecode)
# print "body = " + body
print len(categoryList)
# for category in categoryList:
# print category.cate_id
# print " , name = " + category.cate_name
# print " , description = " + category.description
# print category.update_time
return categoryList
"""
从数据库获取商品分类
"""
def getCategoryFromDb():
print '--- getCategoryFromDb start ---'
query = "select * from sh_category"
results = DbUtil.query(query)
print results
if results is None:
return None
categoryList = []
for row in results:
category = Category()
row_id = row[0]
cate_id = row[1]
cate_name = row[2]
parent_id = row[3]
parent_cate_name = row[4]
sort_num = row[5]
cate_item_num = row[6]
description = row[7]
listUrl = row[8]
shopName = row[9]
shopLogo = row[10]
updateTime = row[11]
category.cate_id = cate_id
category.cate_name = cate_name
category.parent_id = parent_id
category.parent_cate_name = parent_cate_name
category.sort_num = sort_num
category.cate_item_num = cate_item_num
category.description = description
category.listUrl = listUrl
category.shopName = shopName
category.shopLogo = shopLogo
category.update_time = updateTime
categoryList.append(category)
# print "row_id = %s, access_token = %s, expire_in = %s, update_time = %s " %(row_id, access_token, expire_in, update_time)
return categoryList
"""
保存商品分类进数据库
"""
def saveCategoryToDb(categoryList=None):
print '--- saveCategoryToDb start ---'
if categoryList is None or len(categoryList) == 0:
print "categoryList is None could not save to db."
return
else:
insert = 'insert into sh_category (cate_id, cate_name, parent_id, parent_cate_name, sort_num, cate_item_num,' \
' description, listUrl, shopName, shopLogo, update_time) '
sql_select_str = ''
currentTime = int(time.time())
for category in categoryList:
sql_select_str += "SELECT '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s' union " \
% (category.cate_id, category.cate_name, category.parent_id, category.parent_cate_name,
category.sort_num, category.cate_item_num, category.description, category.listUrl,
category.shopName, category.shopLogo, currentTime)
        # concatenate the SQL statement
        insert = insert + sql_select_str
        # strip the trailing 'union '
        insert = insert[:-6]
        # saving categories requires clearing the old rows first
        delete = 'delete from sh_category;'
        DbUtil.delete(delete)
DbUtil.insert(insert)
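# Editor's sketch: a parameterized alternative to the string-built INSERT
# above; it avoids quoting and SQL-injection issues. `conn` is a hypothetical
# raw DB-API connection (DbUtil does not currently expose one).
def _save_category_parameterized(conn, categoryList, currentTime):
    rows = [(c.cate_id, c.cate_name, c.parent_id, c.parent_cate_name,
             c.sort_num, c.cate_item_num, c.description, c.listUrl,
             c.shopName, c.shopLogo, currentTime) for c in categoryList]
    cur = conn.cursor()
    cur.executemany(
        'insert into sh_category (cate_id, cate_name, parent_id,'
        ' parent_cate_name, sort_num, cate_item_num, description, listUrl,'
        ' shopName, shopLogo, update_time)'
        ' values (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)', rows)
    conn.commit()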
"""
category 更新策略:
每天只更新一次
返回值:当前有效的 category list
"""
def doGetCategory():
currentTime = int(time.time())
categoryList = getCategoryFromDb()
    # update interval: 1 day = 24 * 60 * 60 seconds
updateInterval = 24 * 60 * 60
print currentTime
if categoryList is None:
print "categoryList is None 正在更新"
categoryNetList = getCategoryFromNet()
saveCategoryToDb(categoryNetList)
return categoryNetList
    lastTime = int(categoryList[0].update_time)
    if currentTime - lastTime < updateInterval:
        print "fetched %d category rows from the database" % (len(categoryList))
return categoryList
else:
print "categoryList is 日期太久了 正在更新"
categoryNetList = getCategoryFromNet()
saveCategoryToDb(categoryNetList)
return categoryNetList
if __name__ == '__main__':
doGetCategory()
# categoryNetList = getCategoryFromNet()
    # saveCategoryToDb(categoryNetList)
 | 34.046667 | 174 | 0.639319 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,094 | 0.394425 |
681e352b042459db2c73a2cf9e2013e8047fcc1a | 600 | py | Python | tests/test_parametrized_circuit.py | mdrft/Blueqat | 878d24e2038063e32b4d391a03bc2c2be0600470 | ["Apache-2.0"] | 25 | 2018-09-16T22:54:48.000Z | 2019-02-22T01:21:30.000Z | tests/test_parametrized_circuit.py | mdrft/blueqat | 6c5f26b377bc3ce0d02adec8b9132d70870b3d95 | ["Apache-2.0"] | 22 | 2018-09-20T02:47:56.000Z | 2019-02-08T05:25:30.000Z | tests/test_parametrized_circuit.py | mdrft/blueqat | 6c5f26b377bc3ce0d02adec8b9132d70870b3d95 | ["Apache-2.0"] | 5 | 2018-10-23T04:56:04.000Z | 2019-02-13T14:02:31.000Z |
from blueqat import Circuit, ParametrizedCircuit
def compare_circuit(c1: Circuit, c2: Circuit) -> bool:
return repr(c1) == repr(c2)
def test_parametrized1():
assert compare_circuit(
ParametrizedCircuit().ry('a')[0].rz('b')[0].subs([1.2, 3.4]),
Circuit().ry(1.2)[0].rz(3.4)[0])
def test_parametrized2():
assert compare_circuit(
ParametrizedCircuit().ry('a')[0].rz('b')[0].subs({'a': 1.2, 'b': 3.4}),
Circuit().ry(1.2)[0].rz(3.4)[0])
def test_parametrized3():
assert compare_circuit(
ParametrizedCircuit().subs([]),
Circuit()
)
| 26.086957 | 79 | 0.605 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 18 | 0.03 |
68228cac371d172bb8b04045ab01daa77707fe2a | 453 | py | Python | add_admin.py | xcoders-hub/file-link-telegram-bot | 08fe2803766bf8027991698d057f927c02501d06 | ["Apache-2.0"] | 1 | 2020-08-11T09:13:11.000Z | 2020-08-11T09:13:11.000Z | add_admin.py | xcoders-hub/file-link-telegram-bot | 08fe2803766bf8027991698d057f927c02501d06 | ["Apache-2.0"] | null | null | null | add_admin.py | xcoders-hub/file-link-telegram-bot | 08fe2803766bf8027991698d057f927c02501d06 | ["Apache-2.0"] | 2 | 2021-01-01T15:17:15.000Z | 2021-02-02T14:42:45.000Z |
import sqlite3
from config import DB_PATH
def exe_query(query):
con_obj = sqlite3.connect(DB_PATH)
courser = con_obj.execute(query)
res = courser.fetchall()
con_obj.commit()
con_obj.close()
return res
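# Editor's sketch (hypothetical helper, not used below): sqlite3 supports
# parameter binding, which avoids building SQL with f-strings entirely.
def insert_admin_param(tg_id):
    con = sqlite3.connect(DB_PATH)
    con.execute('INSERT INTO Admin (telegram_id) VALUES (?);', (tg_id,))
    con.commit()
    con.close()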
try:
admin_id = int(input('Enter admin id: '))
exe_query(f'INSERT INTO Admin (telegram_id) VALUES ({admin_id});')
print(f'Admin ({admin_id}) added successfully!')
except ValueError:
print('Invalid admin id')
| 22.65 | 70 | 0.686534 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.291391 |
68247d930223fc7ffe60ad9d81a38863a76ea6be | 2,832 | py | Python | resources/code/train/Python/cp.py | searene/PLDetector | a8052b1d2ba91bfcc3fd4a5252480cf511d8a210 | ["MIT"] | 1 | 2020-11-09T08:24:17.000Z | 2020-11-09T08:24:17.000Z | resources/code/train/Python/cp.py | searene/PLDetector | a8052b1d2ba91bfcc3fd4a5252480cf511d8a210 | ["MIT"] | null | null | null | resources/code/train/Python/cp.py | searene/PLDetector | a8052b1d2ba91bfcc3fd4a5252480cf511d8a210 | ["MIT"] | null | null | null |
# This file is part of the Hotwire Shell project API.
# Copyright (C) 2007 Colin Walters <walters@verbum.org>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED,
# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A
# PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE X CONSORTIUM BE
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
# TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR
# THE USE OR OTHER DEALINGS IN THE SOFTWARE.
import os, sys, shutil, stat
import hotwire
import hotwire.fs
from hotwire.fs import FilePath
from hotwire.builtin import Builtin, BuiltinRegistry, MultiArgSpec
from hotwire.builtins.fileop import FileOpBuiltin
if '_' not in globals(): globals()['_'] = lambda x: x
class CpBuiltin(FileOpBuiltin):
__doc__ = _("""Copy sources to destination.""")
def __init__(self):
super(CpBuiltin, self).__init__('cp', aliases=['copy'],
hasstatus=True,
argspec=MultiArgSpec('files', min=2))
def execute(self, context, args):
assert len(args) > 0
target = FilePath(args[-1], context.cwd)
try:
target_is_dir = stat.S_ISDIR(os.stat(target).st_mode)
target_exists = True
        except OSError:
target_is_dir = False
target_exists = False
sources = args[:-1]
assert len(sources) > 0
if (not target_is_dir) and len(sources) > 1:
raise ValueError(_("Can't copy multiple items to non-directory"))
sources_total = len(sources)
self._status_notify(context, sources_total, 0)
if target_is_dir:
for i,source in enumerate(sources):
hotwire.fs.copy_file_or_dir(FilePath(source, context.cwd), target, True)
self._status_notify(context, sources_total, i+1)
else:
hotwire.fs.copy_file_or_dir(FilePath(sources[0], context.cwd), target, False)
self._status_notify(context, sources_total, 1)
return []
BuiltinRegistry.getInstance().register_hotwire(CpBuiltin())
| 42.268657 | 89 | 0.676907 | 1,344 | 0.474576 | 0 | 0 | 0 | 0 | 0 | 0 | 1,238 | 0.437147 |
6825ecd07438de3cfbf3309ee1fabb498392216d | 2,973 | py | Python | src/other/other/features/tests.py | ManuelLobo/IBEnt | 2ea885770a843f30224920d1bf6f6381fdf98c4c | ["MIT"] | null | null | null | src/other/other/features/tests.py | ManuelLobo/IBEnt | 2ea885770a843f30224920d1bf6f6381fdf98c4c | ["MIT"] | null | null | null | src/other/other/features/tests.py | ManuelLobo/IBEnt | 2ea885770a843f30224920d1bf6f6381fdf98c4c | ["MIT"] | null | null | null |
import glob
import random
import os
import subprocess
import time
import argparse
import codecs
import logging
def brown_clusters(crf):
#num_clusters -> param c
#num_col -> param ncollocs
#min_occur -> param min-occur
#crf -> stanford or crfsuite
final_results = open("src/other/features/brown_clusters.txt", "w") #TSV result, num_clusters, num_colls, min_occur
num_clusters_values = [82]
#num_col_values = [3]
min_occur_values = [3]
output = open("bin/temp/full_corpus.txt", "w")
lines = []
for file in glob.glob("corpora/hpo/all/hpo_corpus_text/" + "*"):
line = open(file, "r").read()
lines.append(line)
for line in lines:
output.write(str(line) + "\n")
output.close()
#subprocess.os.chdir("bin/geniass/")
#subprocess.call(["bin/geniass/geniass", "bin/temp/full_corpus.txt", "bin/temp/full_corpus_separated.txt"])
#subprocess.os.chdir("bin/brown-cluster/")
os.system("(cd bin/geniass/; ./geniass ../temp/full_corpus.txt ../temp/full_corpus_separated.txt)")
i = 0
for clu in num_clusters_values:
#for col in num_col_values:
for occ in min_occur_values:
i += 1
os.system("(cd bin/brown-cluster/; ./wcluster --text ../temp/full_corpus_separated.txt --c {} --min-occur {} --output_dir ../temp/clusters)".format(str(clu), str(occ))) # str(col) --ncollocs {}
os.system("cp bin/temp/clusters/paths data/")
#subprocess.call(["cp", "bin/temp/clusters/paths", "data/"])
f_measure = get_results(crf)
logging.info("cluster test {}".format(str(i)))
final_results.write(str(f_measure) + "\t" + str(clu) + "\t" + str(occ) + "\n")# + str(col) + "\n")
final_results.close()
def get_results(crf):
os.system("python src/main.py train --goldstd hpo_train --models models/hpo_train --entitytype hpo --crf {}".format(crf))
os.system("python src/main.py test --goldstd hpo_test -o pickle data/results_hpo_train --models models/hpo_train --entitytype hpo --crf {}".format(crf))
os.system("python src/evaluate.py evaluate hpo_test --results data/results_hpo_train --models models/hpo_train --entitytype hpo --rules andor stopwords small_ent twice_validated stopwords gowords posgowords longterms small_len quotes defwords digits lastwords")
results = open("data/results_hpo_train_report.txt").readlines()[:6]
precision = float(results[4].split(": ")[1])
recall = float(results[5].split(": ")[1])
f_measure = (2.0*precision*recall) / (precision + recall)
return f_measure
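# Editor's worked example of the harmonic mean computed above: with
# precision 0.80 and recall 0.60, f = (2 * 0.8 * 0.6) / (0.8 + 0.6) ~= 0.686.
def _f_measure_demo():
    precision, recall = 0.80, 0.60
    return (2.0 * precision * recall) / (precision + recall)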
def main():
#Test Boolean Stanford NER Features
#os.system("python src/other/features/bool_feature_selection.py")
#Test Boolean Stanford NER Features
#os.system("python src/other/features/numerical_feature_selection.py")
#Test Brown Clustering -> Around 20 hours with current values.
brown_clusters("crfsuite")
if __name__ == "__main__":
	main()
 | 39.64 | 174 | 0.670703 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,695 | 0.570131 |
6825edb56f620e4af468cbc1facebc1899cc9b2c | 4,882 | py | Python | tests/proc/test_manager.py | pmrowla/dvc-task | b2698c5b76dfe14099eefc90b2f9372ec774549d | ["Apache-2.0"] | 2 | 2022-02-15T12:28:13.000Z | 2022-03-08T17:19:38.000Z | tests/proc/test_manager.py | pmrowla/dvc-task | b2698c5b76dfe14099eefc90b2f9372ec774549d | ["Apache-2.0"] | 12 | 2022-02-15T12:55:13.000Z | 2022-03-30T09:24:13.000Z | tests/proc/test_manager.py | pmrowla/dvc-task | b2698c5b76dfe14099eefc90b2f9372ec774549d | ["Apache-2.0"] | 1 | 2022-03-08T07:15:18.000Z | 2022-03-08T07:15:18.000Z |
"""Process manager tests."""
import builtins
import signal
import sys
import pytest
from pytest_mock import MockerFixture
from pytest_test_utils import TmpDir
from dvc_task.proc.exceptions import (
ProcessNotTerminatedError,
UnsupportedSignalError,
)
from dvc_task.proc.manager import ProcessManager
from dvc_task.proc.process import ProcessInfo
from .conftest import PID_RUNNING
def test_send_signal(
mocker: MockerFixture,
process_manager: ProcessManager,
finished_process: str,
running_process: str,
):
"""Terminate signal should be sent."""
mock_kill = mocker.patch("os.kill")
process_manager.send_signal(running_process, signal.SIGTERM)
mock_kill.assert_called_once_with(PID_RUNNING, signal.SIGTERM)
mock_kill.reset_mock()
process_manager.send_signal(finished_process, signal.SIGTERM)
mock_kill.assert_not_called()
if sys.platform == "win32":
with pytest.raises(UnsupportedSignalError):
process_manager.send_signal(finished_process, signal.SIGABRT)
def test_dead_process(
mocker: MockerFixture,
process_manager: ProcessManager,
running_process: str,
):
"""Dead process lookup should fail."""
def side_effect(*args):
if sys.platform == "win32":
err = OSError()
err.winerror = 87
raise err
raise ProcessLookupError()
mocker.patch("os.kill", side_effect=side_effect)
with pytest.raises(ProcessLookupError):
process_manager.send_signal(running_process, signal.SIGTERM)
assert process_manager[running_process].returncode == -1
def test_kill(
mocker: MockerFixture,
process_manager: ProcessManager,
finished_process: str,
running_process: str,
):
"""Kill signal should be sent."""
mock_kill = mocker.patch("os.kill")
process_manager.kill(running_process)
if sys.platform == "win32":
mock_kill.assert_called_once_with(PID_RUNNING, signal.SIGTERM)
else:
mock_kill.assert_called_once_with(
PID_RUNNING, signal.SIGKILL # pylint: disable=no-member
)
mock_kill.reset_mock()
process_manager.kill(finished_process)
mock_kill.assert_not_called()
def test_terminate(
mocker: MockerFixture,
process_manager: ProcessManager,
running_process: str,
finished_process: str,
):
"""Terminate signal should be sent."""
mock_kill = mocker.patch("os.kill")
process_manager.terminate(running_process)
mock_kill.assert_called_once_with(PID_RUNNING, signal.SIGTERM)
mock_kill.reset_mock()
process_manager.terminate(finished_process)
mock_kill.assert_not_called()
def test_remove(
mocker: MockerFixture,
tmp_dir: TmpDir,
process_manager: ProcessManager,
running_process: str,
finished_process: str,
):
"""Process should be removed."""
mocker.patch("os.kill", return_value=None)
process_manager.remove(finished_process)
assert not (tmp_dir / finished_process).exists()
with pytest.raises(ProcessNotTerminatedError):
process_manager.remove(running_process)
assert (tmp_dir / running_process).exists()
process_manager.remove(running_process, True)
assert not (tmp_dir / running_process).exists()
@pytest.mark.parametrize("force", [True, False])
def test_cleanup( # pylint: disable=too-many-arguments
mocker: MockerFixture,
tmp_dir: TmpDir,
process_manager: ProcessManager,
running_process: str,
finished_process: str,
force: bool,
):
"""Process directory should be removed."""
mocker.patch("os.kill", return_value=None)
process_manager.cleanup(force)
assert (tmp_dir / running_process).exists() != force
assert not (tmp_dir / finished_process).exists()
def test_follow(
mocker: MockerFixture,
process_manager: ProcessManager,
running_process: str,
):
"""Output should be followed and not duplicated."""
orig_open = builtins.open
mock_file = mocker.mock_open()()
expected = ["foo\n", "bar\n", "b", "", "az\n"]
mock_file.readline = mocker.Mock(side_effect=expected)
def _open(path, *args, **kwargs):
if path.endswith(".out"):
return mock_file
return orig_open(path, *args, **kwargs)
mocker.patch("builtins.open", _open)
mock_sleep = mocker.patch("time.sleep")
follow_gen = process_manager.follow(running_process)
for line in expected:
if line:
assert line == next(follow_gen)
mock_sleep.assert_called_once_with(1)
# Process exit with no further output should cause StopIteration
# (raised as RuntimeError)
mocker.patch.object(
process_manager,
"__getitem__",
return_value=ProcessInfo(
pid=PID_RUNNING, stdin=None, stdout=None, stderr=None, returncode=0
),
)
with pytest.raises(RuntimeError):
next(follow_gen)
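# Editor's sketch of the tail-follow loop this test exercises; the real
# ProcessManager.follow implementation may differ (e.g. in its exit handling).
def _follow_sketch(fobj, interval=1.0):
    import time
    while True:
        line = fobj.readline()
        if line:
            yield line
        else:
            time.sleep(interval)  # poll until the producer writes more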
| 29.409639 | 79 | 0.705653 | 0 | 0 | 0 | 0 | 501 | 0.102622 | 0 | 0 | 606 | 0.124129 |