max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
todo/api/mock_helper.py | devord/todo | 0 | 12768151 | <gh_stars>0
import random
from faker import Faker
from faker.providers import lorem
from api.models import Label, Item
def random_subset(elements):
    """Return a random subset of *elements* (no duplicates).

    The subset size is drawn from a triangular distribution with mode 2,
    so small subsets are the most likely outcome.
    """
    subset_size = int(random.triangular(0, len(elements), 2))
    return random.sample(elements, k=subset_size)
def generate_mock_data(num_labels, num_items):
    """Populate the database with fake content for development/testing.

    Creates and saves *num_labels* Labels with random words as names, then
    *num_items* Items with random sentence titles and paragraph descriptions.
    Each item is tagged with a random subset of the freshly created labels.
    """
    fake = Faker()
    fake.add_provider(lorem)
    labels = []
    for _ in range(num_labels):
        label = Label(name=fake.word())
        label.save()
        labels.append(label)
    for _ in range(num_items):
        item = Item(title=fake.sentence(),
                    description=fake.paragraph())
        # The item must be saved before M2M labels can be attached.
        item.save()
        item.labels.add(*random_subset(labels))
| 2.671875 | 3 |
setup.py | mechanicalsea/pytorch-light | 1 | 12768152 | <filename>setup.py
"""Step-by-step upload to pypi.org: https://pypi.org/project/pytorchlight/
Overview:
1. Ensuring the requirements of update
- `setuptools` and `wheel`: python -m pip install --user --upgrade setuptools wheel
- `twine`: python -m pip install --user --upgrade twine
- an account of pypi.org
2. Generating distribution archives such as dist/*.whl and dist/*.tar.gz
1. cd DIR{setup.py}
2. python setup.py sdist bdist_wheel
3. Uploading the distribution archives
1. python -m twine upload --repository-url https://upload.pypi.org/legacy/ dist/*
2. input username and password
4. Installing your newly uploaded package for validation
1. pip install PACKAGE_NAME{pytorchlight}
2. python -> import PACKAGE_NAME{pytorchlight}
Example::
- [A sample project that exists for PyPUG's "Tutorial on Packaging and Distributing Projects"](https://github.com/pypa/sampleproject)
- [Packaging and distributing projects](https://packaging.python.org/guides/distributing-packages-using-setuptools/)
"""
from setuptools import setup, find_packages
# Long description and dependency list are read from files shipped alongside setup.py.
with open('README.md', 'r') as fh:
    long_description = fh.read()

# NOTE(review): split('\n') keeps empty strings for blank/trailing lines -- confirm
# requirements.txt has no blank lines, or filter them out.
with open('requirements.txt', 'r') as fh:
    requirements = fh.read().split('\n')

setup(name='pytorchlight',
      version='0.0.1',
      author='<NAME>',
      author_email='<EMAIL>',
      description='Easy-to-run Application based on PyTorch',
      long_description=long_description,
      long_description_content_type="text/markdown",
      url='https://github.com/mechanicalsea/pytorch-light',
      packages=find_packages(exclude=['test',]),
      license='BSD',
      install_requires=requirements,
      classifiers=[
          "Programming Language :: Python :: 3.5",
          "License :: OSI Approved :: BSD License",
          "Operating System :: Microsoft :: Windows :: Windows 10",
      ],
      )
| 2.140625 | 2 |
setup.py | mkorpela/judas | 3 | 12768153 | #!/usr/bin/env python
from distutils.core import setup
import os
from setuptools import find_packages
name = '<NAME>'
# I might be just a little bit too much afraid of those bots..
# (chr(64) == '@'; the address is assembled at runtime to dodge e-mail scrapers)
address = name.lower().replace(' ', '.')+chr(64)+'gmail.com'

setup(name='robpy',
      version='0.1',
      description='Test runner - Robot Framework pure Python runner',
      author=name,
      author_email=address,
      url='https://github.com/mkorpela/robpy',
      packages=find_packages(),
      scripts = [os.path.join('scripts', 'robpy'), os.path.join('scripts', 'robpy.bat')],
      license='Apache License, Version 2.0',
      install_requires = ['robotframework'])
| 1.351563 | 1 |
Chapter1/sumoftwodice.py | galinadychko/IntroToPython | 0 | 12768154 | import random
import stdio
# Simulate rolling two fair six-sided dice; randrange's upper bound is exclusive,
# so each roll is an integer in [1, 6].
a = random.randrange(1, 7)
b = random.randrange(1, 7)
stdio.writeln("Sum after rolling 2 dices: " + str(a) + " + " + str(b) + " = " + str(a + b))
| 3.21875 | 3 |
gui/searchAllDialog.py | JoachimCoenen/Datapack-Editor | 1 | 12768155 | import re
from dataclasses import dataclass, field
from enum import Enum
from typing import Optional, Sequence, Union
from PyQt5.QtCore import QEventLoop, pyqtSignal, QTimer
from PyQt5.QtGui import QKeySequence
from PyQt5.QtWidgets import QDialog, QWidget, QApplication
from Cat.CatPythonGUI.GUI import CORNERS, PythonGUI
from Cat.CatPythonGUI.GUI.Widgets import HTMLDelegate
from Cat.CatPythonGUI.GUI.codeEditor import Position, SearchOptions, SearchMode
from Cat.CatPythonGUI.GUI.framelessWindow.catFramelessWindowMixin import CatFramelessWindowMixin
from Cat.CatPythonGUI.GUI.treeBuilders import DataTreeBuilder
from Cat.CatPythonGUI.utilities import connectOnlyOnce
from Cat.icons import icons
from Cat.utils import escapeForXml
from Cat.utils.collections_ import OrderedMultiDict
from Cat.utils.profiling import TimedMethod
from gui.datapackEditorGUI import ContextMenuEntries, makeTextSearcher
from model.Model import Datapack
from model.pathUtils import FilePath, ZipFilePool, loadTextFile
from session.session import getSession
@dataclass(unsafe_hash=True)
class Occurrence:
    """A single search hit: the file it was found in, the match position,
    and the (HTML-formatted) line of text containing the match."""
    file: FilePath
    position: Position
    line: str
@dataclass
class SearchResult:
    """Accumulated state and results of one search-all run."""
    filesToSearch: list[FilePath] = field(default_factory=list)  # work queue for the search
    occurrences: OrderedMultiDict[FilePath, Occurrence] = field(default_factory=OrderedMultiDict)  # hits, grouped by file
    filesSearched: int = 0  # progress counter, drives the progress bar
    error: Optional[Exception] = None  # set when the search aborted (e.g. bad regex)
class SearchAllDialog(CatFramelessWindowMixin, QDialog):
    """'Search in all files' dialog.

    Runs a plain-text or regex search over the files of the datapacks selected
    in the sidebar and presents the hits in a two-level tree (file -> matches).
    """

    def __init__(self, parent: Optional[QWidget] = None):
        super().__init__(GUICls=PythonGUI, parent=parent)
        self._includedDatapacks: list[Datapack] = []
        self.searchExpr: str = ''
        self.searchOptions: SearchOptions = SearchOptions(
            searchMode=SearchMode.Normal,
            isCaseSensitive=False,
            isMultiLine=False,
        )
        self._searchResult: SearchResult = SearchResult()
        self.htmlDelegate = HTMLDelegate()
        self.setWindowTitle('Search')

    # Emitted with the number of files searched so far; drives the progress bar
    # and triggers incremental redraws of the results view.
    progressSignal = pyqtSignal(int)

    def resetUserInterface(self):
        """Discard previous results and rebuild the queue of files to search."""
        allFilePaths = self.filePathsToSearch
        self._searchResult = SearchResult(allFilePaths)

    def OnSidebarGUI(self, gui: PythonGUI):
        """Sidebar: one checkbox per datapack selecting what gets searched."""
        includedDatapacks = []
        with gui.vLayout(preventVStretch=True, verticalSpacing=0):
            for dp in getSession().world.datapacks:
                if gui.checkboxLeft(None, dp.name):
                    includedDatapacks.append(dp)
        self._includedDatapacks = includedDatapacks

    def OnGUI(self, gui: PythonGUI):
        """Main panel: search field + options, progress bar, results tree."""
        with gui.vLayout(preventVStretch=False):
            with gui.vLayout(preventVStretch=False):
                with gui.hLayout(horizontalSpacing=0):
                    self.searchExpr = gui.codeField(self.searchExpr, isMultiline=False, roundedCorners=CORNERS.NONE)
                    if gui.toolButton(icon=icons.search, overlap=(1, 0), roundedCorners=(False, True, False, True), default=True, windowShortcut=QKeySequence("Return")):
                        self.resetUserInterface()
                        # Defer via the event loop so the UI can repaint before
                        # the (potentially long) search begins.
                        QTimer.singleShot(1, self.search)
                if self.searchOptions.searchMode == SearchMode.RegEx:
                    # Surface regex syntax errors immediately below the field.
                    try:
                        re.compile(self.searchExpr)
                    except Exception as e:
                        gui.helpBox(str(e), 'error', hasLabel=False)
                self._searchOptionsGUI(gui)
            gui.progressBar(self.progressSignal, min=0, max=len(self._searchResult.filesToSearch), value=self._searchResult.filesSearched, format='', textVisible=True)
            resultsGUI = gui.subGUI(PythonGUI, self._resultsGUI1, suppressRedrawLogging=False)
            connectOnlyOnce(self, self.progressSignal, lambda i: resultsGUI.redrawGUI(), 'resultsGUI')
            resultsGUI.redrawGUI()
            self._resultsGUI2(gui)

    def _searchOptionsGUI(self, gui: PythonGUI):
        so = self.searchOptions
        # ============ Search Options: ============
        with gui.hLayout(preventHStretch=True):
            # Search Mode:
            so.searchMode = SearchMode.Normal if gui.radioButton(so.searchMode == SearchMode.Normal, 'Normal', group='searchMode', id=0) else so.searchMode
            so.searchMode = SearchMode.RegEx if gui.radioButton(so.searchMode == SearchMode.RegEx, 'RegEx', group='searchMode', id=2) else so.searchMode
            # Search Options:
            so.isCaseSensitive = gui.toggleLeft(so.isCaseSensitive, 'Case sensitive')
            # Multiline only makes sense for regex searches.
            so.isMultiLine = gui.toggleLeft(so.isMultiLine, 'Multiline', enabled=so.searchMode == SearchMode.RegEx)

    def _resultsGUI1(self, gui: PythonGUI) -> None:
        """Summary line above the tree: error message or hit statistics."""
        if self._searchResult.error is not None:
            gui.helpBox(f'error during search: {self._searchResult.error}', style='error')
        else:
            gui.label(f'found {len(self._searchResult.occurrences)} occurrences in {len(self._searchResult.occurrences.uniqueKeys())} files ({self._searchResult.filesSearched} files searched total): (double-click to open)')

    def _resultsGUI2(self, gui: PythonGUI) -> None:
        """Results tree: file nodes with one child per occurrence."""
        def labelMaker(x: Union[SearchResult, FilePath, Occurrence], i: int) -> str:
            if isinstance(x, Occurrence):
                return x.line
            else:
                countInFile = len(self._searchResult.occurrences.getall(x))
                if isinstance(x, tuple):
                    # NOTE(review): `filename` is computed but never used.
                    filename = x[1].rpartition('/')[2]
                    return f'(unknown) - ({countInFile}) - "{str(x[0])}"'
                elif isinstance(x, str):
                    filename = x.rpartition('/')[2]
                    # NOTE(review): `(countInFile)` is missing f-string braces so
                    # the literal text is displayed, and `x[0]` on a str is just
                    # its first character -- probably meant `{countInFile}` and
                    # the filename. Confirm and fix.
                    return f'(unknown) - (countInFile) - "{str(x[0])}"'
            return '<root>'

        def openDocument(x: Union[FilePath, Occurrence], *, s=self):
            # Jump to the exact match position when an occurrence was clicked.
            if isinstance(x, Occurrence):
                s.parent()._tryOpenOrSelectDocument(x.file, x.position)
            else:
                s.parent()._tryOpenOrSelectDocument(x)

        def onContextMenu(x: Union[SearchResult, FilePath, Occurrence], column: int, *, s=self):
            if isinstance(x, Occurrence):
                with gui.popupMenu(atMousePosition=True) as menu:
                    menu.addItems(ContextMenuEntries.fileItems(x.file, s.parent()._tryOpenOrSelectDocument))
            elif not isinstance(x, SearchResult):
                with gui.popupMenu(atMousePosition=True) as menu:
                    menu.addItems(ContextMenuEntries.fileItems(x, s.parent()._tryOpenOrSelectDocument))

        def childrenMaker(x: Union[SearchResult, FilePath, Occurrence], *, s=self) -> Sequence:
            # root -> unique files; file -> its occurrences; occurrence -> leaf.
            if isinstance(x, SearchResult):
                return list(x.occurrences.uniqueKeys())
                # return [(fp, x.occurrences.getall(fp)) for fp in x.occurrences.uniqueKeys()]
            elif isinstance(x, Occurrence):
                return tuple()
            else:
                return self._searchResult.occurrences.getall(x)

        gui.tree(
            DataTreeBuilder(
                self._searchResult,
                childrenMaker,  # lambda x: x.occurrences.items() if isinstance(x, SearchResult) else [],
                labelMaker,
                None, None, 1,
                showRoot=False,
                onDoubleClick=lambda x: openDocument(x),
                onContextMenu=onContextMenu
            ),
            headerVisible=True,
            itemDelegate=self.htmlDelegate
        )

    @property
    def filePathsToSearch(self) -> list[FilePath]:
        """All file paths of the datapacks ticked in the sidebar."""
        filePathsToSearch: list[FilePath] = []
        for datapack in self._includedDatapacks:
            filePathsToSearch.extend(datapack.files)
        return filePathsToSearch

    @TimedMethod()
    def search(self) -> None:
        """Run the search over all queued files, collecting Occurrences.

        Updates the progress signal every 100 files and pumps the Qt event
        loop so the UI stays responsive; always redraws the GUI at the end.
        """
        searchResult = self._searchResult
        try:
            try:
                searcher = makeTextSearcher(self.searchExpr, self.searchOptions)
            except Exception as e:
                searchResult.error = e
                return
            with ZipFilePool() as zipFilePool:
                for i, filePath in enumerate(searchResult.filesToSearch):
                    self._searchResult.filesSearched = i + 1
                    if i % 100 == 0:
                        self.progressSignal.emit(i+1)
                        QApplication.processEvents(QEventLoop.ExcludeUserInputEvents, 1)
                    try:
                        text = loadTextFile(filePath, zipFilePool)
                    except UnicodeDecodeError:
                        # Skip binary / non-text files.
                        continue
                    lastStart = 0
                    lastLineNr = 0
                    for matchStart, matchEnd in searcher(text):
                        # Expand the match to the full line it sits on.
                        start = text.rfind('\n', 0, matchStart)
                        start = start + 1  # skip \n at beginning of line # if start != -1 else 0
                        end = text.find('\n', matchEnd)
                        end = end if end != -1 else len(text)
                        # Bold the matched span inside the escaped line.
                        occurrenceStr = f'<font>{escapeForXml(text[start:matchStart])}<b>{escapeForXml(text[matchStart:matchEnd])}</b>{escapeForXml(text[matchEnd:end])}</font>'
                        # Count newlines incrementally instead of rescanning from 0.
                        lastLineNr += text.count('\n', lastStart, start)
                        lastStart = start
                        searchResult.occurrences.add(filePath, Occurrence(filePath, Position(lastLineNr, matchStart - start), occurrenceStr))
        finally:
            self._gui.redrawGUI()
| 1.796875 | 2 |
tests/test_paypalutil.py | TreZc0/donation-tracker | 39 | 12768156 | <reponame>TreZc0/donation-tracker
from django.test import TestCase
from paypal.standard.ipn.models import PayPalIPN
from tracker import paypalutil
class TestVerifyIPNRecipientEmail(TestCase):
    """Tests for paypalutil.verify_ipn_recipient_email.

    The helper accepts the recipient from either the IPN's `business` or
    `receiver_email` field, so both fields are exercised in each test.
    NOTE(review): addresses were redacted to '<EMAIL>' placeholders, so the
    mismatch test now compares identical strings -- restore distinct
    addresses for the mismatch case.
    """

    def test_match_is_okay(self):
        # A matching recipient (via either field) must not raise.
        ipn = PayPalIPN(business='<EMAIL>')
        paypalutil.verify_ipn_recipient_email(ipn, '<EMAIL>')
        ipn = PayPalIPN(receiver_email='<EMAIL>')
        paypalutil.verify_ipn_recipient_email(ipn, '<EMAIL>')

    def test_mismatch_raises_exception(self):
        # A non-matching recipient must raise SpoofedIPNException.
        ipn = PayPalIPN(business='<EMAIL>')
        with self.assertRaises(paypalutil.SpoofedIPNException):
            paypalutil.verify_ipn_recipient_email(ipn, '<EMAIL>')
        ipn = PayPalIPN(receiver_email='<EMAIL>')
        with self.assertRaises(paypalutil.SpoofedIPNException):
            paypalutil.verify_ipn_recipient_email(ipn, '<EMAIL>')
| 2.25 | 2 |
tabler_icons/templatetags/tabler_icons.py | alex-oleshkevich/python-tabler-icons | 0 | 12768157 | <reponame>alex-oleshkevich/python-tabler-icons<gh_stars>0
from __future__ import annotations
from django import template
from django.utils.safestring import SafeString, mark_safe
from ..icons import Stringable, get_icon
register = template.Library()
@register.simple_tag
def tabler_icon(name: str, size: Stringable = 24, **svg_attrs: Stringable) -> str:
    """Django template tag that renders a Tabler SVG icon as safe HTML.

    Extra keyword arguments are forwarded to get_icon as SVG attributes.
    """
    # `value + ""` downgrades a SafeString to a plain str before forwarding
    # (presumably so get_icon re-escapes consistently -- confirm).
    fixed_kwargs = {key: (value + "" if isinstance(value, SafeString) else value) for key, value in svg_attrs.items()}
    return mark_safe(get_icon(name, size, **fixed_kwargs))
| 2.3125 | 2 |
cozy/polynomials.py | mernst/cozy | 188 | 12768158 | <reponame>mernst/cozy
"""Class for representing polynomials of one variable."""
import functools
@functools.total_ordering
class Polynomial(object):
    """A polynomial in one variable.

    Coefficients are stored little-endian: ``terms[i]`` is the coefficient
    of ``n**i``.  Trailing zero coefficients are stripped on construction so
    the representation is canonical, making equality and hashing reliable.
    """
    __slots__ = ("terms",)

    def __init__(self, terms=()):
        """Create a polynomial from an iterable of coefficients (lowest power first)."""
        terms = list(terms)
        # Normalize: drop trailing zeros so [1, 0] and [1] compare equal.
        while terms and (terms[-1] == 0):
            terms.pop()
        self.terms = tuple(terms)

    def __hash__(self):
        return hash(self.terms)

    def __eq__(self, other):
        return self.terms == other.terms

    def __lt__(self, other):
        """Order by degree first, then by coefficients from highest power down."""
        if len(self.terms) != len(other.terms):
            return len(self.terms) < len(other.terms)
        for i in reversed(range(len(self.terms))):
            self_term = self.terms[i]
            other_term = other.terms[i]
            if self_term < other_term:
                return True
            if other_term < self_term:
                return False
        return False

    def __str__(self):
        if not self.terms:
            return "0"
        s = str(self.terms[0])
        for i in range(1, len(self.terms)):
            if self.terms[i]:
                term = str(self.terms[i])
                exponent = "n^{}".format(i) if i > 1 else "n"
                s = term + exponent + " + " + s
        return s

    def __repr__(self):
        return "Polynomial({!r})".format(self.terms)

    def get_coefficient(self, i):
        """Return the coefficient of n**i (0 if i exceeds the degree)."""
        if i >= len(self.terms):
            return 0
        return self.terms[i]

    def largest_term(self):
        """Return the highest-power term as a DominantTerm."""
        if not self.terms:
            return DominantTerm.ZERO
        exponent = len(self.terms) - 1
        return DominantTerm(
            multiplier=self.get_coefficient(exponent),
            exponent=exponent)

    def __add__(self, other):
        terms = [0] * max(len(self.terms), len(other.terms))
        for i in range(len(terms)):
            terms[i] = self.get_coefficient(i) + other.get_coefficient(i)
        return Polynomial(terms)

    def __mul__(self, other):
        """Multiply by another Polynomial or by a scalar.

        BUG FIX: the previous implementation added both ``other * c_i`` AND an
        unscaled shifted copy of ``other`` for every term, double-counting --
        e.g. Polynomial([1]) * Polynomial([1]) evaluated to Polynomial([2]).
        """
        if isinstance(other, Polynomial):
            res = Polynomial()
            for i, coefficient in enumerate(self.terms):
                # Contribution of c_i * n^i: shift `other` by i and scale by c_i.
                res += Polynomial([0] * i + [coefficient * t for t in other.terms])
            return res
        else:
            return Polynomial((t * other) for t in self.terms)
# Handy shared instances: zero polynomial, constant 1, and the identity n.
Polynomial.ZERO = Polynomial()
Polynomial.ONE = Polynomial([1])
Polynomial.N = Polynomial([0, 1])
@functools.total_ordering
class DominantTerm(object):
    """A term of the form c*n^e for some unknown n.

    Instances can be added, multiplied, and compared.  A term with a higher
    exponent always dominates (compares greater than) one with a lower
    exponent, regardless of the multipliers.
    """
    __slots__ = ("multiplier", "exponent")

    def __init__(self, multiplier, exponent):
        self.multiplier = multiplier
        self.exponent = exponent

    def __eq__(self, other):
        return (self.multiplier, self.exponent) == (other.multiplier, other.exponent)

    def __lt__(self, other):
        # Exponent dominates; multiplier only breaks ties.
        if self.exponent != other.exponent:
            return self.exponent < other.exponent
        return self.multiplier < other.multiplier

    def __str__(self):
        return "{}n^{}".format(self.multiplier, self.exponent)

    def __repr__(self):
        return "DominantTerm({}, {})".format(self.multiplier, self.exponent)

    def __add__(self, other):
        # Like-exponent terms combine; otherwise the dominant term wins.
        if self.exponent == other.exponent:
            return DominantTerm(self.multiplier + other.multiplier, self.exponent)
        return self if self.exponent > other.exponent else other

    def __mul__(self, other):
        return DominantTerm(
            self.multiplier * other.multiplier,
            self.exponent + other.exponent)
# Common terms: additive identity, multiplicative identity, and plain n.
DominantTerm.ZERO = DominantTerm(0, 0)
DominantTerm.ONE = DominantTerm(1, 0)
DominantTerm.N = DominantTerm(1, 1)
| 3.265625 | 3 |
src/pybel/io/exc.py | tehw0lf/pybel | 0 | 12768159 | <filename>src/pybel/io/exc.py
# -*- coding: utf-8 -*-
"""Exceptions for input/output."""
from ..exceptions import PyBELWarning
#: Template used by ImportVersionWarning.__str__; filled with dotted version strings.
import_version_message_fmt = 'Tried importing from PyBEL v{}. Need at least v{}'
class ImportVersionWarning(PyBELWarning, ValueError):
    """Raised when trying to import data from an old version of PyBEL."""

    def __init__(self, actual_version_tuple, minimum_version_tuple):
        """Build an import version warning.

        :param actual_version_tuple: version the data was exported with,
            as a tuple of components (e.g. ``(0, 12, 1)``)
        :param minimum_version_tuple: minimum supported version, as a tuple
        """
        super(ImportVersionWarning, self).__init__(actual_version_tuple, minimum_version_tuple)
        self.actual_tuple = actual_version_tuple
        self.minimum_tuple = minimum_version_tuple

    def __str__(self):
        # Render the tuples as dotted version strings, e.g. (0, 12, 1) -> "0.12.1".
        actual_s = '.'.join(map(str, self.actual_tuple))
        minimum_s = '.'.join(map(str, self.minimum_tuple))
        return import_version_message_fmt.format(actual_s, minimum_s)
| 2.53125 | 3 |
pyfocal/ui/viewer.py | eteq/pyfocal | 0 | 12768160 | <reponame>eteq/pyfocal<gh_stars>0
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from ..third_party.qtpy.QtCore import *
from ..third_party.qtpy.QtWidgets import *
from .qt.mainwindow import Ui_MainWindow
from .qt.plotsubwindow import Ui_SpectraSubWindow
from .widgets.plot_window import PlotWindow
class Viewer(QMainWindow):
    """
    The `Viewer` is the main construction area for all GUI widgets. This
    object does **not** control the interactions between the widgets,
    but only their creation and placement.
    """

    def __init__(self, parent=None):
        super(Viewer, self).__init__(parent)
        self.main_window = Ui_MainWindow()
        self.main_window.setupUi(self)
        # Convenience aliases for the frequently used list/tree widgets.
        self.wgt_data_list = self.main_window.listWidget
        self.wgt_layer_list = self.main_window.treeWidget_2
        self.wgt_model_list = self.main_window.treeWidget
        self.wgt_model_list.setHeaderLabels(["Parameter", "Value"])

        # Connect the validation events
        self.wgt_model_list.itemChanged.connect(
            self._model_parameter_validation)

        # Setup context menus
        self._setup_context_menus()

    def _setup_context_menus(self):
        """Wire up the custom context menu of the layer list."""
        self.wgt_layer_list.customContextMenuRequested.connect(
            self._layer_context_menu)

    def _set_model_tool_options(self):
        """Toggle the model tool buttons depending on whether the current
        layer already has a `model` attribute."""
        layer = self.current_layer()

        if layer is None:
            return

        if not hasattr(layer, 'model'):
            self.main_window.pushButton_4.show()
            self.main_window.pushButton_2.hide()
            self.main_window.comboBox_2.setEnabled(False)
            self.main_window.pushButton_3.setEnabled(False)
        else:
            self.main_window.pushButton_4.hide()
            self.main_window.pushButton_2.show()
            self.main_window.comboBox_2.setEnabled(True)
            self.main_window.pushButton_3.setEnabled(True)

    @property
    def current_model(self):
        # Name of the model currently selected in the model combo box.
        return self.main_window.comboBox.currentText()

    @property
    def current_fitter(self):
        # Name of the fitter currently selected in the fitter combo box.
        return self.main_window.comboBox_2.currentText()

    @property
    def current_model_formula(self):
        # Free-form model formula text entered by the user.
        return self.main_window.lineEdit.text()

    def add_sub_window(self):
        """
        Creates a new sub window instance in the MDI area.

        Returns
        -------
        plot_sub_window : PlotWindow
            The widget that was wrapped in the new QMdiSubWindow.
        """
        # Create new window
        plot_sub_window = PlotWindow()

        # Populate window with tool bars, status, etc.
        ui_sub_window = Ui_SpectraSubWindow()
        ui_sub_window.setupUi(plot_sub_window)

        # Let the sub window do initialization
        plot_sub_window.set_sub_window(ui_sub_window)
        plot_sub_window.initialize()

        new_sub_window = self.main_window.mdiArea.addSubWindow(plot_sub_window)
        new_sub_window.show()

        return plot_sub_window

    def open_file_dialog(self, filters):
        """
        Given a list of filters, prompts the user to select an existing file
        and returns the file path and filter.

        Parameters
        ----------
        filters : list
            List of filters for the dialog.

        Returns
        -------
        file_name : str
            Path to the selected file.
        selected_filter : str
            The chosen filter (this indicates which custom loader from the
            registry to use).
        """
        dialog = QFileDialog(self)
        dialog.setFileMode(QFileDialog.ExistingFile)
        dialog.setNameFilters([x for x in filters])

        if dialog.exec_():
            file_names = dialog.selectedFiles()
            selected_filter = dialog.selectedNameFilter()

            return file_names[0], selected_filter

        # Dialog was cancelled.
        return None, None

    def add_data_item(self, data):
        """
        Adds a `Data` object to the loaded data list widget.

        Parameters
        ----------
        data : pyfocal.core.data.Data
            The `Data` object to add to the list widget.
        """
        new_item = QListWidgetItem(data.name, self.wgt_data_list)
        new_item.setData(Qt.UserRole, data)

    def add_layer_item(self, layer, *args):
        """
        Adds a `Layer` object to the loaded layer list widget.

        Parameters
        ----------
        layer : pyfocal.core.data.Layer
            The `Layer` object to add to the list widget.
        """
        # Nest under the item of the layer's source if it is already listed.
        new_item = QTreeWidgetItem(self.get_layer_item(layer._source) or
                                   self.wgt_layer_list)
        new_item.setText(0, layer.name)
        new_item.setData(0, Qt.UserRole, layer)

        self.wgt_layer_list.setCurrentItem(new_item)

    def get_layer_item(self, layer):
        """Return the top-level tree item holding *layer*, or None."""
        root = self.wgt_layer_list.invisibleRootItem()

        for i in range(root.childCount()):
            child = root.child(i)

            if child.data(0, Qt.UserRole) == layer:
                return child

    def remove_layer_item(self, layer):
        # NOTE(review): QTreeWidget.children() returns QObject children, not
        # tree items, and item data is read without a column index here --
        # this likely never matches. Iterating invisibleRootItem's children
        # (as get_layer_item does) looks intended; confirm.
        for child in self.wgt_layer_list.children():
            if child.data(Qt.UserRole) == layer:
                self.wgt_layer_list.removeItemWidget(child)
                break

    def add_model_item(self, model):
        """
        Adds an `astropy.modeling.Model` to the loaded model tree widget.

        Parameters
        ----------
        model : astropy.modeling.Model
            The model whose name and parameters are shown as editable rows.
        """
        name = model.__class__.__name__

        # we want to remove the redundant '1D' suffix from model
        # names displayed to the user.
        if name.endswith('1D'):
            name = name[:-2]

        new_item = QTreeWidgetItem(self.wgt_model_list)
        new_item.setFlags(new_item.flags() | Qt.ItemIsEditable)
        new_item.setText(0, name)
        new_item.setData(0, Qt.UserRole, model)

        for i, para in enumerate(model.param_names):
            new_para_item = QTreeWidgetItem(new_item)
            new_para_item.setText(0, para)
            new_para_item.setData(0, Qt.UserRole,
                                  model.parameters[i])
            new_para_item.setText(1, str(model.parameters[i]))
            new_para_item.setFlags(new_para_item.flags() | Qt.ItemIsEditable)

    def remove_model_item(self, layer, model):
        # NOTE(review): same children()/data() concern as remove_layer_item.
        for child in self.wgt_model_list.children():
            if child.data(Qt.UserRole) == model:
                self.wgt_model_list.removeItemWidget(child)
                break

    def _model_parameter_validation(self, item, col):
        """Keep edited parameter cells numeric; revert invalid input."""
        if col == 0:
            return

        try:
            item.setText(col, str(float(item.text(col))))
            item.setData(col, Qt.UserRole, float(item.text(col)))
        except ValueError:
            # Restore the last valid value stored in the item's data.
            prev_val = item.data(col, Qt.UserRole)
            item.setText(col, str(prev_val))

    def get_model_inputs(self):
        """
        Returns the model and current parameters displayed in the UI.

        Returns
        -------
        models : dict
            A dictionary with the model instance as the key and a list of
            floats as the parameters values.
        """
        root = self.wgt_model_list.invisibleRootItem()
        models = {}

        for model_item in [root.child(j) for j in range(root.childCount())]:
            model = model_item.data(0, Qt.UserRole)
            args = []

            for i in range(model_item.childCount()):
                child_item = model_item.child(i)
                child = child_item.text(1)
                args.append(float(child))

            models[model] = args

        return models

    def clear_layer_widget(self):
        """Remove all rows from the layer list."""
        self.wgt_layer_list.clear()

    def clear_model_widget(self):
        """Remove all rows from the model tree."""
        self.wgt_model_list.clear()

    def current_data(self):
        """
        Returns the currently selected data object from the data list widget.

        Returns
        -------
        data : pyfocal.core.data.Data
            The `Data` object of the currently selected row.
        """
        data_item = self.wgt_data_list.currentItem()

        if data_item is not None:
            data = data_item.data(Qt.UserRole)
            return data

    def current_layer(self):
        """
        Returns the currently selected layer object form the layer list widget.

        Returns
        -------
        layer : pyfocal.core.data.Layer
            The `Layer` object of the currently selected row.
        """
        layer_item = self.wgt_layer_list.currentItem()

        if layer_item is not None:
            layer = layer_item.data(0, Qt.UserRole)
            return layer

    def parent_layer(self):
        """
        Returns the parent of the currently selected layer object
        form the layer list widget.

        Returns
        -------
        layer : pyfocal.core.data.Layer
            The `Layer` object of the parent of the currently selected row.
        """
        parent_item = self.wgt_layer_list.currentItem().parent()

        if parent_item is not None:
            layer = parent_item.data(0, Qt.UserRole)
            return layer

    def current_sub_window(self):
        """
        Returns the currently active `QMdiSubWindow` object.

        Returns
        -------
        sub_window : QMdiSubWindow
            The currently active `QMdiSubWindow` object.
        """
        sub_window = self.main_window.mdiArea.currentSubWindow()

        if sub_window is not None:
            return sub_window.widget()

    def update_statistics(self, stat_dict):
        """Push the computed statistics into the status labels."""
        self.main_window.label_2.setText("{0:0.03f}".format(stat_dict['mean'].value))
        self.main_window.label_4.setText("{0:0.03f}".format(stat_dict['median'].value))
        self.main_window.label_6.setText("{0:0.03f}".format(stat_dict['stddev'].value))
        self.main_window.label_8.setText("{0:0.03f}".format(stat_dict['total'].value))
        self.main_window.label_10.setText(str(stat_dict['npoints']))

    def _layer_context_menu(self, point):
        """Show the layer list's right-click menu at *point*."""
        menu = QMenu()
        menu.addAction(self.main_window.actionChange_Color)
        menu.addAction(self.main_window.actionRemove)

        menu.exec_(self.wgt_layer_list.viewport().mapToGlobal(point))
| 1.859375 | 2 |
Algorithms/Easy/697. Degree of an Array/answer.py | KenWoo/Algorithm | 0 | 12768161 | <reponame>KenWoo/Algorithm<gh_stars>0
from typing import List
class Solution:
    """LeetCode 697: Degree of an Array."""

    def findShortestSubArray(self, nums: List[int]) -> int:
        """Return the length of the shortest contiguous subarray of *nums*
        that has the same degree (maximum element frequency) as *nums*.

        Single pass: record each value's first index and a running count.
        Whenever a value's count reaches or exceeds the current degree, the
        span from its first occurrence to the current index is a candidate.
        Runs in O(n) time and O(n) space; returns 0 for an empty input.
        """
        if not nums:
            return 0
        first_index = {}  # value -> index of its first occurrence
        counts = {}       # value -> occurrences seen so far
        degree = 0
        best = len(nums)
        for i, value in enumerate(nums):
            first_index.setdefault(value, i)
            counts[value] = counts.get(value, 0) + 1
            span = i - first_index[value] + 1
            if counts[value] > degree:
                # New, strictly higher degree: this span is the only candidate.
                degree = counts[value]
                best = span
            elif counts[value] == degree:
                best = min(best, span)
        return best
if __name__ == "__main__":
    # Quick manual check against the LeetCode example.
    solver = Solution()
    answer = solver.findShortestSubArray([1, 2, 2, 3, 1, 4, 2])
    print(answer)
| 3.28125 | 3 |
sstcam_sandbox/d181123_tf_cell_check/apply_tf.py | watsonjj/CHECLabPySB | 0 | 12768162 | from subprocess import call
import os
from sstcam_sandbox.d181123_tf_cell_check import TF_Storage, \
TF_Sampling, all_files
def process(file, tf):
    """Run the external `apply_calibration` tool on one run file.

    The R1 output path is derived from the R0 filename (``_r0`` -> ``_r1``)
    and placed in the transfer function's r1 directory, which is created
    if missing.
    """
    r0_path = file.r0_path
    r1_name = os.path.basename(r0_path).replace("_r0", "_r1")
    r1_path = os.path.join(tf.r1_dir, r1_name)
    ped_path = file.ped_path
    tf_path = tf.path

    output_dir = os.path.dirname(r1_path)
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

    apply_calibration = "apply_calibration -i {} -p {} -t {} -o {}"
    cmd = apply_calibration.format(r0_path, ped_path, tf_path, r1_path)
    print(cmd)
    # NOTE(review): shell=True with interpolated paths -- fine for trusted
    # local paths, but would break on paths containing spaces.
    call(cmd, shell=True)
def main():
    """Apply both transfer-function variants to every known run file."""
    for f in all_files:
        process(f, TF_Sampling())
        process(f, TF_Storage())


if __name__ == '__main__':
    main()
| 2.265625 | 2 |
Algorithms/string_generator/string_generator.py | TeacherManoj0131/HacktoberFest2020-Contributions | 256 | 12768163 | <reponame>TeacherManoj0131/HacktoberFest2020-Contributions
import string,random
def string_generator(size, chars):
    """Return a random string of length *size* drawn from *chars* (with repeats)."""
    picked = [random.choice(chars) for _ in range(size)]
    return ''.join(picked)
def get_option(option):
    """Return the character pool for the given generator *option*.

    'alphabet' -> upper- and lower-case ASCII letters plus digits
    'numeric'  -> digits only

    Raises ValueError for any other option.  (BUG FIX: the previous version
    only printed a message and then crashed with an UnboundLocalError when
    returning the never-assigned `characters`.)
    """
    if option == 'alphabet':
        return string.ascii_uppercase + string.ascii_lowercase + string.digits
    if option == 'numeric':
        return string.digits
    raise ValueError('option out of context!')
# choose want alphabet generic or numeric generic
# Character pool to use: 'alphabet' (letters + digits) or 'numeric' (digits only).
option = 'alphabet'
# Length of the generated string.
size = 10
characters = get_option(option)
new_number = string_generator(size, characters)
print(new_number)
gym_trading/envs/simulator.py | AdrianP-/gym_trading | 109 | 12768164 | <reponame>AdrianP-/gym_trading<filename>gym_trading/envs/simulator.py<gh_stars>100-1000
import numpy as np
import pandas as pd
from .feature_engineering import FeatureEngineering
class Simulator(object):
    """Loads an OHLCV CSV, derives features, and steps through the rows one
    observation at a time for a trading environment."""

    def __init__(self, csv_name, train_split, dummy_period=None, train=True, multiple_trades=False):
        # FX files (name contains "EUR") ship Date and Time as two separate
        # leading columns which are merged into a single Date_Time index.
        if "EUR" in csv_name:
            df = pd.read_csv(csv_name, parse_dates=[[0, 1]], header=None,
                             names=['Date', 'Time', 'Open', 'High', 'Low', 'Close', 'Volume'])
            df = df[~np.isnan(df['Open'])].set_index('Date_Time')
        else:
            df = pd.read_csv(csv_name, usecols=['Date', 'High', 'Low', 'Open', 'Close', 'Volume'])
            df = df[~np.isnan(df['Open'])].set_index('Date')

        df = FeatureEngineering(df).get_df_processed()

        ##Attributes
        self.data = df
        self.date_time = df.index
        self.count = df.shape[0]
        self.train_end_index = int(train_split * self.count)

        # Attributes related to the observation state: Return
        # print(self.data.head(1))
        # Raw OHLCV columns are dropped; only the engineered features remain
        # in the observation vectors.
        data_dropped = self.data.drop(['Volume', 'Open', 'Close', 'High', 'Low'], axis=1)
        print(data_dropped.head(1))
        self.states = data_dropped.values
        self.min_values = data_dropped.min(axis=0).values
        self.max_values = data_dropped.max(axis=0).values

        # Generate previous Close
        if dummy_period is not None:
            close_prices = pd.DataFrame()
            close_prices['Close'] = self.data["Close"]
            for i in range(1, dummy_period + 1):
                close_prices['Close (n - %s)' % i] = self.data['Close'].shift(i)
            self.close = close_prices.values

        self._reset()

    def _reset(self, train=True):
        """Rewind to the start of the train or test segment; return first obs."""
        if train:
            obs = self.states[0]
            self.current_index = 1
            self._end = self.train_end_index
        else:
            self.current_index = self.train_end_index + 1
            obs = self.states[self.current_index]
            self._end = self.count - 1
        self._data = self.data.iloc[self.current_index:self._end + 1]
        return obs

    def _step(self, open_trade, duration_trade):
        """Advance one row; return (observation, done)."""
        if open_trade:
            # NOTE(review): `+` on a NumPy row performs broadcast addition,
            # not concatenation -- if the intent is to append the trade flags
            # to the observation, np.concatenate/np.append is needed. Confirm.
            obs = self.states[self.current_index] + [open_trade] + [duration_trade]
        else:
            obs = self.states[self.current_index]
        self.current_index += 1
        done = self.current_index > self._end
        return obs, done
| 2.90625 | 3 |
usaspending_api/recipient/migrations/0005_auto_20180723_1753.py | truthiswill/usaspending-api | 0 | 12768165 | <gh_stars>0
# -*- coding: utf-8 -*-
# Generated by Django 1.11.4 on 2018-07-23 17:53
from __future__ import unicode_literals
import django.contrib.postgres.fields
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated: creates the RecipientProfile model (table
    # `recipient_profile`) with a unique (recipient_level, recipient_hash) pair.

    dependencies = [
        ('recipient', '0004_dunsprops'),
    ]

    operations = [
        migrations.CreateModel(
            name='RecipientProfile',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('recipient_level', models.CharField(max_length=1)),
                ('recipient_hash', models.UUIDField(null=True)),
                ('recipient_unique_id', models.TextField(null=True)),
                ('recipient_name', models.TextField(null=True)),
                ('recipient_affiliations', django.contrib.postgres.fields.ArrayField(base_field=models.TextField(), default=list, size=None)),
                ('last_12_months', models.DecimalField(decimal_places=2, default=0, max_digits=23)),
            ],
            options={
                'db_table': 'recipient_profile',
                'managed': True,
            },
        ),
        migrations.AlterUniqueTogether(
            name='recipientprofile',
            unique_together=set([('recipient_level', 'recipient_hash')]),
        ),
    ]
| 1.695313 | 2 |
python/image_processor.py | devonlee111/Basic-Neural-Net | 0 | 12768166 | import os
import sys
from PIL import Image
# Python 2 script.
# Usage: image_processor.py <image-or-directory> <outfile> [width height]
# Converts image pixels to a comma-separated stream of 0/1 values
# (1 where the grey level exceeds 127).
numArgs = len(sys.argv)
if numArgs < 3:
    print "Not Enough Arguments\n"
    sys.exit()
elif numArgs == 4:
    print "Not Enough Arguments\n"
    sys.exit()
elif numArgs > 5:
    print "Too many arguments\n"
    sys.exit()

path = sys.argv[1]
outFile = open(sys.argv[2], "w+")

try:
    # Single-image mode: `path` points at one image file.
    image = Image.open(path)
    size = image.size
    width = size[0]
    height = size[1]
    newImage = image.convert('RGB')
    for x in range(0, width):
        for y in range(0, height):
            pix = newImage.getpixel((x,y))
            red = pix[0]
            green = pix[1]
            blue = pix[2]
            grey = (red + green + blue) / 3
            wb = 0
            if grey > 127:
                wb = 1
            if x == 0 and y == 0:
                outFile.write(str(wb))
            else:
                outFile.write("," + str(wb))
except:
    # NOTE(review): bare except assumes Image.open failed because `path` is
    # a directory; it also silently hides genuine I/O errors.
    width = -1
    height = -1

numImages = 0
if len(sys.argv) == 4:
    # NOTE(review): this branch is unreachable -- numArgs == 4 exits above.
    # Also argv values are strings (never passed through int()), and argv[2]
    # is the output file, so these indices look off by one. Confirm.
    width = sys.argv[2]
    height = sys.argv[3]
numImages = sum([len(files) for r, d, files in os.walk(path)])
outFile.write(str(numImages) + "\n")
for path, subdirs, files in os.walk(path):
    for name in files:
        # The containing directory name is used as the label for each image.
        label = path.rsplit('/', 1)[-1]
        filePath = os.path.join(path, name)
        image = Image.open(filePath)
        if width == -1 and height == -1:
            # First image fixes the expected dimensions.
            size = image.size
            width = size[0]
            height = size[1]
        else:
            newWidth, newHeight = image.size
            if newWidth != width or newHeight != height:
                print "Images are not the same size\nCanceling image processing..."
                sys.exit()
        for x in range(0, width):
            for y in range(0, height):
                pix = image.convert('RGB').getpixel((x,y))
                red = pix[0]
                green = pix[1]
                blue = pix[2]
                grey = (red + green + blue) / 3
                wb = 0
                if grey > 127:
                    wb = 1
                if x == 0 and y == 0:
                    outFile.write(str(wb))
                else:
                    outFile.write("," + str(wb))
        numImages += 1
        outFile.write(":" + label + "\n")
outFile.close()
| 2.90625 | 3 |
core/migrations/0030_greatmedia.py | uktrade/directory-cms | 6 | 12768167 | <reponame>uktrade/directory-cms<gh_stars>1-10
# Generated by Django 2.2.24 on 2021-10-05 14:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated migration: adds the GreatMedia model, a wagtailmedia
    # Media subclass (multi-table inheritance via media_ptr) with an optional
    # transcript and optional English subtitles in VTT format.

    dependencies = [
        ('wagtailmedia', '0003_copy_media_permissions_to_collections'),
        ('core', '0029_auto_20190417_1024'),
    ]

    operations = [
        migrations.CreateModel(
            name='GreatMedia',
            fields=[
                ('media_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailmedia.Media')),
                ('transcript', models.TextField(blank=True, null=True, verbose_name='Transcript')),
                ('subtitles_en', models.TextField(blank=True, help_text='English-language subtitles for this video, in VTT format', null=True, verbose_name='English subtitles')),
            ],
            options={
                'verbose_name': 'media',
                'abstract': False,
            },
            bases=('wagtailmedia.media',),
        ),
    ]
| 1.734375 | 2 |
tools/autogenerate.py | Boldie/simple-youtube-api | 39 | 12768168 | import os
import sys
import argparse
import json
import re
SCHEMA_PATH = os.path.abspath("../resources/schema")
def main():
    """Load the JSON schema named by --schema and generate a parser module.

    Fixes: omitting --schema previously crashed with a TypeError when the
    None default was concatenated into the file path; it now produces a
    proper argparse error. The path is built with os.path.join and the
    schema is decoded directly with json.load.
    """
    parser = argparse.ArgumentParser(description="Do stuff")
    parser.add_argument("--schema", default=None)
    arguments = parser.parse_args()

    schema_file = arguments.schema
    if not schema_file:
        parser.error("--schema is required")

    schema_file_path = os.path.join(SCHEMA_PATH, schema_file)
    # Module name: schema file name without its .json extension.
    name = schema_file.replace(".json", "")
    with open(schema_file_path, "r") as myfile:
        schema_json = json.load(myfile)

    autogenerate_parse(schema_json, name)
def autogenerate_parse(schema_json, name):
    """Write ``<name>.py`` containing a generated ``parse_<name>`` function.

    The generated function copies top-level primitive fields of a response
    dict onto the target object, and for each one-level-deep sub-dict copies
    its primitive fields guarded by a presence check. The "kind" field and
    anything nested deeper than one level are skipped.
    """
    myfile = open(name + ".py", "w")

    final_string = "\n"
    final_string += "def parse_{0}({0}, data):\n".format(name)
    for key in schema_json.keys():
        # snake_case attribute name for the camelCase JSON key.
        py_key = convert_var(key)
        key_json = schema_json[key]

        if key == "kind":
            continue

        if type(schema_json[key]) is not dict:
            # Primitive top-level field: direct assignment, indented one level.
            final_string += "{3}{0}.{2} = data['{1}']\n".format(
                name, key, py_key, 4 * " "
            )
            continue

        # Sub-object: fetch it once, then copy its primitive fields only
        # when the sub-object is present in the response data.
        final_string += "\n"
        final_string += 4 * " " + "# " + key + "\n"
        final_string += "{2}{1}_data = data.get('{0}', False)\n".format(
            key, py_key, 4 * " "
        )
        final_string += "{1}if {0}_data:\n".format(py_key, 4 * " ")

        for key2 in key_json.keys():
            if type(key_json[key2]) is dict:
                # Deeper nesting is not generated.
                continue
            py_key2 = convert_var(key2)
            key2_json = key_json[key2]
            final_string += "{4}{0}.{3} = {2}_data.get('{1}', None)\n".format(
                name, key2, py_key, py_key2, 8 * " "
            )
    final_string += "\n"
    final_string += "{1}return {0}".format(name, 4 * " ")
    myfile.write(final_string)
    myfile.close()
    # print(final_string)
def convert_var(var_name):
    """Convert a camelCase identifier to snake_case."""
    # Zero-width match before every uppercase letter that is not at the start.
    camel_boundary = re.compile(r"(?<!^)(?=[A-Z])")
    return camel_boundary.sub("_", var_name).lower()
def contains_only_primitive_keys(json_dict):
    """Return True when every value in ``json_dict`` is primitive (not a dict).

    Fixes a logic bug: the original returned True as soon as it saw *one*
    non-dict value, so a dict mixing primitive and nested values was wrongly
    reported as "only primitive", and an empty dict was reported as False.
    """
    return all(not isinstance(value, dict) for value in json_dict.values())
# Script entry point.
if __name__ == "__main__":
    main()
| 3.015625 | 3 |
source/infrastructure/personalize/step_functions/filter_fragment.py | turnoutnow/maintaining-personalized-experiences-with-machine-learning | 1 | 12768169 | # ######################################################################################################################
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance #
# with the License. You may obtain a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software distributed under the License is distributed #
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for #
# the specific language governing permissions and limitations under the License. #
# ######################################################################################################################
from typing import List
from aws_cdk.aws_stepfunctions import (
StateMachineFragment,
State,
INextable,
Choice,
Pass,
Condition,
Map,
JsonPath,
)
from aws_cdk.core import Construct, Duration
from personalize.aws_lambda.functions import (
CreateFilter,
)
class FilterFragment(StateMachineFragment):
    """Step Functions fragment that creates Amazon Personalize filters.

    Builds a Map state iterating over ``$.filters`` (when present) and runs
    the CreateFilter Lambda state for each entry; when no filters are
    configured the fragment reduces to a no-op Pass state.
    """

    def __init__(
        self,
        scope: Construct,
        id: str,
        create_filter: CreateFilter,
    ):
        super().__init__(scope, id)

        # total allowed elapsed duration ~ 11m30s
        retry_config = {
            "backoff_rate": 1.25,
            "interval": Duration.seconds(8),
            "max_attempts": 15,
        }

        # Copy the dataset group ARN into each filter's service config so the
        # CreateFilter step receives a self-contained payload.
        self.prepare_filter_input = Pass(
            self,
            "Prepare Filter Input Data",
            input_path="$.datasetGroupArn",
            result_path="$.filter.serviceConfig.datasetGroupArn",
        )
        self.create_filter = create_filter.state(
            self,
            "Create Filter",
            input_path="$.filter",
            **retry_config,
        )
        self.not_required = Pass(self, "Filters Not Required")
        # One Map iteration per configured filter; the iterator result is
        # discarded so the overall state input passes through unchanged.
        self.create_filters = Map(
            self,
            "Create Filters",
            items_path="$.filters",
            parameters={
                "datasetGroupArn.$": "$.datasetGroup.serviceConfig.datasetGroupArn",
                "filter.$": "$$.Map.Item.Value",
            },
            result_path=JsonPath.DISCARD,
        )

        # Entry Choice: only run the Map when at least one filter is present.
        self.start = (
            Choice(self, "Check if Filters Required")
            .when(
                Condition.is_present("$.filters[0]"),
                self.create_filters.iterator(
                    self.prepare_filter_input.next(self.create_filter)
                ),
            )
            .otherwise(self.not_required)
        )

    @property
    def start_state(self) -> State:
        # Entry point of the fragment (the Choice state above).
        return self.start.start_state

    @property
    def end_states(self) -> List[INextable]:
        # Both terminal branches: the no-op Pass and the Map state.
        return [self.not_required, self.create_filters]
| 1.484375 | 1 |
sklearntools/sym/sym_transform_parts.py | modusdatascience/sklearntools | 2 | 12768170 | <reponame>modusdatascience/sklearntools<gh_stars>1-10
from .base import call_method_or_dispatch, fallback, create_registerer
from .parts import double_check
from .syms import syms
from .sym_transform import sym_transform
def sym_transform_parts_base(obj, target=None):
    # Generic fallback: pair the estimator's input symbols with its symbolic
    # transform expressions, plus the (optional) target.
    return (syms(obj), sym_transform(obj), target)

# Dispatch table mapping types to specialised sym_transform_parts handlers.
sym_transform_parts_dispatcher = {}
# Resolution order: the object's own `sym_transform_parts` method, then a
# registered dispatch entry, then the generic fallback above; `double_check`
# wraps the dispatcher (presumably for result validation -- see parts module).
sym_transform_parts = double_check(fallback(call_method_or_dispatch('sym_transform_parts', sym_transform_parts_dispatcher), sym_transform_parts_base))
register_sym_transform_parts = create_registerer(sym_transform_parts_dispatcher, 'register_sym_transform_parts')
| 2.125 | 2 |
sdk/eventhub/azure-eventhubs/azure/eventhub/_eventprocessor/event_processor.py | mjudeikis/azure-sdk-for-python | 0 | 12768171 | # --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
import uuid
import logging
import time
import threading
from functools import partial
from .partition_context import PartitionContext
from .ownership_manager import OwnershipManager
from .common import CloseReason
from. _eventprocessor_mixin import EventProcessorMixin
_LOGGER = logging.getLogger(__name__)
class EventProcessor(EventProcessorMixin):  # pylint:disable=too-many-instance-attributes
    """
    An EventProcessor constantly receives events from one or multiple partitions of the Event Hub
    in the context of a given consumer group.
    """
    def __init__(self, eventhub_client, consumer_group, on_event, **kwargs):
        # Identity of the hub/consumer group this processor consumes from.
        self._consumer_group = consumer_group
        self._eventhub_client = eventhub_client
        self._namespace = eventhub_client._address.hostname  # pylint: disable=protected-access
        self._eventhub_name = eventhub_client.eventhub_name
        # User callbacks (on_event is required; the rest are optional).
        self._event_handler = on_event
        self._partition_id = kwargs.get("partition_id", None)
        self._error_handler = kwargs.get("on_error", None)
        self._partition_initialize_handler = kwargs.get("on_partition_initialize", None)
        self._partition_close_handler = kwargs.get("on_partition_close", None)
        self._checkpoint_store = kwargs.get("checkpoint_store", None)
        self._initial_event_position = kwargs.get("initial_event_position", "-1")
        self._initial_event_position_inclusive = kwargs.get("initial_event_position_inclusive", False)
        self._load_balancing_interval = kwargs.get("load_balancing_interval", 10.0)
        # Ownership expires after two balancing cycles without renewal.
        self._ownership_timeout = self._load_balancing_interval * 2
        self._partition_contexts = {}

        # Receive parameters
        self._owner_level = kwargs.get("owner_level", None)
        if self._checkpoint_store and self._owner_level is None:
            # Default owner level when checkpointing is enabled.
            self._owner_level = 0
        self._prefetch = kwargs.get("prefetch", None)
        self._track_last_enqueued_event_properties = kwargs.get("track_last_enqueued_event_properties", False)
        self._id = str(uuid.uuid4())
        self._running = False
        # Guards the consumer/partition-context bookkeeping below.
        self._lock = threading.RLock()

        self._consumers = {}
        self._ownership_manager = OwnershipManager(
            self._eventhub_client,
            self._consumer_group,
            self._id,
            self._checkpoint_store,
            self._ownership_timeout,
            self._partition_id
        )

    def __repr__(self):
        return 'EventProcessor: id {}'.format(self._id)

    def _cancel_tasks_for_partitions(self, to_cancel_partitions):
        """Flag the consumers of the given partitions to stop on the next loop pass."""
        with self._lock:
            for partition_id in to_cancel_partitions:
                if partition_id in self._consumers:
                    self._consumers[partition_id].stop = True

        if to_cancel_partitions:
            _LOGGER.info("EventProcesor %r has cancelled partitions %r", self._id, to_cancel_partitions)

    def _create_tasks_for_claimed_ownership(self, claimed_partitions, checkpoints=None):
        """Create a consumer (and PartitionContext) for each newly claimed partition."""
        with self._lock:
            for partition_id in claimed_partitions:
                if partition_id not in self._consumers:
                    # Reuse an existing context so user state survives re-claims.
                    if partition_id in self._partition_contexts:
                        partition_context = self._partition_contexts[partition_id]
                    else:
                        partition_context = PartitionContext(
                            self._namespace,
                            self._eventhub_name,
                            self._consumer_group,
                            partition_id,
                            self._checkpoint_store
                        )
                        self._partition_contexts[partition_id] = partition_context

                    # Resume from the checkpoint when one exists.
                    checkpoint = checkpoints.get(partition_id) if checkpoints else None
                    initial_event_position, event_postition_inclusive =\
                        self.get_init_event_position(partition_id, checkpoint)
                    event_received_callback = partial(self._on_event_received, partition_context)
                    self._consumers[partition_id] = self.create_consumer(partition_id,
                                                                         initial_event_position,
                                                                         event_postition_inclusive,
                                                                         event_received_callback)
                    if self._partition_initialize_handler:
                        self._handle_callback(
                            [self._partition_initialize_handler,
                             self._partition_contexts[partition_id]]
                        )

    def _handle_callback(self, callback_and_args):
        """Invoke a user callback; route any exception it raises to the error handler."""
        callback = callback_and_args[0]
        try:
            callback(*callback_and_args[1:])
        except Exception as exp:  # pylint:disable=broad-except
            # For user callbacks, args[1] is always the PartitionContext.
            partition_context = callback_and_args[1]
            if self._error_handler and callback != self._error_handler:
                self._handle_callback([self._error_handler, partition_context, exp])
            else:
                # Error handler itself failed (or none set): log and move on.
                _LOGGER.warning(
                    "EventProcessor instance %r of eventhub %r partition %r consumer group %r"
                    " has another error during running process_error(). The exception is %r.",
                    self._id,
                    partition_context.eventhub_name,
                    partition_context.partition_id,
                    partition_context.consumer_group,
                    exp
                )

    def _on_event_received(self, partition_context, event):
        """Per-event callback wired into each consumer via functools.partial."""
        with self._context(event):
            if self._track_last_enqueued_event_properties:
                partition_context._last_received_event = event  # pylint: disable=protected-access
            self._handle_callback([self._event_handler, partition_context, event])

    def _load_balancing(self):
        """Start the EventProcessor.

        The EventProcessor will try to claim and balance partition ownership with other `EventProcessor`
        and start receiving EventData from EventHub and processing events.

        :return: None
        """
        while self._running:
            try:
                checkpoints = self._ownership_manager.get_checkpoints() if self._checkpoint_store else None
                claimed_partition_ids = self._ownership_manager.claim_ownership()
                if claimed_partition_ids:
                    # Cancel consumers for partitions we no longer own.
                    to_cancel_list = set(self._consumers.keys()) - set(claimed_partition_ids)
                    self._create_tasks_for_claimed_ownership(claimed_partition_ids, checkpoints)
                else:
                    _LOGGER.info("EventProcessor %r hasn't claimed an ownership. It keeps claiming.", self._id)
                    to_cancel_list = set(self._consumers.keys())
                if to_cancel_list:
                    self._cancel_tasks_for_partitions(to_cancel_list)
            except Exception as err:  # pylint:disable=broad-except
                _LOGGER.warning("An exception (%r) occurred during balancing and claiming ownership for "
                                "eventhub %r consumer group %r. Retrying after %r seconds",
                                err, self._eventhub_name, self._consumer_group, self._load_balancing_interval)
                # ownership_manager.get_checkpoints() and ownership_manager.claim_ownership() may raise exceptions
                # when there are load balancing and/or checkpointing (checkpoint_store isn't None).
                # They're swallowed here to retry every self._load_balancing_interval seconds.
                # Meanwhile this event processor won't lose the partitions it has claimed before.
                # If it keeps failing, other EventProcessors will start to claim ownership of the partitions
                # that this EventProcessor is working on. So two or multiple EventProcessors may be working
                # on the same partition.
            time.sleep(self._load_balancing_interval)

    def _close_consumer(self, partition_id, consumer, reason):
        """Close one consumer, fire on_partition_close, and release its ownership."""
        consumer.close()
        with self._lock:
            del self._consumers[partition_id]

        _LOGGER.info(
            "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
            " is being closed. Reason is: %r",
            self._id,
            self._partition_contexts[partition_id].eventhub_name,
            self._partition_contexts[partition_id].partition_id,
            self._partition_contexts[partition_id].consumer_group,
            reason
        )

        if self._partition_close_handler:
            self._handle_callback([self._partition_close_handler, self._partition_contexts[partition_id], reason])

        self._ownership_manager.release_ownership(partition_id)

    def start(self):
        """Run the processor: spawn the load-balancing daemon thread and pump
        each owned consumer's receive() in a loop until stop() is called."""
        if self._running:
            _LOGGER.info("EventProcessor %r has already started.", self._id)
            return

        _LOGGER.info("EventProcessor %r is being started", self._id)
        self._running = True
        thread = threading.Thread(target=self._load_balancing)
        thread.daemon = True
        thread.start()

        while self._running:
            # Snapshot the dict: _close_consumer mutates self._consumers.
            for partition_id, consumer in list(self._consumers.items()):
                if consumer.stop:
                    self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST)
                    continue

                try:
                    consumer.receive()
                except Exception as error:  # pylint:disable=broad-except
                    _LOGGER.warning(
                        "PartitionProcessor of EventProcessor instance %r of eventhub %r partition %r consumer group %r"
                        " has met an error. The exception is %r.",
                        self._id,
                        self._partition_contexts[partition_id].eventhub_name,
                        self._partition_contexts[partition_id].partition_id,
                        self._partition_contexts[partition_id].consumer_group,
                        error
                    )
                    if self._error_handler:
                        self._handle_callback([self._error_handler, self._partition_contexts[partition_id], error])
                    self._close_consumer(partition_id, consumer, CloseReason.OWNERSHIP_LOST)

        # stop() was called: shut down every remaining consumer.
        with self._lock:
            for partition_id, consumer in list(self._consumers.items()):
                self._close_consumer(partition_id, consumer, CloseReason.SHUTDOWN)

    def stop(self):
        """Stop the EventProcessor.

        The EventProcessor will stop receiving events from EventHubs and release the ownership of the partitions
        it is working on.
        Other running EventProcessor will take over these released partitions.

        A stopped EventProcessor can be restarted by calling method `start` again.

        :return: None
        """
        if not self._running:
            _LOGGER.info("EventProcessor %r has already been stopped.", self._id)
            return

        self._running = False
        _LOGGER.info("EventProcessor %r has been stopped.", self._id)
| 2.03125 | 2 |
src/dynamic_graph/entity.py | florent-lamiraux/dynamic-graph-python | 0 | 12768172 | """
Copyright (C) 2010 CNRS
Author: <NAME>, <NAME>
"""
import wrap, signal_base, new
from attrpath import setattrpath
# Provide a default `display` only when the embedding application has not
# already injected one into this module's globals.
if 'display' not in globals().keys():
    def display(s):
        # Fallback: write the message to standard output.
        print(s)
# --- FACTORY ------------------------------------------------------------------
# --- FACTORY ------------------------------------------------------------------
# --- FACTORY ------------------------------------------------------------------
class PyEntityFactoryClass(type):
    """
    The class build dynamically a new class type, and return the reference
    on the class-type object. The class type is not added to any context.
    """

    # NOTE(review): the mutable default `dict={}` is shared across calls and
    # `dict` shadows the builtin -- kept as-is for byte compatibility.
    def __new__(factory, className,bases=(), dict={} ):
        if len(bases)==0:
            # Initialize a basic Entity class
            EntityClass = type.__new__(factory, className, (Entity,), dict)
            EntityClass.className = className
            # Bind Entity.initEntity as the constructor of the generated class.
            EntityClass.__init__ = Entity.initEntity
        else:
            # Initialize a heritated class
            EntityClass = type.__new__(factory, className, bases, dict)
            # Inherit `className` from the first base that is an Entity.
            for c in bases:
                if issubclass(c,Entity):
                    EntityClass.className = c.className
                    break
        # Commands are bound lazily on first instantiation (see initEntity).
        EntityClass.commandCreated = False
        return EntityClass
def PyEntityFactory(className, context):
    """
    Build a new class type by calling the factory, and add it
    to the given context.
    """
    entity_class = PyEntityFactoryClass(className)
    context[className] = entity_class
    return entity_class
def updateEntityClasses(dictionary):
    """
    For every C++ entity type not yet bound as a Python entity class
    (Entity.entityClassNameList), run the factory and store the new type
    in the given context (dictionary).
    """
    known = Entity.entityClassNameList
    # Snapshot the unknown class names first, exactly like the original
    # filter(), then bind each one.
    new_classes = [c for c in wrap.factory_get_entity_class_list() if c not in known]
    for class_name in new_classes:
        # Store new class in dictionary with class name
        PyEntityFactory(class_name, dictionary)
        # Store class name in local list
        known.append(class_name)
# --- ENTITY -------------------------------------------------------------------
# --- ENTITY -------------------------------------------------------------------
# --- ENTITY -------------------------------------------------------------------
class Entity (object) :
    """
    This class binds dynamicgraph::Entity C++ class
    """

    # Pointer to the underlying C++ Entity object (set in __init__).
    obj = None
    """
    Store list of entities created via python
    """
    entities = dict ()

    def __init__(self, className, instanceName):
        """
        Constructor: if not called by a child class, create and store a pointer
        to a C++ Entity object.
        """
        # object.__setattr__ bypasses this class's __setattr__ signal guard.
        object.__setattr__(self, 'obj', wrap.create_entity(className, instanceName) )
        Entity.entities [instanceName] = self

    @staticmethod
    def initEntity(self, name):
        """
        Common constructor of specialized Entity classes. This function is bound
        by the factory to each new class derivated from the Entity class as the
        constructor of the new class.
        """
        Entity.__init__(self, self.className, name)
        # Bind the C++ commands and docstring once per generated class.
        if not self.__class__.commandCreated:
            self.boundClassCommands()
            self.__class__.__doc__ = wrap.entity_get_docstring (self.obj)
            self.__class__.commandCreated = True

    @property
    def name(self) :
        # Instance name as known by the C++ side.
        return wrap.entity_get_name(self.obj)

    @property
    def className(self) :
        # C++ class name of the entity.
        return wrap.entity_get_class_name(self.obj)

    def __str__(self) :
        # Delegate the textual representation to the C++ display method.
        return wrap.display_entity(self.obj)

    def signal (self, name) :
        """
        Get a signal of the entity from signal name
        """
        signalPt = wrap.entity_get_signal(self.obj, name)
        return signal_base.SignalBase(name = "", obj = signalPt)

    def hasSignal(self, name) :
        """
        Indicates if a signal with the given name exists in the entity
        """
        return wrap.entity_has_signal(self.obj, name)

    def displaySignals(self) :
        """
        Print the list of signals into standard output: temporary.
        """
        signals = self.signals()
        if len(signals) == 0:
            display ("--- <" + self.name + "> has no signal")
        else:
            display ("--- <" + self.name + "> signal list: ")
            for s in signals[:-1]:
                display(" |-- <" + str(s))
            display(" `-- <" + str(signals[-1]))

    def signals(self) :
        """
        Return the list of signals
        """
        sl = wrap.entity_list_signals(self.obj)
        return map(lambda pyObj: signal_base.SignalBase(obj=pyObj), sl)

    def commands(self):
        """
        Return the list of commands.
        """
        return wrap.entity_list_commands(self.obj)

    def globalHelp(self):
        """
        Print a short description of each command.
        """
        if self.__doc__ :
            print self.__doc__
        print "List of commands:"
        print "-----------------"
        for cstr in self.commands():
            # Pad the command name to a 15-character column.
            ctitle=cstr+':'
            for i in range(len(cstr),15):
                ctitle+=' '
            # Show only the first non-blank line of each command docstring.
            for docstr in wrap.entity_get_command_docstring(self.obj,cstr).split('\n'):
                if (len(docstr)>0) and (not docstr.isspace()):
                    display(ctitle+"\t"+docstr)
                    break

    def help( self,comm=None ):
        """
        With no arg, print the global help. With arg the name of
        a specific command, print the help associated to the command.
        """
        if comm is None:
            self.globalHelp()
        else:
            display(comm+":\n"+wrap.entity_get_command_docstring(self.obj,comm))

    def __getattr__(self, name):
        """Resolve unknown attributes as signals, so `entity.sigName` works."""
        try:
            return self.signal(name)
        except:
            try:
                object.__getattr__(self, name)
            except AttributeError:
                raise AttributeError("'%s' entity has no attribute %s\n"%
                                     (self.name, name)+
                                     ' entity attributes are usually either\n'+
                                     ' - commands,\n'+
                                     ' - signals or,\n'+
                                     ' - user defined attributes')

    def __setattr__(self, name, value):
        """Refuse to shadow an existing signal with a plain attribute."""
        if name in map(lambda s: s.getName().split(':')[-1],self.signals()):
            raise NameError(name+" already designates a signal. "
                            "It is not advised to set a new attribute of the same name.")
        object.__setattr__(self, name, value)

    # --- COMMANDS BINDER -----------------------------------------------------
    # List of all the entity classes from the c++ factory, that have been bound
    # bind the py factory.
    entityClassNameList = []

    # This function dynamically create the function object that runs the command.
    @staticmethod
    def createCommandBind(name, docstring) :
        def commandBind(self, *arg):
            return wrap.entity_execute_command(self.obj, name, arg)
        commandBind.__doc__ = docstring
        return commandBind

    def boundClassCommands(self):
        """
        This static function has to be called from a class heritating from Entity.
        It should be called only once. It parses the list of commands obtained from
        c++, and bind each of them to a python class method.
        """
        # Get list of commands of the Entity object
        commands = wrap.entity_list_commands(self.obj)
        # for each command, add a method with the name of the command
        for cmdstr in commands:
            docstr = wrap.entity_get_command_docstring(self.obj, cmdstr)
            cmdpy = Entity.createCommandBind(cmdstr, docstr)
            setattrpath(self.__class__, cmdstr, cmdpy)

    def boundNewCommand(self,cmdName):
        """
        At construction, all existing commands are bound directly in the class.
        This method enables to bound new commands dynamically. These new bounds
        are not made with the class, but directly with the object instance.
        """
        if (cmdName in self.__dict__) | (cmdName in self.__class__.__dict__):
            print("Warning: command ",cmdName," will overwrite an object attribute.")
        docstring = wrap.entity_get_command_docstring(self.obj, cmdName)
        cmd = Entity.createCommandBind(cmdName,docstring)
        # Limitation (todo): does not handle for path attribute name (see setattrpath).
        setattr(self,cmdName,new.instancemethod( cmd, self,self.__class__))

    def boundAllNewCommands(self):
        """
        For all commands that are not attribute of the object instance nor of the
        class, a new attribute of the instance is created to bound the command.
        """
        cmdList = wrap.entity_list_commands(self.obj)
        cmdList = filter(lambda x: not x in self.__dict__, cmdList)
        cmdList = filter(lambda x: not x in self.__class__.__dict__, cmdList)
        for cmd in cmdList:
            self.boundNewCommand( cmd )
| 2.578125 | 3 |
tests/integ/test_record_set.py | LastRemote/sagemaker-python-sdk | 1,690 | 12768173 | <filename>tests/integ/test_record_set.py
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
from __future__ import absolute_import
from six.moves.urllib.parse import urlparse
from sagemaker import KMeans
from tests.integ import datasets
def test_record_set(sagemaker_session, cpu_instance_type):
    """Test the method ``AmazonAlgorithmEstimatorBase.record_set``.

    In particular, test that the objects uploaded to the S3 bucket are encrypted.
    """
    estimator = KMeans(
        role="SageMakerRole",
        instance_count=1,
        instance_type=cpu_instance_type,
        k=10,
        sagemaker_session=sagemaker_session,
    )
    training_subset = datasets.one_p_mnist()[0][:100]
    record_set = estimator.record_set(training_subset, encrypt=True)

    # Inspect the uploaded object's metadata for server-side encryption.
    parsed = urlparse(record_set.s3_data)
    s3 = sagemaker_session.boto_session.client("s3")
    head = s3.head_object(Bucket=parsed.netloc, Key=parsed.path.lstrip("/"))
    assert head["ServerSideEncryption"] == "AES256"
| 2.1875 | 2 |
lib/fetchListToAdd.py | mwoolweaver/listManager.py | 1 | 12768174 | #!/usr/bin/env python3
#
# Project homepage: https://github.com/mwoolweaver
# Licence: <http://unlicense.org/>
# Created by <NAME> <<EMAIL>>
# ================================================================================
import pandas
from lib.debug import debuginfo, debuginfoDBV, debuginfoDBVV, debuginfoDBVVV, sqlError
def fetchEntries(filesWeNeed):
    """Read each tab-separated file and return one list of plain row tuples per file."""
    return [
        list(
            pandas.read_csv(source, delimiter='\t', encoding='utf-8')
            .itertuples(index=False, name=None)
        )
        for source in filesWeNeed
    ]
def fetchGroups(fileNeeded):
    """Read the tab-separated groups file.

    Returns ``(groups, lists)``: the rows as plain tuples, and for each row a
    list path of the form ``domains/<fifth column>``.
    """
    frame = pandas.read_csv(fileNeeded, delimiter='\t', encoding='utf-8')
    groups = list(frame.itertuples(index=False, name=None))
    lists = ["domains/" + row[4] for row in groups]
    return (groups, lists)
| 2.703125 | 3 |
bin/rstrip.py | cwickham/merely-useful.github.io | 190 | 12768175 | #!/usr/bin/env python
'''
Strip trailing whitespaces from lines.
Usage: rstrip.py file file...
'''
import sys
def main(filenames):
    """Rewrite each file in place with trailing whitespace stripped from every line."""
    for filename in filenames:
        with open(filename, 'r') as reader:
            # rstrip removes the newline too, so put exactly one back.
            cleaned = [line.rstrip() + '\n' for line in reader]
        with open(filename, 'w') as writer:
            writer.writelines(cleaned)
# Script entry point: strip every file named on the command line.
if __name__ == '__main__':
    main(sys.argv[1:])
| 3.515625 | 4 |
loca-testing/scripts/015_myway_test.py | crutchwalkfactory/jaikuengine-mobile-client | 0 | 12768176 | <filename>loca-testing/scripts/015_myway_test.py
# Exercise the `myway` handler defined in 015_myway.py using shared helpers
# from _general.py (execfile is Python 2 only).
execfile("_general.py")
execfile("015_myway.py")

def test_myway() :
    # Minimal fixture: device/general/message dicts as the handler expects.
    dev=dict()
    general=dict()
    general['nodename']='<NAME>'
    # presumably hours/minutes/seconds converted to a unix timestamp --
    # local_to_unixtime is defined in _general.py (TODO confirm).
    general['time']=local_to_unixtime(8, 8, 47)

    msg=dict()
    msg['successcount']=3

    res = myway(general, dev, msg)
    print res
    print "\n----\n\n"
    print res[4]

test_myway()
| 1.671875 | 2 |
codewars/ValidBraces.py | 1067511899/tornado-learn | 1 | 12768177 | '''
Write a function that takes a string of braces, and determines if the order of the braces is valid. It should return true if the string is valid, and false if it's invalid.
This Kata is similar to the Valid Parentheses Kata, but introduces new characters: brackets [], and curly braces {}. Thanks to @arnedag for the idea!
All input strings will be nonempty, and will only consist of parentheses, brackets and curly braces: ()[]{}.
What is considered Valid?
A string of braces is considered valid if all braces are matched with the correct brace.
Examples
"(){}[]" => True
"([{}])" => True
"(}" => False
"[(])" => False
"[({})](]" => False
'''
# 别人实现的,我抄了一个一个题的实现办法,但显然脑筋没有能急转弯
def validBraces(s):
    """Return True when every brace in ``s`` is matched and properly nested."""
    # Repeatedly delete innermost matched pairs; a valid string reduces to "".
    pairs = ('{}', '()', '[]')
    while any(pair in s for pair in pairs):
        for pair in pairs:
            s = s.replace(pair, '')
    return not s
# Closing brace -> its matching opener, and the set of closing braces.
opposite = {')': '(', ']': '[', '}': '{'}
keys = [')', ']', '}']


def dirReduc(plan):
    """Stack-based brace validator: True when ``plan`` is fully matched."""
    stack = []
    for ch in plan:
        if ch in keys and stack and stack[-1] == opposite[ch]:
            # Closing brace matches the most recent opener: cancel the pair.
            stack.pop()
        else:
            stack.append(ch)
    return not stack
if __name__ == '__main__':
    # Expected output: False (the trailing "(]" is mismatched).
    print(dirReduc('[({})](]'))
| 4.40625 | 4 |
doc/source/conf.py | mmmspatz/sexpdata | 31 | 12768178 | # -*- coding: utf-8 -*-
import sys
import os
# Make the package importable so autodoc can find it.
sys.path.insert(0, os.path.abspath('../..'))

# -- General configuration ----------------------------------------------------

extensions = [
    'sphinx.ext.autodoc',
    'sphinx.ext.intersphinx',
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# The suffix of source filenames.
source_suffix = '.rst'

# The master toctree document.
master_doc = 'index'

# General information about the project.
project = u'sexpdata'
copyright = u'2012, <NAME>'

# The short X.Y version.
version = '0.0.4'
# The full version, including alpha/beta/rc tags.
release = '0.0.4.dev1'

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []

# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'


# -- Options for HTML output --------------------------------------------------

html_theme = 'default'
#html_theme_options = {}
html_static_path = []  # default: ['_static']

# Output file base name for HTML help builder.
htmlhelp_basename = 'sexpdatadoc'


# -- Options for LaTeX output -------------------------------------------------

latex_elements = {
    # The paper size ('letterpaper' or 'a4paper').
    #'papersize': 'letterpaper',

    # The font size ('10pt', '11pt' or '12pt').
    #'pointsize': '10pt',

    # Additional stuff for the LaTeX preamble.
    #'preamble': '',
}

# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
#  author, documentclass [howto/manual]).
latex_documents = [
    ('index', 'sexpdata.tex', u'sexpdata Documentation',
     u'Takafumi Arakaki', 'manual'),
]


# -- Options for manual page output -------------------------------------------

# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
    ('index', 'sexpdata', u'sexpdata Documentation',
     [u'Takafumi Arakaki'], 1)
]


# -- Options for Texinfo output -----------------------------------------------

# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
#  dir menu entry, description, category)
texinfo_documents = [
    ('index', 'sexpdata', u'sexpdata Documentation',
     u'Takafumi Arakaki', 'sexpdata', 'One line description of project.',
     'Miscellaneous'),
]

# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'http://docs.python.org/': None}
| 1.8125 | 2 |
integrations/tensorflow_v1/classification.py | clementpoiret/sparseml | 922 | 12768179 | <filename>integrations/tensorflow_v1/classification.py<gh_stars>100-1000
# Copyright (c) 2021 - present / Neuralmagic, Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Perform optimization tasks on image classification tensorflow_v1 including:
* Model training
* Model pruning
* Sparse transfer learning
* pruning sensitivity analysis
* ONNX export
##########
Command help:
usage: classification.py [-h] {train,export,pruning_sensitivity} ...
Run tasks on classification models and datasets using the sparseml API
positional arguments:
{train,export,pruning_sensitivity}
optional arguments:
-h, --help show this help message and exit
##########
train command help:
usage: classification.py train [-h] --arch-key ARCH_KEY
[--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--checkpoint-path CHECKPOINT_PATH]
[--model-kwargs MODEL_KWARGS] --dataset DATASET
--dataset-path DATASET_PATH
[--dataset-kwargs DATASET_KWARGS]
[--model-tag MODEL_TAG] [--save-dir SAVE_DIR]
[--dataset-parallel-calls DATASET_PARALLEL_CALLS]
[--shuffle-buffer-size SHUFFLE_BUFFER_SIZE]
[--recipe-path RECIPE_PATH]
[--sparse-transfer-learn] [--eval-mode]
--train-batch-size TRAIN_BATCH_SIZE
--test-batch-size TEST_BATCH_SIZE
[--logs-dir LOGS_DIR]
[--save-best-after SAVE_BEST_AFTER]
[--save-epochs SAVE_EPOCHS [SAVE_EPOCHS ...]]
[--init-lr INIT_LR] [--optim-args OPTIM_ARGS]
Train and/or prune an image classification model
optional arguments:
-h, --help show this help message and exit
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored
--model-kwargs MODEL_KWARGS
kew word arguments to be passed to model constructor,
should be given as a json object
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.tensorflow_v1.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--dataset-kwargs DATASET_KWARGS
kew word arguments to be passed to dataset
constructor, should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--dataset-parallel-calls DATASET_PARALLEL_CALLS
the number of parallel workers for dataset loading
--shuffle-buffer-size SHUFFLE_BUFFER_SIZE
Shuffle buffer size for dataset loading
--recipe-path RECIPE_PATH
The path to the yaml file containing the modifiers and
schedule to apply them with. If set to
'transfer_learning', then will create a schedule to
enable sparse transfer learning
--sparse-transfer-learn
Enable sparse transfer learning modifiers to enforce
the sparsity for already sparse layers. The modifiers
are added to the ones to be loaded from the recipe-
path
--eval-mode Puts into evaluation mode so that the model can be
evaluated on the desired dataset
--train-batch-size TRAIN_BATCH_SIZE
The batch size to use while training
--test-batch-size TEST_BATCH_SIZE
The batch size to use while testing
--logs-dir LOGS_DIR The path to the directory for saving logs
--save-best-after SAVE_BEST_AFTER
start saving the best validation result after the
given epoch completes until the end of training
--save-epochs SAVE_EPOCHS [SAVE_EPOCHS ...]
epochs to save checkpoints at
--init-lr INIT_LR The initial learning rate to use while training, the
actual initial value used should be set by the
sparseml recipe
--optim-args OPTIM_ARGS
Additional args to be passed to the optimizer passed
in as a json object
##########
export command help:
usage: classification.py export [-h] --arch-key ARCH_KEY
[--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--checkpoint-path CHECKPOINT_PATH]
[--model-kwargs MODEL_KWARGS] --dataset
DATASET --dataset-path DATASET_PATH
[--dataset-kwargs DATASET_KWARGS]
[--model-tag MODEL_TAG] [--save-dir SAVE_DIR]
[--num-samples NUM_SAMPLES]
[--onnx-opset ONNX_OPSET]
Export a model to onnx as well as store sample inputs, outputs, and labels
optional arguments:
-h, --help show this help message and exit
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored
--model-kwargs MODEL_KWARGS
kew word arguments to be passed to model constructor,
should be given as a json object
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.tensorflow_v1.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--dataset-kwargs DATASET_KWARGS
kew word arguments to be passed to dataset
constructor, should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--num-samples NUM_SAMPLES
The number of samples to export along with the model
onnx and pth files (sample inputs and labels as well
as the outputs from model execution)
--onnx-opset ONNX_OPSET
The onnx opset to use for export. Default is 11
##########
pruning_sensitivity command help:
usage: classification.py pruning_sensitivity [-h] --arch-key ARCH_KEY
[--pretrained PRETRAINED]
[--pretrained-dataset PRETRAINED_DATASET]
[--checkpoint-path CHECKPOINT_PATH]
[--model-kwargs MODEL_KWARGS]
--dataset DATASET --dataset-path
DATASET_PATH
[--dataset-kwargs DATASET_KWARGS]
[--model-tag MODEL_TAG]
[--save-dir SAVE_DIR]
[--dataset-parallel-calls
DATASET_PARALLEL_CALLS]
[--shuffle-buffer-size SHUFFLE_BUFFER_SIZE]
[--approximate]
[--steps-per-measurement
STEPS_PER_MEASUREMENT]
[--batch-size BATCH_SIZE]
Run a kernel sparsity (pruning) analysis for a given model
optional arguments:
-h, --help show this help message and exit
--arch-key ARCH_KEY The type of model to use, ex: resnet50, vgg16,
mobilenet put as help to see the full list (will raise
an exception with the list)
--pretrained PRETRAINED
The type of pretrained weights to use, default is true
to load the default pretrained weights for the model.
Otherwise should be set to the desired weights type:
[base, optim, optim-perf]. To not load any weights set
to one of [none, false]
--pretrained-dataset PRETRAINED_DATASET
The dataset to load pretrained weights for if
pretrained is set. Default is None which will load the
default dataset for the architecture. Ex can be set to
imagenet, cifar10, etc
--checkpoint-path CHECKPOINT_PATH
A path to a previous checkpoint to load the state from
and resume the state for. If provided, pretrained will
be ignored
--model-kwargs MODEL_KWARGS
kew word arguments to be passed to model constructor,
should be given as a json object
--dataset DATASET The dataset to use for training, ex: imagenet,
imagenette, cifar10, etc. Set to imagefolder for a
generic dataset setup with an image folder structure
setup like imagenet or loadable by a dataset in
sparseml.tensorflow_v1.datasets
--dataset-path DATASET_PATH
The root path to where the dataset is stored
--dataset-kwargs DATASET_KWARGS
kew word arguments to be passed to dataset
constructor, should be given as a json object
--model-tag MODEL_TAG
A tag to use for the model for saving results under
save-dir, defaults to the model arch and dataset used
--save-dir SAVE_DIR The path to the directory for saving results
--dataset-parallel-calls DATASET_PARALLEL_CALLS
the number of parallel workers for dataset loading
--shuffle-buffer-size SHUFFLE_BUFFER_SIZE
Shuffle buffer size for dataset loading
--approximate True to approximate without running data through the
model, otherwise will run a one shot analysis
--steps-per-measurement STEPS_PER_MEASUREMENT
The number of steps (batches) to run for each
measurement
--batch-size BATCH_SIZE
The batch size to use while performing analysis
#########
EXAMPLES
#########
##########
Example command for pruning resnet50 on imagenet dataset:
python scripts/tensorflow_v1/classification.py train \
--recipe-path ~/sparseml_recipes/pruning_resnet50.yaml \
--arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012 \
--train-batch-size 256 --test-batch-size 1024
##########
Example command for transfer learning sparse mobilenet_v1 on an image folder dataset:
python scripts/tensorflow_v1/classification.py train \
--sparse-transfer-learn \
--recipe-path ~/sparseml_recipes/pruning_mobilenet.yaml \
--arch-key mobilenet_v1 --pretrained optim \
--dataset imagefolder --dataset-path ~/datasets/my_imagefolder_dataset \
--train-batch-size 256 --test-batch-size 1024
##########
Example command for exporting ResNet50:
python scripts/tensorflow_v1/classification.py export \
--arch-key resnet50 --dataset imagenet --dataset-path ~/datasets/ILSVRC2012
##########
Example command for running approximated KS sensitivity analysis on mobilenet:
python scripts/tensorflow_v1/classification.py pruning_sensitivity \
--approximate \
--arch-key mobilenet --dataset imagenet \
--dataset-path ~/datasets/ILSVRC2012
##########
Example command for running one shot KS sensitivity analysis on resnet50 for coco:
python scripts/tensorflow_v1/classification.py pruning_sensitivity \
--arch-key resnet50 --dataset imagenet \
--dataset-path ~/datasets/ILSVRC2012
"""
import argparse
import json
import math
import os
from typing import Dict, Optional, Tuple
import numpy
from sparseml import get_main_logger
from sparseml.tensorflow_v1.datasets import (
Dataset,
DatasetRegistry,
create_split_iterators_handle,
)
from sparseml.tensorflow_v1.models import ModelRegistry
from sparseml.tensorflow_v1.optim import (
ConstantPruningModifier,
ScheduledModifierManager,
pruning_loss_sens_magnitude,
pruning_loss_sens_one_shot,
pruning_loss_sens_op_vars,
)
from sparseml.tensorflow_v1.utils import (
GraphExporter,
accuracy,
batch_cross_entropy_loss,
tf_compat,
write_simple_summary,
)
from sparseml.utils import create_dirs
# Module-level logger shared by all commands in this script.
LOGGER = get_main_logger()

# Subcommand names: used both when registering argparse subparsers in
# parse_args() and when dispatching in main().
TRAIN_COMMAND = "train"
EXPORT_COMMAND = "export"
# NOTE(review): identifier spelling "SENSITVITY" kept as-is so external
# references to this name keep working; the CLI value itself is spelled correctly.
PRUNING_SENSITVITY_COMMAND = "pruning_sensitivity"
def parse_args():
    """Build and parse the command line arguments for this script.

    One subparser is registered per supported command (train, export,
    pruning_sensitivity). Arguments shared by every command (model, dataset,
    logging/saving) are added to each subparser, followed by the
    command-specific arguments.

    :return: the parsed arguments namespace; ``command`` holds the chosen
        subcommand name
    """
    parser = argparse.ArgumentParser(
        description="Run tasks on classification models and datasets "
        "using the sparseml API"
    )

    subparsers = parser.add_subparsers(dest="command")

    train_parser = subparsers.add_parser(
        TRAIN_COMMAND,
        description="Train and/or prune an image classification model",
    )
    export_parser = subparsers.add_parser(
        EXPORT_COMMAND,
        description="Export a model to onnx as well as "
        "store sample inputs, outputs, and labels",
    )
    pruning_sensitivity_parser = subparsers.add_parser(
        PRUNING_SENSITVITY_COMMAND,
        description="Run a kernel sparsity (pruning) analysis for a given model",
    )

    parsers = [
        train_parser,
        export_parser,
        pruning_sensitivity_parser,
    ]
    for par in parsers:
        # general arguments
        # model args
        par.add_argument(
            "--arch-key",
            type=str,
            required=True,
            help="The type of model to use, ex: resnet50, vgg16, mobilenet "
            "put as help to see the full list (will raise an exception with the list)",
        )
        par.add_argument(
            "--pretrained",
            type=str,
            default=True,
            help="The type of pretrained weights to use, "
            "default is true to load the default pretrained weights for the model. "
            "Otherwise should be set to the desired weights type: "
            "[base, optim, optim-perf]. "
            "To not load any weights set to one of [none, false]",
        )
        par.add_argument(
            "--pretrained-dataset",
            type=str,
            default=None,
            help="The dataset to load pretrained weights for if pretrained is set. "
            "Default is None which will load the default dataset for the architecture."
            " Ex can be set to imagenet, cifar10, etc",
        )
        par.add_argument(
            "--checkpoint-path",
            type=str,
            default=None,
            help="A path to a previous checkpoint to load the state from and "
            "resume the state for. If provided, pretrained will be ignored",
        )
        # fixed help typo: "kew word" -> "keyword"
        par.add_argument(
            "--model-kwargs",
            type=json.loads,
            default={},
            help="keyword arguments to be passed to model constructor, should be "
            "given as a json object",
        )

        # dataset args
        par.add_argument(
            "--dataset",
            type=str,
            required=True,
            help="The dataset to use for training, "
            "ex: imagenet, imagenette, cifar10, etc. "
            "Set to imagefolder for a generic dataset setup "
            "with an image folder structure setup like imagenet or loadable by a "
            "dataset in sparseml.tensorflow_v1.datasets",
        )
        par.add_argument(
            "--dataset-path",
            type=str,
            required=True,
            help="The root path to where the dataset is stored",
        )
        # fixed help typo: "kew word" -> "keyword"
        par.add_argument(
            "--dataset-kwargs",
            type=json.loads,
            default={},
            help="keyword arguments to be passed to dataset constructor, should be "
            "given as a json object",
        )

        # logging and saving
        par.add_argument(
            "--model-tag",
            type=str,
            default=None,
            help="A tag to use for the model for saving results under save-dir, "
            "defaults to the model arch and dataset used",
        )
        par.add_argument(
            "--save-dir",
            type=str,
            default="tensorflow_v1_classification",
            help="The path to the directory for saving results",
        )

        # task specific arguments
        if par in [train_parser, pruning_sensitivity_parser]:
            par.add_argument(
                "--dataset-parallel-calls",
                type=int,
                default=4,
                help="the number of parallel workers for dataset loading",
            )
            par.add_argument(
                "--shuffle-buffer-size",
                type=int,
                default=1000,
                help="Shuffle buffer size for dataset loading",
            )

        if par == train_parser:
            par.add_argument(
                "--recipe-path",
                type=str,
                default=None,
                help="The path to the yaml file containing the modifiers and "
                "schedule to apply them with. If set to 'transfer_learning', "
                "then will create a schedule to enable sparse transfer learning",
            )
            par.add_argument(
                "--sparse-transfer-learn",
                action="store_true",
                help=(
                    "Enable sparse transfer learning modifiers to enforce the sparsity "
                    "for already sparse layers. The modifiers are added to the "
                    "ones to be loaded from the recipe-path"
                ),
            )
            par.add_argument(
                "--eval-mode",
                action="store_true",
                help="Puts into evaluation mode so that the model can be "
                "evaluated on the desired dataset",
            )
            par.add_argument(
                "--train-batch-size",
                type=int,
                required=True,
                help="The batch size to use while training",
            )
            par.add_argument(
                "--test-batch-size",
                type=int,
                required=True,
                help="The batch size to use while testing",
            )
            par.add_argument(
                "--logs-dir",
                type=str,
                default=os.path.join(
                    "tensorflow_v1_classification_train", "tensorboard-logs"
                ),
                help="The path to the directory for saving logs",
            )
            par.add_argument(
                "--save-best-after",
                type=int,
                default=-1,
                help="start saving the best validation result after the given "
                "epoch completes until the end of training",
            )
            par.add_argument(
                "--save-epochs",
                type=int,
                default=[],
                nargs="+",
                help="epochs to save checkpoints at",
            )
            par.add_argument(
                "--init-lr",
                type=float,
                default=1e-9,
                help="The initial learning rate to use while training, "
                "the actual initial value used should be set by the sparseml recipe",
            )
            par.add_argument(
                "--optim-args",
                type=json.loads,
                default={},
                help="Additional args to be passed to the optimizer passed in"
                " as a json object",
            )

        if par == export_parser:
            par.add_argument(
                "--num-samples",
                type=int,
                default=100,
                help="The number of samples to export along with the model onnx "
                "and pth files (sample inputs and labels as well as the outputs "
                "from model execution)",
            )
            par.add_argument(
                "--onnx-opset",
                type=int,
                default=11,
                help="The onnx opset to use for export. Default is 11",
            )

        if par == pruning_sensitivity_parser:
            par.add_argument(
                "--approximate",
                action="store_true",
                help="True to approximate without running data through the model, "
                "otherwise will run a one shot analysis",
            )
            par.add_argument(
                "--steps-per-measurement",
                type=int,
                default=15,
                help="The number of steps (batches) to run for each measurement",
            )
            par.add_argument(
                "--batch-size",
                type=int,
                default=64,
                help="The batch size to use while performing analysis",
            )

    return parser.parse_args()
def _setup_save_dirs(args) -> Tuple[str, Optional[str]]:
    """Create (and return) the results directory and, for training, the logs dir.

    When no ``--model-tag`` is given, a unique model id is derived from the
    architecture and dataset, suffixed with an incrementing counter if a
    directory with that name already exists.

    :param args: parsed command line arguments
    :return: tuple of (save_dir, logs_dir); logs_dir is None for non-train commands
    """
    # logging and saving setup
    save_dir = os.path.abspath(os.path.expanduser(args.save_dir))
    # removed redundant single-arg os.path.join around args.logs_dir
    logs_dir = (
        os.path.abspath(os.path.expanduser(args.logs_dir))
        if args.command == TRAIN_COMMAND
        else None
    )

    if not args.model_tag:
        model_tag = "{}_{}".format(args.arch_key.replace("/", "."), args.dataset)
        model_id = model_tag
        model_inc = 0
        # set location to check for models with same name
        model_main_dir = logs_dir or save_dir

        while os.path.exists(os.path.join(model_main_dir, model_id)):
            model_inc += 1
            model_id = "{}__{:02d}".format(model_tag, model_inc)
    else:
        model_id = args.model_tag

    save_dir = os.path.join(save_dir, model_id)
    create_dirs(save_dir)

    # logs dir setup
    if args.command == TRAIN_COMMAND:
        logs_dir = os.path.join(logs_dir, model_id)
        create_dirs(logs_dir)
    else:
        logs_dir = None
    LOGGER.info("Model id is set to {}".format(model_id))
    return save_dir, logs_dir
def _create_dataset(args, train=True, image_size=None) -> Tuple[Dataset, int]:
    """Create the requested dataset split and report its number of classes.

    :param args: parsed command line arguments (dataset, dataset_path, dataset_kwargs)
    :param train: True for the train split, False for the validation split
    :param image_size: image size override; an ``image_size`` entry inside
        ``args.dataset_kwargs`` takes precedence
    :return: tuple of (dataset, num_classes)
    """
    # copy before popping: the original code used `del` on args.dataset_kwargs,
    # which mutated the shared namespace so a second call (e.g. the val split in
    # train()) silently lost the user-provided image_size
    kwargs = dict(args.dataset_kwargs)
    image_size = kwargs.pop("image_size", image_size)

    dataset = DatasetRegistry.create(
        args.dataset,
        root=args.dataset_path,
        train=train,
        image_size=image_size,
        **kwargs,
    )
    LOGGER.info("created {} dataset: {}".format("train" if train else "val", dataset))

    # get num_classes
    if args.dataset == "imagefolder":
        num_classes = dataset.num_classes
    else:
        dataset_attributes = DatasetRegistry.attributes(args.dataset)
        num_classes = dataset_attributes["num_classes"]

    return dataset, num_classes
def _build_dataset(args, dataset: Dataset, batch_size: int) -> Dataset:
    """Materialize the dataset into a batched, shuffled, prefetched pipeline."""
    pipeline_options = {
        "shuffle_buffer_size": args.shuffle_buffer_size,
        "prefetch_buffer_size": batch_size,
        "num_parallel_calls": args.dataset_parallel_calls,
    }
    return dataset.build(batch_size, **pipeline_options)
def _create_model(args, num_classes, inputs, training=False):
    """Build the model graph for the configured architecture.

    :param args: parsed command line arguments (arch_key, model_kwargs)
    :param num_classes: number of output classes for the classifier head
    :param inputs: input tensor/placeholder fed to the model
    :param training: whether to construct the graph in training mode
    :return: the model's output tensor
    """
    arch = args.arch_key
    model_outputs = ModelRegistry.create(
        arch,
        inputs,
        training=training,
        num_classes=num_classes,
        **args.model_kwargs,
    )
    LOGGER.info("created model {}".format(arch))
    return model_outputs
def _load_model(args, sess, checkpoint_path=None):
    """Initialize graph variables, then load pretrained or checkpoint weights.

    Variables must be initialized first so that loading can overwrite them
    in place within the given session.

    :param args: parsed command line arguments (arch_key, pretrained, ...)
    :param sess: the tf session to load the weights into
    :param checkpoint_path: optional checkpoint override; falls back to
        args.checkpoint_path. When set, pretrained weights are ignored.
    """
    # initialize all graph variables before any weights are restored
    sess.run(
        [
            tf_compat.global_variables_initializer(),
            tf_compat.local_variables_initializer(),
        ]
    )
    checkpoint_path = checkpoint_path or args.checkpoint_path
    ModelRegistry.load_pretrained(
        args.arch_key,
        pretrained=args.pretrained,
        pretrained_dataset=args.pretrained_dataset,
        pretrained_path=checkpoint_path,
        sess=sess,
    )
    if checkpoint_path:
        LOGGER.info("Loaded model weights from checkpoint: {}".format(checkpoint_path))
def _save_checkpoint(args, sess, save_dir, checkpoint_name) -> str:
    """Save the current session state under <save_dir>/<checkpoint_name>/model.

    :param args: parsed command line arguments (arch_key)
    :param sess: the tf session holding the weights to save
    :param save_dir: root directory for results
    :param checkpoint_name: subdirectory name for this checkpoint
    :return: the full path of the saved checkpoint file
    """
    # removed redundant nested os.path.join(os.path.join(...)) call
    checkpoint_path = os.path.join(save_dir, checkpoint_name, "model")
    create_dirs(checkpoint_path)

    saver = ModelRegistry.saver(args.arch_key)
    saved_name = saver.save(sess, checkpoint_path)
    checkpoint_path = os.path.join(checkpoint_path, saved_name)
    LOGGER.info("Checkpoint saved to {}".format(checkpoint_path))

    return checkpoint_path
def _save_recipe(
    recipe_manager: ScheduledModifierManager,
    save_dir: str,
):
    """Serialize the manager's modifier recipe to <save_dir>/recipe.yaml."""
    target = os.path.join(save_dir, "recipe.yaml")
    recipe_manager.save(target)
    LOGGER.info(f"Saved recipe to {target}")
def train(args, save_dir, logs_dir):
    """Train (and optionally prune) a classification model per the sparseml recipe.

    Builds the train/val input pipelines, the model graph and optimizer,
    applies the modifiers from the recipe each step, logs to tensorboard,
    checkpoints the best validation loss and requested epochs, then runs
    the ONNX export flow on the final checkpoint.

    :param args: parsed command line arguments for the train command
    :param save_dir: directory for checkpoints and exported artifacts
    :param logs_dir: directory for tensorboard logs
    """
    # setup dataset
    with tf_compat.device("/cpu:0"):
        train_dataset, _ = _create_dataset(args, train=True)
        val_dataset, num_classes = _create_dataset(args, train=False)
        # calc steps
        train_steps = math.ceil(len(train_dataset) / args.train_batch_size)
        val_steps = math.ceil(len(val_dataset) / args.test_batch_size)
        # build datasets
        train_dataset = _build_dataset(args, train_dataset, args.train_batch_size)
        val_dataset = _build_dataset(args, val_dataset, args.test_batch_size)
    handle, iterator, (train_iter, val_iter) = create_split_iterators_handle(
        [train_dataset, val_dataset]
    )

    # set up model graph
    images, labels = iterator.get_next()
    training = tf_compat.placeholder(dtype=tf_compat.bool, shape=[])
    outputs = _create_model(args, num_classes, images, training)

    # set up training objects
    loss = batch_cross_entropy_loss(outputs, labels)
    acc = accuracy(outputs, labels)
    global_step = tf_compat.train.get_or_create_global_step()
    train_op = tf_compat.train.AdamOptimizer(
        learning_rate=args.init_lr, **args.optim_args
    ).minimize(loss, global_step=global_step)
    update_ops = tf_compat.get_collection(tf_compat.GraphKeys.UPDATE_OPS)
    LOGGER.info("Created update ops for training")

    # set up sparseml modifier ops
    add_mods = (
        ConstantPruningModifier(params="__ALL__")
        if args.sparse_transfer_learn
        else None
    )
    manager = ScheduledModifierManager.from_yaml(
        file_path=args.recipe_path, add_modifiers=add_mods
    )
    mod_ops, mod_extras = manager.create_ops(train_steps, global_step)
    _save_recipe(recipe_manager=manager, save_dir=save_dir)

    with tf_compat.Session() as sess:
        # set up tensorboard logging
        summary_writer = tf_compat.summary.FileWriter(logs_dir, sess.graph)
        summaries = tf_compat.summary.merge_all()
        LOGGER.info("Logging to tensorboard at {}".format(logs_dir))

        # initialize variables, load pretrained weights, initialize modifiers
        train_iter_handle, val_iter_handle = sess.run(
            [train_iter.string_handle(), val_iter.string_handle()]
        )
        LOGGER.info("Initialized graph variables")
        _load_model(args, sess)
        manager.initialize_session()
        LOGGER.info("Initialized SparseML modifiers")

        best_loss = None
        for epoch in range(manager.max_epochs):
            # train
            LOGGER.info("Training for epoch {}...".format(epoch))
            sess.run(train_iter.initializer)
            train_acc, train_loss = [], []
            for step in range(train_steps):
                _, __, meas_step, meas_loss, meas_acc, meas_summ = sess.run(
                    [train_op, update_ops, global_step, loss, acc, summaries],
                    feed_dict={handle: train_iter_handle, training: True},
                )
                if step >= train_steps - 1:
                    # log the general summaries on the last training step
                    summary_writer.add_summary(meas_summ, meas_step)

                # run modifier ops
                sess.run(mod_ops)

                # summarize
                write_simple_summary(summary_writer, "Train/Loss", meas_loss, meas_step)
                write_simple_summary(
                    summary_writer, "Train/Acc", meas_acc * 100.0, meas_step
                )
                train_acc.append(meas_acc)
                train_loss.append(meas_loss)
            LOGGER.info(
                "Epoch {} - Train Loss: {}, Train Acc: {}".format(
                    epoch, numpy.mean(train_loss).item(), numpy.mean(train_acc).item()
                )
            )

            # val
            LOGGER.info("Validating for epoch {}...".format(epoch))
            sess.run(val_iter.initializer)
            val_acc, val_loss = [], []
            for step in range(val_steps):
                meas_loss, meas_acc = sess.run(
                    [loss, acc],
                    feed_dict={handle: val_iter_handle, training: False},
                )
                val_acc.append(meas_acc)
                val_loss.append(meas_loss)
            write_simple_summary(
                summary_writer, "Val/Loss", numpy.mean(val_loss).item(), epoch
            )
            write_simple_summary(
                summary_writer, "Val/Acc", numpy.mean(val_acc).item(), epoch
            )
            val_loss = numpy.mean(val_loss).item()
            # BUGFIX: the Val Acc value previously logged numpy.mean(train_acc)
            # (copy/paste from the train log line) instead of the val accuracy
            LOGGER.info(
                "Epoch {} - Val Loss: {}, Val Acc: {}".format(
                    epoch, val_loss, numpy.mean(val_acc).item()
                )
            )

            if epoch >= args.save_best_after and (
                best_loss is None or val_loss <= best_loss
            ):
                _save_checkpoint(args, sess, save_dir, "checkpoint-best")
                best_loss = val_loss

            if args.save_epochs and epoch in args.save_epochs:
                _save_checkpoint(
                    args, sess, save_dir, "checkpoint-epoch-{}".format(epoch)
                )

        # cleanup graph and save final checkpoint
        manager.complete_graph()
        checkpoint_path = _save_checkpoint(args, sess, save_dir, "final-checkpoint")

    LOGGER.info("Running ONNX export flow")
    export(
        args,
        save_dir,
        checkpoint_path=checkpoint_path,
        skip_samples=True,
        num_classes=num_classes,
        opset=11,
    )
def export(
    args,
    save_dir,
    checkpoint_path=None,
    skip_samples=False,
    num_classes=None,
    opset=None,
):
    """Export the model to onnx along with sample inputs/outputs.

    :param args: parsed command line arguments
    :param save_dir: directory to write the exported artifacts into
    :param checkpoint_path: optional checkpoint to load instead of
        args.checkpoint_path (used by train() for the final checkpoint)
    :param skip_samples: True to skip exporting sample batches; requires
        num_classes to be provided since no dataset will be created
    :param num_classes: number of classes; inferred from the val dataset
        unless skip_samples is set
    :param opset: onnx opset override; falls back to args.onnx_opset
    """
    # num_classes must be supplied explicitly when no dataset is created
    assert not skip_samples or num_classes
    # dataset creation
    if not skip_samples:
        val_dataset, num_classes = _create_dataset(args, train=False)

    # fresh graph so the export is independent of any training graph state
    with tf_compat.Graph().as_default():
        input_shape = ModelRegistry.input_shape(args.arch_key)
        inputs = tf_compat.placeholder(
            tf_compat.float32, [None] + list(input_shape), name="inputs"
        )
        outputs = _create_model(args, num_classes, inputs)

        with tf_compat.Session() as sess:
            _load_model(
                args, sess, checkpoint_path=checkpoint_path or args.checkpoint_path
            )

            exporter = GraphExporter(save_dir)

            if not skip_samples:
                # Export a batch of samples and expected outputs
                tf_dataset = val_dataset.build(
                    args.num_samples, repeat_count=1, num_parallel_calls=1
                )
                tf_iter = tf_compat.data.make_one_shot_iterator(tf_dataset)
                features, _ = tf_iter.get_next()
                inputs_val = sess.run(features)
                exporter.export_samples([inputs], [inputs_val], [outputs], sess)

            # Export model to tensorflow checkpoint format
            LOGGER.info("exporting tensorflow in {}".format(save_dir))
            exporter.export_checkpoint(sess=sess)

            # Export model to pb format
            LOGGER.info("exporting pb in {}".format(exporter.pb_path))
            exporter.export_pb(outputs=[outputs])

    # Export model to onnx format (works from the saved pb, outside the session)
    LOGGER.info("exporting onnx in {}".format(exporter.onnx_path))
    exporter.export_onnx([inputs], [outputs], opset=opset or args.onnx_opset)
def pruning_loss_sensitivity(args, save_dir):
    """Run a pruning (kernel sparsity) loss sensitivity analysis.

    With ``--approximate`` the analysis is estimated from weight magnitudes
    without running data; otherwise a one-shot analysis feeds training
    batches through the model. Results are written to <save_dir> as both a
    json file and a plot.

    :param args: parsed command line arguments for the pruning_sensitivity command
    :param save_dir: directory to save the json/png results into
    """
    input_shape = ModelRegistry.input_shape(args.arch_key)
    train_dataset, num_classes = _create_dataset(
        args, train=True, image_size=input_shape[1]
    )

    with tf_compat.Graph().as_default() as graph:
        # create model graph
        inputs = tf_compat.placeholder(
            tf_compat.float32, [None] + list(input_shape), name="inputs"
        )
        outputs = _create_model(args, num_classes, inputs)

        with tf_compat.Session() as sess:
            _load_model(args, sess, checkpoint_path=args.checkpoint_path)

            if args.approximate:
                LOGGER.info("Running weight magnitude loss sensitivity analysis...")
                analysis = pruning_loss_sens_magnitude(graph, sess)
            else:
                op_vars = pruning_loss_sens_op_vars(graph)
                train_steps = math.ceil(len(train_dataset) / args.batch_size)
                train_dataset = _build_dataset(args, train_dataset, args.batch_size)
                handle, iterator, dataset_iter = create_split_iterators_handle(
                    [train_dataset]
                )
                dataset_iter = dataset_iter[0]
                images, labels = iterator.get_next()
                loss = batch_cross_entropy_loss(outputs, labels)
                tensor_names = ["inputs:0", labels.name]
                sess.run(dataset_iter.initializer)

                def feed_dict_creator(step: int) -> Dict[str, tf_compat.Tensor]:
                    # pulls the next batch eagerly and maps it onto the placeholders
                    assert step < train_steps
                    batch_data = [
                        tens.eval(session=sess) for tens in dataset_iter.get_next()
                    ]
                    return dict(zip(tensor_names, batch_data))

                LOGGER.info("Running one shot loss sensitivity analysis...")
                analysis = pruning_loss_sens_one_shot(
                    op_vars=op_vars,
                    loss_tensor=loss,
                    steps_per_measurement=args.steps_per_measurement,
                    feed_dict_creator=feed_dict_creator,
                    sess=sess,
                )

    # saving and printing results
    LOGGER.info("completed...")
    LOGGER.info("Saving results in {}".format(save_dir))
    analysis.save_json(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.json"
            if args.approximate
            else "ks_one_shot_sensitivity.json",
        )
    )
    # BUGFIX: the plot path previously joined save_dir twice
    # (os.path.join(save_dir, os.path.join(save_dir, ...)))
    analysis.plot(
        os.path.join(
            save_dir,
            "ks_approx_sensitivity.png"
            if args.approximate
            else "ks_one_shot_sensitivity.png",
        ),
        plot_integral=True,
    )
    analysis.print_res()
def main(args):
    """Dispatch the parsed command line arguments to the matching task runner."""
    # set up saving and logging dirs
    save_dir, logs_dir = _setup_save_dirs(args)

    # run the command-specific task (commands are mutually exclusive)
    command = args.command
    if command == TRAIN_COMMAND:
        train(args, save_dir, logs_dir)
    elif command == EXPORT_COMMAND:
        export(args, save_dir)
    elif command == PRUNING_SENSITVITY_COMMAND:
        pruning_loss_sensitivity(args, save_dir)
if __name__ == "__main__":
    # script entry point: parse the CLI arguments and dispatch to the chosen command
    args_ = parse_args()
    main(args_)
# === Advanced_exams/16december_2020/Matching_Problem.py (repo: petel3/Softuni_education) ===
from collections import deque
# "Matching Problem" (SoftUni advanced exam task):
# males are consumed from the end of a list (stack), females from the front
# of a deque (queue); equal values count as a match.
matches_count = 0
# keep only the strictly positive values from each input line
males = [int(el) for el in input().split() if int(el) > 0]
females = deque([int(el) for el in input().split() if int(el) > 0])

while males and females:
    if males[-1] % 25 == 0 or females[0] % 25 == 0:
        # any value divisible by 25 is discarded from its side without matching
        if males[-1] % 25 == 0:
            males.pop()
        if females[0] % 25 == 0:
            females.popleft()
    else:
        male = males.pop()
        female = females.popleft()
        if male == female:
            matches_count += 1
        else:
            # an unmatched male loses 2 and is re-queued only while still positive
            if (male - 2) > 0:
                males.append(male - 2)

print(f"Matches: {matches_count}")
if males:
    # males are reported in stack order (top of the stack first)
    print(f"Males left: {', '.join([str(el)for el in males[::-1]])}")
else:
    print("Males left: none")
if females:
    print(f"Females left: {', '.join([str(el) for el in females])}")
else:
    print("Females left: none")
# === gym_delta_robot_trampoline/envs/delta_robot_trampoline_env.py (repo: RicoJia/delta_robot_trampoline) ===
import gym
import pybullet as p
import numpy as np
from gym_delta_robot_trampoline.resources.delta_robot_trampoline import Omnid_Simulator
import matplotlib.pyplot as plt
import os
import pybullet_data
"""
Action space (1,3) : [theta_1_torque, theta_2_torque, theta_3_torque]
Observation space (1,18) : [3 joint_positions, 3 joint velocities, 3 eef positions, 3 eef velocities, 3
3 ball positions, 3 ball velocities]
"""
# Episode ends with a penalty once the ball's z drops below this altitude.
FAIL_ALTITUDE = 0.20
# Ball-above-end-effector height difference that triggers the bonus reward.
BONUS_ALTITUDE_DIFF = 0.16
# Steps allowed without reaching the bonus altitude before the episode is truncated.
MAX_STEP_NUM = 800
class DeltaRobotTrampolineEnv(gym.Env):
    """PyBullet gym environment for a delta robot bouncing a ball ("trampoline").

    Actions are the three joint torques; observations are the 18-element state
    described in the module docstring (joint, end-effector and ball
    positions/velocities).
    """
    metadata = {'render.modes': ['human']}

    def __init__(self):
        self.step_counter = 0
        # TODO: switch to p.connect(p.DIRECT) for headless training;
        # GUI mode is kept here for visualization
        # self.client = p.connect(p.DIRECT)
        self.client = p.connect(p.GUI)
        p.resetDebugVisualizerCamera(cameraDistance=1.5, cameraYaw=0, cameraPitch=-40, cameraTargetPosition=[0.05,-0.35,0.2])
        # torques for the three actuated joints
        self.action_space = gym.spaces.box.Box(
            low=np.array([-100] * 3),
            high=np.array([100] * 3))
        # bounds follow the 18-element state layout in the module docstring
        self.observation_space = gym.spaces.box.Box(
            low=np.array([-np.pi/4, -np.pi/4, -np.pi/4, -100, -100, -100, \
                          -5, -5, -5, -50, -50, -50, \
                          -20, -20, 0, -50, -50, -50]),
            high=np.array([np.pi/2, np.pi/2, np.pi/2, 100, 100, 100, \
                           5, 5, 5, 50, 50, 50, \
                           20, 20, 20, 50, 50, 50]))
        self.np_random, _ = gym.utils.seeding.np_random()
        # enable visualization
        # TODO: disable rendering when running headless
        p.configureDebugVisualizer(p.COV_ENABLE_RENDERING,1)

    def reset(self):
        """Reload the scene, let the ball settle onto the robot, return the first observation."""
        p.resetSimulation()
        # episode params
        self.step_counter = 0
        self.above_BONUS_ALTITUDE_DIFF = False
        p.loadURDF(os.path.join(pybullet_data.getDataPath(), "plane.urdf")) # loads from the root pybullet library
        p.setGravity(0,0,-10)
        p.setRealTimeSimulation(0)
        # set up the robot and the ball
        self.omnid_simulator = Omnid_Simulator()
        initialized = False
        self.omnid_simulator.attachBallToRobot() # we want the ball to land safely onto the robot.
        while not initialized:
            self.omnid_simulator.updateStates()
            if self.omnid_simulator.ballonRobot():
                self.omnid_simulator.detachBallFromRobot() # now we can let the ball move freely!
                initialized = True
            p.stepSimulation()
        self.observation = self.omnid_simulator.updateStates().astype(np.float32)
        return self.observation

    def step(self, action):
        """Apply the three joint torques, advance one sim step, and score the result.

        Reward: -25 (done) when the ball falls below FAIL_ALTITUDE; +50 the first
        step the ball rises BONUS_ALTITUDE_DIFF above the end effector (resets the
        step counter); otherwise a small -0.1 living penalty, truncating after
        MAX_STEP_NUM steps without a bonus.
        """
        self.omnid_simulator.applyJointTorque({"theta_1": action[0], \
                                               "theta_2": action[1], \
                                               "theta_3": action[2]})
        p.stepSimulation()
        self.step_counter += 1
        self.observation = self.omnid_simulator.updateStates()

        # z < FAIL_ALTITUDE fails; rising over the height threshold earns the bonus
        z = self.observation[14]  # ball z position (see module docstring layout)
        if z < FAIL_ALTITUDE:
            reward = -25
            done = True
        else:
            # observation[8] is the end-effector z position
            height_diff = z - self.observation[8]
            if height_diff >= BONUS_ALTITUDE_DIFF:
                done = False
                if not self.above_BONUS_ALTITUDE_DIFF:
                    # bonus granted only on the rising edge of crossing the threshold
                    reward = 50
                    self.above_BONUS_ALTITUDE_DIFF = True
                    self.step_counter = 0
                else:
                    reward = 0
            else: # ball is above the platform but lower than the relative height threshold
                if self.above_BONUS_ALTITUDE_DIFF:
                    self.above_BONUS_ALTITUDE_DIFF = False
                reward = -0.1
                done = False
                if self.step_counter >= MAX_STEP_NUM:
                    done = True
        info = {"eef position: ": self.observation[6:9], \
                "ball position: ": self.observation[12:15]}
        return self.observation.astype(np.float32), reward, done, info

    def render(self, mode='human'):
        """ Render is an interface function. Since we are using GUI, we do not need this.
        We use GUI because computing view matrices and projection matrices is much slower. """
        pass

    def close(self):
        # disconnect the pybullet client owned by this env
        p.disconnect(self.client)

    def seed(self, seed=None):
        # reseed the env's RNG and return the seed actually used (gym convention)
        self.np_random, seed = gym.utils.seeding.np_random(seed)
        return [seed]
# === day04zy.py (repo: aixiaocha/test) — disabled exercise snippets only ===
#EP1
'''
def getPentagonalNumber(n):
i = 1
for i in range(1,n+1):
s = (i*(3*i-1)*1.0)/2
print (str(s)+' ',end='')
if i%10==0:
print()
getPentagonalNumber(100)
'''
#EP2
'''
def sum(n):
s= 0
while(n%10!=0):
a=n%10
b=n//10
s=s+a
n=b
print(s)
a= eval(raw_input("enter a int:"))
sum(a)
'''
#EP3
'''
def display(n1,n2,n3):
b=[n1,n2,n3]
b.sort()
print(b)
a1,a2,a3=eval(raw_input("enter three numbers:"))
display(a1,a2,a3)
'''
#EP4
'''
inves = eval(input("the amount inversted:"))
monthly = eval(input("annual interest rate:"))
print("annual\tfuture value")
def funtureinver(inves,monthly,years):
return inves*pow((1+monthly/100/12),years*12)
for i in range(1,31):
c=funtureinver(inves,monthly,i)
print("%d\t%.2f"%(i,c),end=" ")
print()
'''
#EP5
'''
def printchars(c1,c2,number):
m=0
a=ord(c1)
b=ord(c2)+1
for i in range(a,b):
print(chr(i),end='')
m=m+1
if(m%number==0):
print("")
a,b=input("enter start to end ascii:").split(',')
c = eval(input("enter number:"))
printchars('1','Z',10)
'''
#EP6
'''
def number(year):
if((year%4==0)&(year%100!=0))|(year%400==0):
print("%d:366"%(i))
else:
print("%d:365"%(i))
for i in range(2010,2021):
number(i)
'''
#EP7
'''
def distance(a1,b1,a2,b2):
print(((a1-a2)*(a1-a2)+(b1-b2)*(b1-b2))**0.5)
a1,b1=eval(raw_input("enter a1 and a2 for point1: "))
a2,b2=eval(raw_input("enter a1 and a2 for point2: "))
distance(a1,b1,a2,b2)
'''
#EP8
'''
import math
print ("p\t2^p-1")
def n(a):
f=0
for j in range(2,int(math.sqrt(a)+1)):
if a%j==0 :
f = 0
else :
f = 1
return f
print("2\t3")
for i in range(1,32):
c=pow(2,i)-1
if(n(c)):
print("%d\t%d"%(i,c))
'''
#EP9
'''
from time import *
print(ctime(time()))
'''
#EP10
'''
import random
n = random.randint(1,6)
m = random.randint(1,6)
s = n+m
if (s==2)|(s==3)|(s==12):
print("you rolled {} + {} = {}\nyou lose".format(n,m,s))
elif (s==7)|(s==11):
print("you rolled {} + {} = {}\nyou win".format(n,m,s))
else :
print("you rolled {} + {} = {}\nyou is {}".format(n,m,s,s))
n1 = random.randint(1,6)
m1 = random.randint(1,6)
s1 = n1+m1
if(s1!=s):
print("you rolled {} + {} = {}\nyou lose".format(n1,m1,s1))
else :
print("you rolled {} + {} = {}\nyou win".format(n1,m1,s1))
'''
#EP11
# === cc_licenses/urls.py (repo: brylie/cc-licenses) ===
"""cc_licenses URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add an import: from blog import urls as blog_urls
2. Add a URL to urlpatterns: url(r'^blog/', include(blog_urls))
"""
# Third-party
from django.conf import settings
from django.conf.urls import url
from django.conf.urls.static import static
from django.contrib import admin
from django.urls import include, path
from django.views.generic import TemplateView
# First-party/Local
from licenses.views import branch_status, translation_status
# URL routes: Django admin, the static home page, the translation/branch
# status pages, and the licenses app's URLconf; uploaded media is served
# from MEDIA_ROOT at MEDIA_URL (static() is a no-op outside DEBUG).
urlpatterns = [
    url(r"^admin/", admin.site.urls),
    path("", TemplateView.as_view(template_name="home.html"), name="home"),
    url(
        r"status/(?P<id>\d+)/$",
        branch_status,
        name="branch_status",
    ),
    url(
        r"status/$",
        translation_status,
        name="translation_status",
    ),
    url(r"licenses/", include("licenses.urls")),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)

if settings.DEBUG:
    # Third-party
    # django-debug-toolbar routes are exposed only in development
    import debug_toolbar

    urlpatterns += [
        url(r"^__debug__/", include(debug_toolbar.urls)),
    ]
| 2.46875 | 2 |
python__OOP/04.classes_and_instances_exercise/01.point.py | EmilianStoyanov/Projects-in-SoftUni | 1 | 12768184 | <filename>python__OOP/04.classes_and_instances_exercise/01.point.py
import math
class Point:
    """A mutable point in the 2-D Cartesian plane."""

    def __init__(self, x, y):
        self.x = x
        self.y = y

    def set_x(self, new_x):
        """Replace the x coordinate with ``new_x``."""
        self.x = new_x

    def set_y(self, new_y):
        """Replace the y coordinate with ``new_y``."""
        self.y = new_y

    def distance(self, x, y):
        """Return the Euclidean distance from this point to ``(x, y)``.

        Uses ``math.hypot`` instead of the manual abs/square/sqrt dance;
        the ``abs`` calls were redundant anyway since the values are squared.
        """
        return math.hypot(self.x - x, self.y - y)
| 4.0625 | 4 |
typhon/nano/smallcaps.py | justinnoah/typhon | 63 | 12768185 | from rpython.rlib.rbigint import BASE10
from typhon.nano import checkpoints
from typhon.quoting import quoteChar, quoteStr
def doNanoSmallCaps(expr):
    """Run the checkpoint passes (insertion, then collection) over *expr*."""
    for compilerPass in (checkpoints.AddCheckpoints(),
                         checkpoints.CollectCheckpoints()):
        expr = compilerPass.visitExpr(expr)
    return expr
class PrettySmallCaps(checkpoints.CheckpointIR.makePassTo(None)):
    """Pretty-printer pass: renders a CheckpointIR tree as Monte-like source.

    Each ``visit*`` method appends the textual form of one node kind to an
    internal buffer; call :meth:`asUnicode` after visiting to collect the
    result.
    """

    def __init__(self):
        self.buf = []

    def asUnicode(self):
        """Return everything written so far as a single unicode string."""
        return u"".join(self.buf)

    def write(self, s):
        self.buf.append(s)

    # --- literals ---

    def visitNullExpr(self):
        self.write(u"null")

    def visitCharExpr(self, c):
        self.write(quoteChar(c[0]))

    def visitDoubleExpr(self, d):
        self.write(u"%f" % d)

    def visitIntExpr(self, i):
        # i is an rbigint; format in base 10.
        self.write(i.format(BASE10).decode("utf-8"))

    def visitStrExpr(self, s):
        self.write(quoteStr(s))

    # --- expressions ---

    def visitAssignExpr(self, name, rvalue):
        self.write(name)
        self.write(u" := ")
        self.visitExpr(rvalue)

    def visitBindingExpr(self, name):
        self.write(u"&&")
        self.write(name)

    def visitCallExpr(self, obj, verb, args, namedArgs):
        self.visitExpr(obj)
        self.write(u".")
        self.write(verb)
        self.write(u"(")
        if args:
            self.visitExpr(args[0])
            for arg in args[1:]:
                self.write(u", ")
                self.visitExpr(arg)
        if args and namedArgs:
            # Separate positional from named arguments, mirroring
            # visitMethodExpr below.
            self.write(u", ")
        if namedArgs:
            # Bugfix: this used to visit args[0], which printed the wrong
            # node and raised IndexError when there were named arguments
            # but no positional ones.
            self.visitNamedArg(namedArgs[0])
            for namedArg in namedArgs[1:]:
                self.write(u", ")
                self.visitNamedArg(namedArg)
        self.write(u")")

    def visitDefExpr(self, patt, ex, rvalue):
        if isinstance(patt, checkpoints.CheckpointIR.VarPatt):
            self.write(u"def ")
        self.visitPatt(patt)
        if not isinstance(ex, checkpoints.CheckpointIR.NullExpr):
            self.write(u" exit ")
            self.visitExpr(ex)
        self.write(u" := ")
        self.visitExpr(rvalue)

    def visitEscapeOnlyExpr(self, patt, body):
        self.write(u"escape ")
        self.visitPatt(patt)
        self.write(u" {")
        self.visitExpr(body)
        self.write(u"}")

    def visitEscapeExpr(self, patt, body, catchPatt, catchBody):
        self.write(u"escape ")
        self.visitPatt(patt)
        self.write(u" {")
        self.visitExpr(body)
        self.write(u"} catch ")
        self.visitPatt(catchPatt)
        self.write(u" {")
        self.visitExpr(catchBody)
        self.write(u"}")

    def visitFinallyExpr(self, body, atLast):
        self.write(u"try {")
        self.visitExpr(body)
        self.write(u"} finally {")
        self.visitExpr(atLast)
        self.write(u"}")

    def visitHideExpr(self, body):
        self.write(u"{")
        self.visitExpr(body)
        self.write(u"}")

    def visitIfExpr(self, test, cons, alt):
        self.write(u"if (")
        self.visitExpr(test)
        self.write(u") {")
        self.visitExpr(cons)
        self.write(u"} else {")
        self.visitExpr(alt)
        self.write(u"}")

    def visitMetaContextExpr(self):
        self.write(u"meta.context()")

    def visitMetaStateExpr(self):
        self.write(u"meta.state()")

    def visitNounExpr(self, name):
        self.write(name)

    def visitObjectExpr(self, doc, patt, auditors, methods, matchers):
        self.write(u"object ")
        self.visitPatt(patt)
        if auditors:
            # First auditor is the "as" auditor, the rest are "implements".
            self.write(u" as ")
            self.visitExpr(auditors[0])
            auditors = auditors[1:]
            if auditors:
                self.write(u" implements ")
                self.visitExpr(auditors[0])
                for auditor in auditors[1:]:
                    self.write(u", ")
                    self.visitExpr(auditor)
        self.write(u" {")
        for method in methods:
            self.visitMethod(method)
        for matcher in matchers:
            self.visitMatcher(matcher)
        self.write(u"}")

    def visitSeqExpr(self, exprs):
        if exprs:
            self.visitExpr(exprs[0])
            for expr in exprs[1:]:
                self.write(u"; ")
                self.visitExpr(expr)

    def visitTryExpr(self, body, catchPatt, catchBody):
        self.write(u"try {")
        self.visitExpr(body)
        self.write(u"} catch ")
        self.visitPatt(catchPatt)
        self.write(u" {")
        self.visitExpr(catchBody)
        self.write(u"}")

    # --- patterns ---

    def visitIgnorePatt(self, guard):
        self.write(u"_")
        if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
            self.write(u" :")
            self.visitExpr(guard)

    def visitBindingPatt(self, name):
        self.write(u"&&")
        self.write(name)

    def visitFinalPatt(self, name, guard):
        self.write(name)
        if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
            self.write(u" :")
            self.visitExpr(guard)

    def visitVarPatt(self, name, guard):
        self.write(u"var ")
        self.write(name)
        if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
            self.write(u" :")
            self.visitExpr(guard)

    def visitListPatt(self, patts):
        self.write(u"[")
        if patts:
            self.visitPatt(patts[0])
            for patt in patts[1:]:
                self.write(u", ")
                self.visitPatt(patt)
        self.write(u"]")

    def visitViaPatt(self, trans, patt):
        self.write(u"via (")
        self.visitExpr(trans)
        self.write(u") ")
        self.visitPatt(patt)

    # --- named arguments / parameters ---

    def visitNamedArgExpr(self, key, value):
        self.visitExpr(key)
        self.write(u" => ")
        self.visitExpr(value)

    def visitNamedPattern(self, key, patt, default):
        self.visitExpr(key)
        self.write(u" => ")
        self.visitPatt(patt)
        self.write(u" := ")
        self.visitExpr(default)

    # --- object members ---

    def visitMatcherExpr(self, patt, body):
        self.write(u"match ")
        self.visitPatt(patt)
        self.write(u" {")
        self.visitExpr(body)
        self.write(u"}")

    def visitMethodExpr(self, doc, verb, patts, namedPatts, guard, body):
        self.write(u"method ")
        self.write(verb)
        self.write(u"(")
        if patts:
            self.visitPatt(patts[0])
            for patt in patts[1:]:
                self.write(u", ")
                self.visitPatt(patt)
        if patts and namedPatts:
            self.write(u", ")
        if namedPatts:
            self.visitNamedPatt(namedPatts[0])
            for namedPatt in namedPatts[1:]:
                self.write(u", ")
                self.visitNamedPatt(namedPatt)
        self.write(u")")
        if not isinstance(guard, checkpoints.CheckpointIR.NullExpr):
            self.write(u" :")
            self.visitExpr(guard)
        self.write(u" {")
        self.visitExpr(body)
        self.write(u"}")

    def visitCheckpointExpr(self, count):
        self.write(u"meta.checkpoint(")
        self.write(u"%d" % count)
        self.write(u")")
| 2.1875 | 2 |
apps/user/views/registration.py | dy1zan/softwarecapstone | 0 | 12768186 | <reponame>dy1zan/softwarecapstone
"""
Registration views: GET & POST (submitting form data)
"""
from django.conf import settings
from django.contrib import messages
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.loader import render_to_string
from django.urls import reverse
from django.views import View
from apps.user.models import User
from .. import forms
class Register(View):
    """Registration flow: GET shows the form, POST processes a submission."""

    def get(self, request):
        """Render an empty registration form."""
        context = {'form': forms.UserRegistrationForm()}
        return render(request, 'registration/register.html', context)

    def post(self, request):
        """Validate the submitted form and email a verification link.

        A verification code is generated automatically when the User object
        is created; the emailed link carries that code plus the username.
        """
        form = forms.UserRegistrationForm(request.POST)
        if not form.is_valid():
            # Re-render with the bound form so field errors are shown.
            return render(request, 'registration/register.html', {'form': form})

        form.save()
        username = form.cleaned_data.get('username')
        user = User.objects.get(username=username)

        # Build the verification link and send it to the new user.
        link = settings.SITE_DOMAIN + reverse('user:verify') + "?code=%d&username=%s" % (user.verify_code, username)
        msg_plain = render_to_string('emails/verification.txt', {'site': settings.SITE_DOMAIN, 'verify_link': link})
        user.email_user("TechPalmy: Verify your account", msg_plain)

        messages.success(request, "Please check your email and verify your account.")
        return HttpResponseRedirect(reverse('index'))
class VerifyAccount(View):
    """
    Reads the verification code and username from GET data, verifies the
    account if the code matches, and redirects verified users to the login
    page (everyone else goes back to the index).
    """

    def get(self, request):
        verify_code = request.GET.get('code')
        username = request.GET.get('username')
        try:
            user = User.objects.get(username=username)
        except User.DoesNotExist:
            # Previously a bare ``except: pass`` swallowed *all* errors here,
            # hiding real bugs. Only an unknown username is expected; fall
            # through to the generic redirect without revealing whether the
            # account exists.
            return HttpResponseRedirect(reverse('index'))

        if user.is_verified():
            messages.success(request, "Your account has already been verified.")
        elif str(user.verify_code) == verify_code:
            # Zeroing the code marks the account as verified.
            user.verify_code = 0
            user.save()
            messages.success(request, "Your account has successfully been verified! Please login to continue.")
            return HttpResponseRedirect(reverse('login'))

        return HttpResponseRedirect(reverse('index'))
| 2.890625 | 3 |
django_rq_jobs/management/commands/rqjobs.py | Koed00/Django-RQ-Jobs | 0 | 12768187 | <reponame>Koed00/Django-RQ-Jobs<filename>django_rq_jobs/management/commands/rqjobs.py
from ast import literal_eval
import importlib
import arrow
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils.translation import ugettext_lazy as _
import django_rq
from django_rq_jobs.models import Job
class Command(BaseCommand):
    """Management command: enqueue every scheduled Job whose next run is due."""
    help = _("Queues scheduled jobs")
    BaseCommand.can_import_settings = True
    BaseCommand.requires_system_checks = True
    BaseCommand.leave_locale_alone = True

    def handle(self, *args, **options):
        # Jobs with repeats == 0 are exhausted; only pick up due jobs.
        for job in Job.objects.exclude(repeats=0).filter(next_run__lt=arrow.utcnow().datetime):
            # Tasks saved by versions < 0.1.5 lack a module prefix.
            if '.' not in job.task:
                job = fix_module(job)
            # job.args is stored as a Python-literal string of kwargs.
            if job.args:
                rq = django_rq.enqueue(job.rq_task, **literal_eval(job.args))
            else:
                rq = django_rq.enqueue(job.rq_task)
            # Record queue bookkeeping on the Job row.
            job.rq_id = rq.id
            job.rq_origin = rq.origin
            job.last_run = arrow.utcnow().datetime
            self.stdout.write(_('* Queueing {} on {}.').format(job.get_task_display(), job.rq_origin), ending=' ')
            if job.schedule_type != Job.ONCE:
                # repeats < 0 means "repeat forever"; > 1 means more runs left.
                if job.repeats < 0 or job.repeats > 1:
                    next_run = arrow.get(job.next_run)
                    # Advance next_run by one schedule interval.
                    # NOTE(review): arrow's ``replace`` with plural kwargs is
                    # the old shift API; newer arrow uses ``shift`` — confirm
                    # the pinned arrow version still supports this.
                    if job.schedule_type == Job.HOURLY:
                        next_run = next_run.replace(hours=+1)
                    elif job.schedule_type == Job.DAILY:
                        next_run = next_run.replace(days=+1)
                    elif job.schedule_type == Job.WEEKLY:
                        next_run = next_run.replace(weeks=+1)
                    elif job.schedule_type == Job.MONTHLY:
                        next_run = next_run.replace(months=+1)
                    elif job.schedule_type == Job.QUARTERLY:
                        next_run = next_run.replace(months=+3)
                    elif job.schedule_type == Job.YEARLY:
                        next_run = next_run.replace(years=+1)
                    job.next_run = next_run.datetime
                    # Count down finite repeats (negative = infinite, untouched).
                    if job.repeats > 1:
                        job.repeats += -1
                    self.stdout.write(_('Next run {}.').format(next_run.humanize()))
                    job.save()
                else:
                    # Last repetition of a limited-run job: remove it.
                    job.delete()
                    self.stdout.write(_('Deleting limited run task'))
            else:
                # One-shot job: remove after queueing.
                self.stdout.write(_('Deleting run once task'))
                job.delete()
def fix_module(job):
    """
    Fix for tasks without a module. Provides backwards compatibility with < 0.1.5.

    Searches the module(s) named in ``settings.RQ_JOBS_MODULE`` for a callable
    matching ``job.task`` and, on the first hit, prefixes the task with its
    module path. Returns the (possibly updated) job.
    """
    modules = settings.RQ_JOBS_MODULE
    # Use isinstance rather than a ``type(...) == tuple`` comparison; the
    # setting may be a single module path or a tuple of them.
    if not isinstance(modules, tuple):
        modules = [modules]
    for module in modules:
        try:
            module_match = importlib.import_module(module)
            if hasattr(module_match, job.task):
                job.task = '{}.{}'.format(module, job.task)
                break
        except ImportError:
            # Skip modules that cannot be imported and keep searching.
            continue
    return job
| 2.03125 | 2 |
authors/apps/authentication/renderers.py | AmosWels/ah-django | 0 | 12768188 | import json
from authors.apps.core.renderers import AuthorsJSONRenderer
class UserJSONRenderer(AuthorsJSONRenderer):
    """Render user payloads nested under a top-level ``user`` key."""

    # Key the base AuthorsJSONRenderer uses to wrap the serialized data.
    object_label = 'user'
| 1.570313 | 2 |
django_csv/admin.py | yunojuno/django-queryset-csv | 0 | 12768189 | from django.contrib import admin
from .models import CsvDownload
class CsvDownloadAdmin(admin.ModelAdmin):
    """Admin configuration for CsvDownload audit records (read-only)."""

    list_display = ("user", "timestamp", "row_count", "filename")
    list_filter = ("timestamp",)
    search_fields = ("user", "filename")
    # Avoid rendering a full user dropdown for large user tables.
    raw_id_fields = ("user",)
    # Downloads are audit records; all fields are immutable in the admin.
    readonly_fields = (
        "user",
        "timestamp",
        "filename",
        "row_count",
        "columns",
    )


admin.site.register(CsvDownload, CsvDownloadAdmin)
| 1.765625 | 2 |
deepchem/splits/__init__.py | cjgalvin/deepchem | 3,782 | 12768190 | <gh_stars>1000+
"""
Gathers all splitters in one place for convenient imports
"""
# flake8: noqa
# basic splitter
from deepchem.splits.splitters import Splitter
from deepchem.splits.splitters import RandomSplitter
from deepchem.splits.splitters import RandomStratifiedSplitter
from deepchem.splits.splitters import RandomGroupSplitter
from deepchem.splits.splitters import SingletaskStratifiedSplitter
from deepchem.splits.splitters import IndexSplitter
from deepchem.splits.splitters import SpecifiedSplitter
# molecule splitter
from deepchem.splits.splitters import ScaffoldSplitter
from deepchem.splits.splitters import MolecularWeightSplitter
from deepchem.splits.splitters import MaxMinSplitter
from deepchem.splits.splitters import FingerprintSplitter
from deepchem.splits.splitters import ButinaSplitter
# other splitter
from deepchem.splits.task_splitter import merge_fold_datasets
from deepchem.splits.task_splitter import TaskSplitter
#################################################################
# Removed API
#################################################################
import logging
logger = logging.getLogger(__name__)
class IndiceSplitter:
    """Removed API stub: instantiating it always raises, pointing users at
    the renamed ``SpecifiedSplitter`` class."""

    def __init__(self, valid_indices=None, test_indices=None):
        message = ("IndiceSplitter was renamed to SpecifiedSplitter.\n"
                   "Please use SpecifiedSplitter instead of IndiceSplitter.")
        raise ImportError(message)
| 1.867188 | 2 |
profit/run/default.py | Rykath/profit | 14 | 12768191 | """ Default Runner & Worker components
Local Runner
Memmap Interface (numpy)
Template Preprocessor
JSON Postprocessor
NumpytxtPostprocessor
HDF5Postprocessor
"""
from .runner import Runner, RunnerInterface
from .worker import Interface, Preprocessor, Postprocessor, Worker
import subprocess
from multiprocessing import Process
from time import sleep
import logging
import numpy as np
import os
from shutil import rmtree
# === Local Runner === #
@Runner.register('local')
class LocalRunner(Runner):
    """ Runner for executing simulations locally

    - forks the worker, thereby having less overhead (especially with a custom python Worker)
    - per default uses all available CPUs
    """
    def spawn_run(self, params=None, wait=False):
        """Start a single run, either as a subprocess or a forked Process.

        params: input parameters for the run (handled by the base class).
        wait: if True, block until the run finishes and drop it from
            ``self.runs`` immediately.
        """
        super().spawn_run(params, wait)
        if self.run_config['custom'] or not self.config['fork']:
            # Subprocess path: custom command or forking disabled. The run id
            # is handed to the worker via the environment.
            env = self.env.copy()
            env['PROFIT_RUN_ID'] = str(self.next_run_id)
            if self.run_config['custom']:
                cmd = self.run_config['command']
            else:
                cmd = 'profit-worker'
            self.runs[self.next_run_id] = subprocess.Popen(cmd, shell=True, env=env, cwd=self.base_config['run_dir'])
            if wait:
                self.runs[self.next_run_id].wait()
                del self.runs[self.next_run_id]
        else:
            # Fork path: run the Worker directly in a child Process, avoiding
            # interpreter startup overhead.
            def work():
                worker = Worker.from_config(self.run_config, self.next_run_id)
                worker.main()

            # chdir so the forked child inherits the run directory as cwd,
            # then switch back for the parent.
            os.chdir(self.base_config['run_dir'])
            process = Process(target=work)
            self.runs[self.next_run_id] = process
            process.start()
            if wait:
                process.join()
                del self.runs[self.next_run_id]
            os.chdir(self.base_config['base_dir'])
        self.next_run_id += 1

    def spawn_array(self, params_array, blocking=True):
        """ spawn an array of runs, maximum 'parallel' at the same time, blocking until all are done """
        if not blocking:
            raise NotImplementedError
        for params in params_array:
            self.spawn_run(params)
            # Throttle: wait until a slot frees up before spawning the next.
            while len(self.runs) >= self.config['parallel']:
                sleep(self.config['sleep'])
                self.check_runs(poll=True)
        # Drain: wait for all remaining runs to finish.
        while len(self.runs):
            sleep(self.config['sleep'])
            self.check_runs(poll=True)

    def check_runs(self, poll=False):
        """ check the status of runs via the interface """
        self.interface.poll()
        if self.run_config['custom'] or not self.config['fork']:
            # Subprocess runs: reap processes whose interface flag is DONE,
            # and (optionally) those that exited without setting it.
            for run_id, process in list(self.runs.items()):  # preserve state before deletions
                if self.interface.internal['DONE'][run_id]:
                    process.wait()  # just to make sure
                    del self.runs[run_id]
                elif poll and process.poll() is not None:
                    del self.runs[run_id]
        else:
            # Forked runs: same logic with Process join/exitcode semantics.
            for run_id, process in list(self.runs.items()):  # preserve state before deletions
                if self.interface.internal['DONE'][run_id]:
                    process.join()  # just to make sure
                    del self.runs[run_id]
                elif poll and process.exitcode is not None:
                    process.terminate()
                    del self.runs[run_id]

    def cancel_all(self):
        """Terminate every tracked run and clear the run table."""
        if self.run_config['custom'] or not self.config['fork']:
            for process in self.runs.values():
                process.terminate()
        else:
            for process in self.runs.values():
                process.terminate()
        self.runs = {}
# === Numpy Memmap Inerface === #
@RunnerInterface.register('memmap')
class MemmapRunnerInterface(RunnerInterface):
    """ Runner-Worker Interface using a memory mapped numpy array

    - expected to be very fast with the *local* Runner as each Worker can access the array directly (unverified)
    - expected to be inefficient if used on a cluster with a shared filesystem (unverified)
    - reliable
    - known issue: resizing the array (to add more runs) is dangerous, needs a workaround
      (e.g. several arrays in the same file)
    """
    def __init__(self, config, size, input_config, output_config, *, logger_parent: logging.Logger = None):
        super().__init__(config, size, input_config, output_config, logger_parent=logger_parent)
        self._create_memmap(size)

    def _create_memmap(self, size):
        """Create the backing .npy file of ``size`` rows and map the
        input/output/internal column views onto it.

        Shared by ``__init__`` and ``resize`` (previously duplicated).
        """
        init_data = np.zeros(size, dtype=self.input_vars + self.internal_vars + self.output_vars)
        np.save(self.config['path'], init_data)

        try:
            self._memmap = np.load(self.config['path'], mmap_mode='r+')
        except FileNotFoundError:
            self.runner.logger.error(
                f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
            raise

        # views on the memmap, selected by column name
        self.input = self._memmap[[v[0] for v in self.input_vars]]
        self.output = self._memmap[[v[0] for v in self.output_vars]]
        self.internal = self._memmap[[v[0] for v in self.internal_vars]]

    def resize(self, size):
        """ Resizing the Memmap Runner Interface

        Attention: this is dangerous and may lead to unexpected errors!
        The problem is that the memory mapped file is overwritten.
        Any Workers which have this file mapped will run into severe problems.
        Possible future workarounds: multiple files or multiple headers in one file.
        """
        if size <= self.size:
            self.logger.warning('shrinking RunnerInterface is not supported')
            return
        self.logger.warning('resizing MemmapRunnerInterface is dangerous')
        self.clean()
        self._create_memmap(size)

    def clean(self):
        """Remove the backing file if it exists."""
        if os.path.exists(self.config['path']):
            os.remove(self.config['path'])
@Interface.register('memmap')
class MemmapInterface(Interface):
    """ Runner-Worker Interface using a memory mapped numpy array

    counterpart to :py:class:`MemmapRunnerInterface`
    """
    def __init__(self, config, run_id: int, *, logger_parent: logging.Logger = None):
        super().__init__(config, run_id, logger_parent=logger_parent)
        # ToDo: multiple arrays after another to allow extending the file dynamically
        try:
            self._memmap = np.load(self.config['path'], mmap_mode='r+')
        except FileNotFoundError:
            self.worker.logger.error(
                f'{self.__class__.__name__} could not load {self.config["path"]} (cwd: {os.getcwd()})')
            raise

        # Derive input/output column names from the dtype layout: every
        # column before 'DONE' is an input; everything after it except the
        # internal 'DONE'/'TIME' columns is an output.
        inputs, outputs = [], []
        k = 0
        for k, key in enumerate(self._memmap.dtype.names):
            if key == 'DONE':
                break
            inputs.append(key)
        for key in self._memmap.dtype.names[k:]:
            if key not in ['DONE', 'TIME']:
                outputs.append(key)
        # views on this worker's row of the shared array
        self.input = self._memmap[inputs][run_id]
        self.output = self._memmap[outputs][run_id]
        self._data = self._memmap[run_id]

    def done(self):
        """Mark this run as finished and persist the result."""
        # Bugfix: write through this run's row (self._data). The previous
        # code assigned via self._memmap['TIME'] / self._memmap['DONE'],
        # which set the TIME and DONE flags for *every* run in the shared
        # array, making the Runner believe all runs had completed.
        self._data['TIME'] = self.time
        self._data['DONE'] = True
        self._memmap.flush()

    def clean(self):
        """Remove the backing file if it exists."""
        if os.path.exists(self.config['path']):
            os.remove(self.config['path'])
# === Template Preprocessor === #
@Preprocessor.register('template')
class TemplatePreprocessor(Preprocessor):
    """ Preprocessor which substitutes the variables with a given template

    - copies the given template directory to the target run directory
    - searches all files for variables templates of the form {name} and replaces them with their values
    - for file formats which use curly braces (e.g. json) the template identifier is {{name}}
    - substitution can be restricted to certain files by specifying `param_files`
    - relative symbolic links are converted to absolute symbolic links on copying
    - linked files are ignored with `param_files: all`, but if specified explicitly the link target is copied to the run
      directory and then substituted
    """
    def pre(self, data, run_dir):
        """Fill ``run_dir`` from the template with ``data`` substituted.

        Side effect: changes the process working directory to ``run_dir``.
        """
        # No call to super()! replaces the default preprocessing
        # Local import avoids a module-level import cycle with profit.pre.
        from profit.pre import fill_run_dir_single
        # Remove any stale run directory from a previous attempt.
        if os.path.exists(run_dir):
            rmtree(run_dir)
        fill_run_dir_single(data, self.config['path'], run_dir, ignore_path_exists=True,
                            param_files=self.config['param_files'])
        os.chdir(run_dir)
# === JSON Postprocessor === #
@Postprocessor.register('json')
class JSONPostprocessor(Postprocessor):
    """ Postprocessor to read output from a JSON file

    - variables are assumed to be stored with the correct key and able to be converted immediately
    - not extensively tested
    """
    def post(self, data):
        """Copy every key/value pair from the JSON output file into ``data``."""
        import json
        with open(self.config['path']) as f:
            for key, value in json.load(f).items():
                data[key] = value
# === Numpy Text Postprocessor === #
@Postprocessor.register('numpytxt')
class NumpytxtPostprocessor(Postprocessor):
    """ Postprocessor to read output from a tabular text file (e.g. csv, tsv) with numpy ``genfromtxt``

    - the data is assumed to be row oriented
    - vector variables are spread across the row and have to be in the right order, only the name of the variable should
      be specified once in ``names``
    - ``names`` which are not specified as output variables are ignored
    - additional options are passed directly to ``numpy.genfromtxt()``
    """
    def post(self, data):
        """Parse the configured text file and copy known columns into ``data``."""
        # Build a dtype covering all configured names; names that are output
        # variables keep their (possibly vector) shape, others get a scalar
        # placeholder and are dropped after parsing.
        dtype = [(name, float, data.dtype[name].shape if name in data.dtype.names else ())
                 for name in self.config['names']]
        try:
            raw = np.genfromtxt(self.config['path'], dtype=dtype, **self.config['options'])
        except OSError:
            # Log context (cwd, directory listing) to ease debugging of
            # missing simulation output, then re-raise.
            self.logger.error(f'output file {self.config["path"]} not found')
            self.logger.info(f'cwd = {os.getcwd()}')
            dirname = os.path.dirname(self.config['path']) or '.'
            self.logger.info(f'ls {dirname} = {os.listdir(dirname)}')
            raise
        # Copy only the columns that are actual output variables.
        for key in self.config['names']:
            if key in data.dtype.names:
                data[key] = raw[key]
# === HDF5 Postprocessor === #
@Postprocessor.register('hdf5')
class HDF5Postprocessor(Postprocessor):
    """ Postprocessor to read output from a HDF5 file

    - variables are assumed to be stored with the correct key and able to be converted immediately
    - not extensively tested
    """
    def post(self, data):
        """Copy every dataset from the HDF5 output file into ``data``."""
        # Local import: h5py is only required when this postprocessor is used.
        import h5py
        with h5py.File(self.config['path'], 'r') as f:
            for key in f.keys():
                # Assignment copies the dataset values into data's buffer
                # while the file is still open.
                data[key] = f[key]
| 2.09375 | 2 |
full-problems/amq1.py | vikas-t/DS-Algo | 0 | 12768192 | <filename>full-problems/amq1.py
# Question #1
def find_all_paths(graph, start, end, path=None):
    """Return all simple (cycle-free) paths from ``start`` to ``end``.

    Args:
        graph: adjacency mapping ``{node: [neighbor, ...]}``; nodes missing
            from the mapping are treated as having no outgoing edges.
        start: first node of every returned path.
        end: last node of every returned path.
        path: internal accumulator for the recursion; callers should not
            pass it. (Was a mutable default ``[]`` — replaced with None.)

    Returns:
        A list of paths, each a list of nodes; empty if no path exists.
    """
    if path is None:
        path = []
    path = path + [start]  # copy so each recursive branch owns its path
    if start == end:
        return [path]
    # dict.has_key() was removed in Python 3; use the `in` operator instead.
    if start not in graph:
        return []
    paths = []
    for node in graph[start]:
        if node not in path:  # skip visited nodes -> simple paths only
            for newpath in find_all_paths(graph, node, end, path):
                paths.append(newpath)
    return paths
# Python 2 script body: read a graph as an adjacency matrix, then print every
# simple path between every ordered pair of distinct nodes.

# n = number of nodes, m = number of edges (one line, space separated)
n,m = map(int, (raw_input().strip().split()))
# adjacency matrix with 1-based node indices (row/col 0 unused)
r = []
for i in xrange(n+1):
    r.append([0]*(n+1))
# read m directed edges: "from to weight"
for i in xrange(m):
    n1, n2, rt = map(int,(raw_input().strip().split()))
    r[n1][n2] = rt
graph = {}
tx = []
# build an adjacency list; an edge in either direction links i -> j
for i in xrange(n+1):
    for j in xrange(n+1):
        if r[i][j] or r[j][i]:
            if i not in graph:
                graph[i] = []
            graph[i].append(j)
# collect every simple path between every ordered pair of distinct nodes
for i in xrange(1,n+1):
    for j in xrange(1,n+1):
        if i == j:
            continue
        x = find_all_paths(graph,i,j)
        for path in x:
            tx.append(tuple(path))
# deduplicated set of paths, then the raw list
rtx = set(tx)
print rtx
print ""
print tx
pythonProject/venv/Lib/site-packages/workbook/__init__.py | MontanhaRio/python | 0 | 12768193 | <filename>pythonProject/venv/Lib/site-packages/workbook/__init__.py
#!/usr/bin/env python
from xlwt import Workbook as _WB_, Font, XFStyle, Borders, Alignment
def print_table(data, title="", bold=True):
    '''fancy ascii table

    Print *data* (list of rows) as a boxed ASCII table to stdout. The first
    row and first column are treated as headers; with bold=True they are
    colorized using ANSI escape codes. Python 2 only (print statements,
    str.decode).
    '''
    # column widths: the widest cell in each column
    maxs = []
    for row in data:
        for i, cell in enumerate(row):
            if len(maxs) <= i:
                maxs.append(0)
            # NOTE(review): the comparison uses byte length but stores the
            # decoded (character) length — these differ for non-ASCII cells;
            # confirm inputs are ASCII or unify the two measures.
            if len(str(cell)) > maxs[i]:
                maxs[i] = len(str(cell).decode('utf-8'))
    # horizontal border line, e.g. "+----+-----+"
    tb = "+-" + "-+-".join(["-" * m for m in maxs]) + "-+"
    print
    if title:
        if bold:
            # red title via ANSI escapes
            print "*** \033[31m" + title + "\033[0m ***"
        else:
            print "*** " + title + " ***"
        print
    print tb
    #_row = ['\033[1m%s\033[0m' % r for r in row]
    for j, row in enumerate(data):
        text = []
        for i, cell in enumerate(row):
            # right-align data columns, left-align the first column
            if i > 0:
                cell = str(cell).rjust(maxs[i])
            else:
                cell = str(cell).ljust(maxs[i])
            if bold:
                # colorize header row and header column (green)
                if j == 0 or i == 0:
                    cell = '\033[32m%s\033[0m' % str(cell)
            text.append(cell)
        print "| " + " | ".join(text) + " |"
        # separator under the header row
        if j == 0:
            print tb
    print tb
class Workbook(_WB_):
    """xlwt Workbook with a convenience method for writing a simple table."""

    def write_sheet(self, data, sheet_name, print_to_screen=False):
        '''Write a very simple table to a new sheet in a spreadsheet,
        Optionally, print the table to the screen'''
        # Style for ordinary (data) cells: right-aligned, 9pt Arial.
        # most cells
        al = Alignment()
        al.horz = Alignment.HORZ_RIGHT
        al.vert = Alignment.VERT_CENTER
        font = Font()
        font.name = 'Arial'
        font.height = 9 * 20  # 9 pt
        style = XFStyle()
        style.font = font
        style.alignment = al
        # Style for the header (top) row: centered, bold.
        # tops cells
        al = Alignment()
        al.horz = Alignment.HORZ_CENTER
        al.vert = Alignment.VERT_CENTER
        font = Font()
        font.name = 'Arial'
        font.bold = True
        font.height = 9 * 20  # 9 pt
        style_top = XFStyle()
        style_top.font = font
        style_top.alignment = al
        # Style for the leftmost column: left-aligned, bold italic.
        # left cells
        al = Alignment()
        al.horz = Alignment.HORZ_LEFT
        al.vert = Alignment.VERT_CENTER
        font = Font()
        font.name = 'Arial'
        font.bold = True
        font.italic = True
        font.height = 9 * 20  # 9 pt
        style_left = XFStyle()
        style_left.font = font
        style_left.alignment = al

        ws = self.add_sheet(sheet_name)
        # Cells are written with a one-row/one-column offset (i+1, j+1).
        for i, row in enumerate(data):
            for j, cell in enumerate(row):
                borders = Borders()
                if i == 0:
                    borders.top = 1
                    borders.bottom = 2
                # NOTE(review): this compares the row index against the row
                # *width* (len(row) - 1); for non-square tables the bottom
                # border lands on the wrong row — was len(data) - 1 intended?
                if i == len(row) - 1:
                    borders.bottom = 1
                if j == 0:
                    borders.left = 1
                    borders.right = 1
                if j == len(row) - 1:
                    borders.right = 1
                # Pick the style: first column wins over header row.
                if j == 0:
                    _style = style_left
                elif i == 0:
                    _style = style_top
                else:
                    _style = style
                _style.borders = borders
                ws.write(i + 1, j + 1, cell, _style)
        if print_to_screen:
            # NOTE(review): print_table prints the table itself and returns
            # None, so this also prints a trailing "None".
            print print_table(data, sheet_name, bold=True)
# Manual smoke test: write a tiny table to test.xls and echo it to the screen.
if __name__ == "__main__":
    wb = Workbook()
    wb.country_code = 61
    data = [["Acc", "b", "c"], [1, 2, 3], [4, 3, 5]]
    wb.write_sheet(data, "test_sheet", print_to_screen=True)
    wb.save("test.xls")
| 3.125 | 3 |
neuralcompression/layers/_generalized_divisive_normalization.py | tallamjr/NeuralCompression | 233 | 12768194 | <reponame>tallamjr/NeuralCompression
# Copyright (c) Meta Platforms, Inc. and affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import functools
from typing import Callable, Optional
import torch
import torch.nn.functional
from torch import Tensor
from torch.nn import Module, Parameter
from ._non_negative_parameterization import NonNegativeParameterization
class GeneralizedDivisiveNormalization(Module):
    """Applies generalized divisive normalization for each channel across a
    batch of data.

    Implements an activation function that is a multivariate generalization of
    the following sigmoid-like function:

    .. math::
        y_{i}=\\frac{x_{i}}{(\\beta_{i}+\\sum_{j}\\gamma_{ij}|x_{j}|^{\\alpha_{ij}})^{\\epsilon_{i}}}

    where :math:`i` and :math:`j` map over channels.

    This implementation never sums across spatial dimensions. It is similar to
    local response normalization, but much more flexible, as :math:`\\alpha`,
    :math:`\\beta`, :math:`\\gamma`, and :math:`\\epsilon` are trainable
    parameters.

    The method was originally described in:

        | “Density Modeling of Images using a Generalized Normalization
          Transformation”
        | <NAME>, <NAME>, <NAME>
        | https://arxiv.org/abs/1511.06281

    and expanded in:

        | “End-to-end Optimized Image Compression”
        | <NAME>, <NAME>, <NAME>
        | https://arxiv.org/abs/1611.01704

    Args:
        channels: number of channels in the input.
        inverse: compute the generalized divisive normalization response. If
            ``True``, compute the inverse generalized divisive normalization
            response (one step of fixed point iteration to invert the
            generalized divisive normalization; the division is replaced by
            multiplication).
        alpha_parameter: A ``Tensor`` means that the value of ``alpha`` is
            fixed. ``None`` means that when the layer is initialized, a
            ``NonNegativeParameterization`` layer is created to train ``alpha``
            (with a minimum value of ``1``). The default is a fixed value of
            ``1``.
        beta_parameter: A ``Tensor`` means that the value of ``beta`` is fixed.
            ``None`` means that when the layer is initialized, a
            ``NonNegativeParameterization`` layer is created to train ``beta``
            (with a minimum value of ``1e-6``).
        epsilon_parameter: A ``Tensor`` means that the value of ``epsilon`` is
            fixed. ``None`` means that when the layer is initialized, a
            ``NonNegativeParameterization`` layer is created to train
            ``epsilon`` (with a minimum value of 1e-6). The default is a fixed
            value of ``1``.
        gamma_parameter: A ``Tensor`` means that the value of ``gamma`` is
            fixed. ``None`` means that when the layer is initialized, a
            ``NonNegativeParameterization`` layer is created to train
            ``gamma``.
        alpha_initializer: initializes the ``alpha`` parameter. Only used if
            ``alpha`` is trained. Defaults to ``1``.
        beta_initializer: initializes the ``beta`` parameter. Only used if
            ``beta`` is created when initializing the layer. Defaults to ``1``.
        epsilon_initializer: initializes the ``epsilon`` parameter. Only used
            if ``epsilon`` is trained. Defaults to ``1``.
        gamma_initializer: initializes the ``gamma`` parameter. Only used if
            ``gamma`` is created when initializing the layer. Defaults to the
            identity multiplied by ``0.1``. A good default value for the
            diagonal is somewhere between ``0`` and ``0.5``. If set to ``0``
            and ``beta`` is initialized as ``1``, the layer is effectively
            initialized to the identity operation.
    """

    alpha: Parameter
    beta: Parameter
    epsilon: Parameter
    gamma: Parameter

    def __init__(
        self,
        channels: int,
        inverse: bool = False,
        alpha_parameter: Optional[Tensor] = None,
        beta_parameter: Optional[Tensor] = None,
        epsilon_parameter: Optional[Tensor] = None,
        gamma_parameter: Optional[Tensor] = None,
        alpha_initializer: Optional[Callable[[Tensor], Tensor]] = None,
        beta_initializer: Optional[Callable[[Tensor], Tensor]] = None,
        epsilon_initializer: Optional[Callable[[Tensor], Tensor]] = None,
        gamma_initializer: Optional[Callable[[Tensor], Tensor]] = None,
    ):
        super(GeneralizedDivisiveNormalization, self).__init__()

        self._channels = torch.tensor(channels, dtype=torch.int32)

        self._inverse = inverse

        # The four parameters share an identical setup, previously copied
        # four times in this constructor; see _setup_parameter for the
        # trainable-vs-fixed logic.
        reparameterized, parameter = self._setup_parameter(
            alpha_parameter,
            alpha_initializer,
            default_initializer=lambda x: torch.ones(x),
            minimum=1,
        )
        if reparameterized is not None:
            self._reparameterized_alpha = reparameterized
        if parameter is not None:
            self.alpha = parameter

        reparameterized, parameter = self._setup_parameter(
            beta_parameter,
            beta_initializer,
            default_initializer=lambda x: torch.ones(x),
            minimum=1e-6,
        )
        if reparameterized is not None:
            self._reparameterized_beta = reparameterized
        if parameter is not None:
            self.beta = parameter

        reparameterized, parameter = self._setup_parameter(
            epsilon_parameter,
            epsilon_initializer,
            default_initializer=lambda x: torch.ones(x),
            minimum=1e-6,
        )
        if reparameterized is not None:
            self._reparameterized_epsilon = reparameterized
        if parameter is not None:
            self.epsilon = parameter

        reparameterized, parameter = self._setup_parameter(
            gamma_parameter,
            gamma_initializer,
            default_initializer=lambda x: 0.1 * torch.eye(x),
            minimum=0,
        )
        if reparameterized is not None:
            self._reparameterized_gamma = reparameterized
        if parameter is not None:
            self.gamma = parameter

    def _setup_parameter(
        self,
        value: Optional[Tensor],
        initializer: Optional[Callable[[Tensor], Tensor]],
        default_initializer: Callable[[Tensor], Tensor],
        minimum: float,
    ):
        """Build one GDN parameter; returns ``(reparameterization, parameter)``.

        When ``value`` is ``None`` the parameter is trainable: a
        ``NonNegativeParameterization`` is created from ``initializer`` (or
        ``default_initializer``) and the returned parameter is its initial
        value (``None`` if none is provided, matching the original behavior
        of leaving the attribute unset). Otherwise the fixed ``value`` is
        wrapped in a ``Parameter`` and no reparameterization is created.
        """
        if value is None:
            if initializer is None:
                initializer = default_initializer

            reparameterized = NonNegativeParameterization(
                initializer(self._channels),
                minimum=minimum,
            )

            parameter = None
            if reparameterized.initial_value is not None:
                parameter = Parameter(reparameterized.initial_value)

            return reparameterized, parameter

        if isinstance(value, Parameter):
            return None, value

        return None, Parameter(torch.tensor(value))

    def forward(self, x: Tensor) -> Tensor:
        """Apply (inverse) GDN to a ``(batch, channels, height, width)`` input."""
        _, channels, _, _ = x.size()

        # The per-channel normalization pool is computed as a 1x1 convolution
        # of the squared input with the (reparameterized) gamma matrix, plus
        # the (reparameterized) beta as bias.
        y = torch.nn.functional.conv2d(
            x ** 2,
            torch.reshape(
                self._reparameterized_gamma(self.gamma),
                (channels, channels, 1, 1),
            ),
            self._reparameterized_beta(self.beta),
        )

        if self._inverse:
            return x * torch.sqrt(y)

        return x * torch.rsqrt(y)
| 2.65625 | 3 |
examples/sample_experiment.py | clark-mask/EvasionPaths | 2 | 12768195 | # <NAME> 3/5/20
import os
from time_stepping import *
# Number of mobile interior sensors in the simulated network.
num_sensors: int = 20
# Sensing radius of each sensor; also reused as the boundary spacing below.
sensing_radius: float = 0.2
# Integration time step shared by the motion model and the simulation.
timestep_size: float = 0.01
unit_square: Boundary = RectangularDomain(spacing=sensing_radius)
# noinspection PyTypeChecker
billiard: MotionModel = BilliardMotion(dt=timestep_size, boundary=unit_square, vel=1, n_int_sensors=num_sensors)
# Where results are written, the output file's base name, and run count.
output_dir: str = "./output"
filename_base: str = "data"
n_runs: int = 1
# Unlike the animation, each simulation needs to create its own simulation object
def simulate() -> float:
    """Run a single evasion-path simulation and return its result time."""
    sim = EvasionPathSimulation(
        boundary=unit_square,
        motion_model=billiard,
        n_int_sensors=num_sensors,
        sensing_radius=sensing_radius,
        dt=timestep_size,
    )
    return sim.run()
def output_data(filename: str, data_points: list) -> None:
    """Append one line per data point to *filename*.

    Numeric values are written with two decimal places; strings are
    written verbatim. The file is created if it does not exist.
    """
    with open(filename, 'a+') as file:
        for d in data_points:
            # isinstance() replaces the original `type(d) != str` check, and
            # write() replaces writelines() (which was being misused with a
            # single string and only worked by accident).
            if isinstance(d, str):
                file.write(d + "\n")
            else:
                file.write("%.2f\n" % d)
def run_experiment() -> None:
    """Run ``n_runs`` simulations and append their times to the output file."""
    results = [simulate() for _ in range(n_runs)]
    output_file = output_dir + "/" + filename_base + ".txt"
    output_data(output_file, results)
def main() -> None:
    """Entry point: ensure the output directory exists, then run the experiment."""
    # makedirs(exist_ok=True) replaces the original exists()/mkdir() pair,
    # which had a check-then-create race and did not create parent dirs.
    os.makedirs(output_dir, exist_ok=True)
    run_experiment()
# Only run the experiment when executed as a script, not on import.
if __name__ == "__main__":
    main()
| 3.171875 | 3 |
tests/input.py | ExObsSim/Rapoc-public | 0 | 12768196 | <reponame>ExObsSim/Rapoc-public
# Test fixture paths (relative to the tests directory).
# Presumably an ExoMol/TauREx-format H2O cross-section file -- confirm against the tests that load it.
exomol_file = 'test_data/1H2-16O__POKAZATEL__R15000_0.3-50mu.xsec.TauREx.h5'
# Presumably the DACE-format opacity table for the same molecule/linelist.
dace_file = 'test_data/1H2-16O__POKAZATEL_e2b'
| 0.851563 | 1 |
exercise01/src/util.py | aWdas/self-organising-systems-2021 | 0 | 12768197 | from Bio import pairwise2
from Bio.Seq import Seq
from Bio.SeqRecord import SeqRecord
from Bio.pairwise2 import format_alignment
# Generalization lattice: for each symbol, the set of strictly-more-general
# symbols directly above it (tiers are defined by level() below:
# concrete bases -> two-way codes -> three-way codes/gap -> N).
# The symbols look like IUPAC nucleotide ambiguity codes -- TODO confirm.
adjacency_list = {
    "A": {"R", "W", "M"},
    "G": {"R", "K", "S"},
    "T": {"W", "K", "Y"},
    "C": {"M", "S", "Y"},
    "R": {"D", "V"},
    "W": {"D", "H"},
    "M": {"V", "H"},
    "K": {"D", "B"},
    "S": {"V", "B"},
    "Y": {"H", "B"},
    "D": {"N"},
    "V": {"N"},
    "H": {"N"},
    "B": {"N"},
    "-": {"N"},
    "N": {"N"}
}
def g(symbol_a: str, symbol_b: str) -> str:
    """Return a common generalization of two symbols in the lattice.

    Both frontier sets are expanded upward through ``adjacency_list`` one
    tier at a time until they meet; the first member of the meeting set is
    returned.
    """
    def climb(left, right):
        common = left.intersection(right)
        if common:
            return next(iter(common))
        left_up = left.union(*[adjacency_list[k] for k in left])
        right_up = right.union(*[adjacency_list[k] for k in right])
        if left_up.intersection(right):
            return next(iter(left_up.intersection(right)))
        if right_up.intersection(left):
            return next(iter(right_up.intersection(left)))
        return climb(left_up, right_up)

    return climb({symbol_a}, {symbol_b})
def generalize(seq_a: str, seq_b: str) -> str:
    """Generalize two equal-length sequences symbol-by-symbol via ``g``."""
    merged = [g(a, b) for a, b in zip(seq_a, seq_b)]
    return "".join(merged)
def level(symbol: str) -> int:
    """Return the generality tier of *symbol* (1 = concrete base, 4 = most general)."""
    if symbol in ("A", "G", "T", "C"):
        return 1
    if symbol in ("R", "W", "M", "K", "S", "Y"):
        return 2
    if symbol in ("D", "V", "H", "B", "-"):
        return 3
    return 4
def d(symbol_a: str, symbol_b: str) -> int:
    """Distance between two symbols: total lattice climbs needed to unify them."""
    unified = g(symbol_a, symbol_b)
    return 2 * level(unified) - level(symbol_a) - level(symbol_b)
def dist(seq_a: str, seq_b: str) -> int:
    """Total symbol-wise distance between two aligned, equal-length sequences."""
    total = 0
    for a, b in zip(seq_a, seq_b):
        total += d(a, b)
    return total
def seq_str(sequence: SeqRecord) -> str:
    """Return the record's sequence as a plain string."""
    return str(sequence.seq)
def pairwise_align(seq_a: SeqRecord, seq_b: SeqRecord):
    """Globally align two records and return the aligned pair.

    Note: slow; pairwise2 is not configured identically to CLUSTAL
    (it always introduces gaps for sequences of the same length).
    """
    print(f"Pairwise aligning sequence {seq_a.id} to {seq_b.id}")
    # Gap penalties (-10.0 open, -0.1 extend) mirror the CLUSTALW defaults
    # per https://www.genome.jp/tools-bin/clustalw
    alignments = pairwise2.align.globalxs(seq_str(seq_a), seq_str(seq_b), -10.0, -0.1)
    best = alignments[0]
    print(format_alignment(*best))
    aligned_a = SeqRecord(Seq(best.seqA), id=seq_a.id)
    aligned_b = SeqRecord(Seq(best.seqB), id=seq_b.id)
    return aligned_a, aligned_b
def build_pairwise_alignments(sequences: [SeqRecord]):
    """Align every unordered pair of sequences and track the closest pair.

    Returns a dict with:
      * ``pairwise_alignments``: {i: {j: (aligned_i, aligned_j, distance)}}
        for all i < j, and
      * ``min_dist_pair``: the (i, j, distance) triple with minimal distance.
    """
    pairwise_alignments = {}
    min_dist_pair = None
    for i in range(len(sequences)):
        row = {}
        for j in range(i + 1, len(sequences)):
            aligned_i, aligned_j = pairwise_align(sequences[i], sequences[j])
            pair_dist = dist(seq_str(aligned_i), seq_str(aligned_j))
            row[j] = (aligned_i, aligned_j, pair_dist)
            if min_dist_pair is None or min_dist_pair[2] > pair_dist:
                min_dist_pair = (i, j, pair_dist)
        pairwise_alignments[i] = row
    return {
        "pairwise_alignments": pairwise_alignments,
        "min_dist_pair": min_dist_pair
    }
def fix_odd_sequence_count(sequences, pairwise_alignments, min_dist_pair):
    """With an odd number of sequences, merge the closest pair into one.

    The minimum-distance pair (a, b) is removed from ``pairwise_alignments``
    (which is mutated in place) and replaced by a single generalized
    sequence ``f``, indexed by ``len(sequences)``, whose alignments and
    distances to all remaining sequences are computed here. Returns the
    (possibly modified) ``pairwise_alignments`` map.
    """
    # if there's an odd number of sequences, replace the min distance pair by its generalization f in the distance map
    if len(sequences) % 2 == 1:
        # NOTE(review): the local name `d` shadows the module-level distance
        # function d(); harmless here because d() is not called below.
        a, b, d = min_dist_pair
        print(f"Min-Distance Pair: {min_dist_pair}")
        # remove sequences a and b from the distance map
        pairwise_alignments.pop(a, None)
        pairwise_alignments.pop(b, None)
        for pairwise_alignments_i in pairwise_alignments.values():
            pairwise_alignments_i.pop(a, None)
            pairwise_alignments_i.pop(b, None)
        # add f with index "len(sequences)" to the distance map, calculate the pairwise distances to the other seqs
        seq_f = SeqRecord(Seq(generalize(seq_str(sequences[a]), seq_str(sequences[b]))), id="f")
        for (i, pairwise_alignments_i) in pairwise_alignments.items():
            aligned_seq_i, aligned_seq_f = pairwise_align(sequences[i], seq_f)
            fd = dist(seq_str(aligned_seq_i), seq_str(aligned_seq_f))
            pairwise_alignments_i[len(sequences)] = (aligned_seq_i, aligned_seq_f, fd)
    return pairwise_alignments
| 2.75 | 3 |
src/pythonFEA/constraints/__init__.py | honzatomek/pythonFEA | 0 | 12768198 | from .constraint.nodal import Suppress, Prescribe
| 1.039063 | 1 |
python_scripts/11-colored_circles.py | mohammedbehjoo/DigitalFUTURES-Developing-Costum-Components-in-Grasshopper-with-Python | 1 | 12768199 | import rhinoscriptsyntax as rs
import Rhino.Geometry as rg
import System.Drawing as sd
import Rhino.RhinoDoc as rr
import scriptcontext as sc
sc.doc=rr.ActiveDoc
def createColoredPoint(x,y,z,r,g,b):
currentColor = [r,g,b]
pt = rs.AddPoint(x,y,z)
rs.ObjectColor(pt, currentColor)
rs.EnableRedraw(False)
for x in range(0,256, step):
for y in range(0,256, step):
for z in range(0,256,step):
createColoredPoint(x,y,z,x,y,z)
rs.Redraw() | 2.203125 | 2 |
03-Template-basics/05-url_for-help-func.py | alehpineda/flask_bootcamp | 0 | 12768200 | from flask import Flask, render_template
# Single Flask application instance for this demo.
app = Flask(__name__)
@app.route('/')
def index():
    """Render the landing page template."""
    return render_template('home.html')
@app.route('/puppy/<name>')
def pup_name(name):
    """Render the puppy page, passing the URL path segment through as ``name``."""
    return render_template('puppy.html', name=name)
if __name__ == "__main__":
    # Debug mode enables the reloader and in-browser tracebacks; dev only.
    app.run(debug=True)
| 2.5625 | 3 |
src/test/chirc/tests/common.py | Schrotty/sIRC | 0 | 12768201 | <reponame>Schrotty/sIRC
import os
import random
import re
import shutil
import subprocess
import tempfile
import time
import chirc.replies as replies
import pytest
from chirc.client import ChircClient
from chirc.types import ReplyTimeoutException
class IRCSession():
    """Test harness around an IRC server process under test.

    Starts the server executable in a scratch directory, hands out test
    clients, and provides helpers that perform IRC actions and assert that
    the messages / numeric replies received match expectations.
    """

    def __init__(self, chirc_exe=None, msg_timeout=0.1, randomize_ports=False,
                 default_port=None, loglevel=-1, debug=False, path=None):
        """Record configuration and validate the server executable path.

        Raises RuntimeError if the executable is missing or not executable.
        """
        if chirc_exe is None:
            self.chirc_exe = path
        else:
            self.chirc_exe = chirc_exe
        if not (os.path.exists(self.chirc_exe) and os.path.isfile(self.chirc_exe) and os.access(self.chirc_exe,
                                                                                                os.X_OK)):
            raise RuntimeError("{} does not exist or it is not executable".format(self.chirc_exe))
        if default_port is None:
            self.default_port = 6667
        else:
            self.default_port = default_port
        self.msg_timeout = msg_timeout
        self.randomize_ports = randomize_ports
        self.loglevel = loglevel
        self.debug = debug
        self.oper_password = "<PASSWORD>"

    # Testing functions
    def _assert_equals(self, a, b, explanation, irc_msg=None):
        """Assert a == b, appending the raw IRC message to the failure text."""
        if irc_msg is not None:
            explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
        assert a == b, explanation

    def _assert_is_none(self, a, explanation, irc_msg=None):
        """Assert a is None, appending the raw IRC message to the failure text."""
        if irc_msg is not None:
            explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
        assert a is None, explanation

    def _assert_is_not_none(self, a, explanation, irc_msg=None):
        """Assert a is not None, appending the raw IRC message to the failure text."""
        if irc_msg is not None:
            explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
        assert a is not None, explanation

    def _assert_in(self, x, l, explanation, irc_msg=None):
        """Assert x in l, appending the raw IRC message to the failure text."""
        if irc_msg is not None:
            explanation = explanation + "\n\nMESSAGE: {}".format(irc_msg.raw(bookends=True))
        assert x in l, explanation

    # Start/end IRC session
    def start_session(self):
        """Launch the server in a fresh temp dir, retrying ports if randomized."""
        self.tmpdir = tempfile.mkdtemp()
        if self.randomize_ports:
            self.port = random.randint(10000, 60000)
        else:
            self.port = self.default_port
        if self.randomize_ports:
            tries = 10
        else:
            tries = 1
        while tries > 0:
            chirc_cmd = [os.path.abspath(self.chirc_exe), "-p", str(self.port), "-o", self.oper_password]
            if self.loglevel == -1:
                chirc_cmd.append("-q")
            elif self.loglevel == 1:
                chirc_cmd.append("-v")
            elif self.loglevel == 2:
                chirc_cmd.append("-vv")
            # HACK: the assembled chirc_cmd is ignored and a hard-coded,
            # machine-specific Java server is launched instead -- confirm this
            # is intentional before relying on chirc_exe/loglevel settings.
            # self.chirc_proc = subprocess.Popen(chirc_cmd, cwd = self.tmpdir)
            self.chirc_proc = subprocess.Popen(['java', '-jar', 'I:\Projekte\punk-irc\\target\punkIRC\punkIRC.jar'])
            time.sleep(0.01)
            rc = self.chirc_proc.poll()
            if rc != None:
                tries -= 1
                if tries == 0:
                    pytest.fail("chirc process failed to start. rc = %i" % rc)
                else:
                    if self.randomize_ports:
                        self.port = random.randint(10000, 60000)
            else:
                break
        self.clients = []

    def end_session(self):
        """Disconnect all clients, check the server is still healthy, clean up."""
        for c in self.clients:
            self.disconnect_client(c)
        rc = self.chirc_proc.poll()
        if rc is not None:
            if rc != 0:
                shutil.rmtree(self.tmpdir)
                pytest.fail("chirc process failed during test. rc = %i" % rc)
        else:
            self.chirc_proc.kill()
        self.chirc_proc.wait()
        shutil.rmtree(self.tmpdir)

    # Client connect/disconnect
    def get_client(self, nodelay=False):
        """Create, register, and return a new test client for this session."""
        c = ChircClient(msg_timeout=self.msg_timeout, port=self.port, nodelay=nodelay)
        self.clients.append(c)
        return c

    def disconnect_client(self, c):
        """Disconnect a client and remove it from the session's registry."""
        c.disconnect()
        self.clients.remove(c)

    def connect_user(self, nick, username):
        """Connect a client, register NICK/USER, and verify the welcome burst."""
        client = self.get_client()
        client.send_cmd("NICK %s" % nick)
        client.send_cmd("USER %s * * :%s" % (nick, username))
        self.verify_welcome_messages(client, nick)
        self.verify_lusers(client, nick)
        self.verify_motd(client, nick)
        return client

    def connect_clients(self, numclients, join_channel=None):
        """Connect user1..userN; optionally join them all to a channel."""
        clients = []
        for i in range(numclients):
            nick = "user%i" % (i + 1)
            username = "User %s" % nick
            client = self.connect_user(nick, username)
            clients.append((nick, client))
        if join_channel != None:
            self.join_channel(clients, join_channel)
        return clients

    def connect_and_join_channels(self, channels, aways=[], ircops=[], test_names=False):
        """Connect users per the ``channels`` spec and set up channel state.

        ``channels`` maps channel name -> list of users; the first entry is
        the channel operator ("@nick"), and "+nick" entries get voice. The
        ``None`` key lists users who connect without joining anything.
        Users in ``aways`` are marked away; users in ``ircops`` OPER up.
        Returns a dict of nick -> client.
        """
        users = {}
        if None in channels:
            for user in channels[None]:
                if user not in users:
                    client = self.connect_user(user, user)
                    users[user] = client
        channelsl = sorted([k for k in channels.keys() if k is not None])
        for channel in channelsl:
            channelusers = channels[channel]
            joined = []
            joinedp = []
            op = channelusers[0][1:]
            if op not in users:
                client = self.connect_user(op, op)
                users[op] = client
            if test_names:
                expect_names = [channelusers[0]]
            else:
                expect_names = None
            users[op].send_cmd("JOIN %s" % channel)
            self.verify_join(users[op], op, channel, expect_names=expect_names)
            joined.append(op)
            joinedp.append(channelusers[0])
            for user in channelusers[1:]:
                if user[0] in ("@", "+"):
                    nick = user[1:]
                else:
                    nick = user
                if nick not in users:
                    client = self.connect_user(nick, nick)
                    users[nick] = client
                if test_names:
                    expect_names = joinedp + [nick]
                else:
                    expect_names = None
                users[nick].send_cmd("JOIN %s" % channel)
                self.verify_join(users[nick], nick, channel, expect_names=expect_names)
                for user2 in joined:
                    self.verify_relayed_join(users[user2], from_nick=None, channel=channel)
                joined.append(nick)
                joinedp.append(user)
                if user[0] in ("@", "+"):
                    if user[0] == "@":
                        mode = "+o"
                    elif user[0] == "+":
                        mode = "+v"
                    self.set_channel_mode(users[op], op, channel, mode, nick)
                    for user2 in joined:
                        self.verify_relayed_mode(users[user2], from_nick=op, channel=channel, mode=mode, mode_nick=nick)
        for user in aways:
            users[user].send_cmd("AWAY :I'm away")
            self.get_reply(users[user], expect_code=replies.RPL_NOWAWAY, expect_nick=user,
                           expect_nparams=1, long_param_re="You have been marked as being away")
        for user in ircops:
            users[user].send_cmd("OPER %s %s" % (user, self.oper_password))
            self.get_reply(users[user], expect_code=replies.RPL_YOUREOPER, expect_nick=user,
                           expect_nparams=1, long_param_re="You are now an IRC operator")
        return users

    # IRC actions
    def join_channel(self, clients, channel):
        """JOIN every (nick, client) to ``channel`` and verify all relays."""
        for (nick, client) in clients:
            client.send_cmd("JOIN %s" % channel)
            self.verify_join(client, nick, channel)
        relayed = len(clients) - 1
        for (nick, client) in clients:
            for i in range(relayed):
                self.verify_relayed_join(client, from_nick=None, channel=channel)
            relayed -= 1

    def part_channel(self, clients, channel):
        """PART every client from ``channel`` and verify each relayed PART."""
        clients2 = clients[:]
        for (nick1, client1) in clients:
            client1.send_cmd("PART #test :%s is out of here!" % nick1)
            self.verify_relayed_part(client1, from_nick=nick1, channel=channel, msg="%s is out of here!" % nick1)
            clients2.remove((nick1, client1))
            for (nick2, client2) in clients2:
                self.verify_relayed_part(client2, from_nick=nick1, channel=channel, msg="%s is out of here!" % nick1)

    def set_user_mode(self, client, nick, nick_mode, mode, expect_wrong_mode=False, expect_relay=True):
        """Send a user MODE and verify the reply/relay (or expected error)."""
        client.send_cmd("MODE %s %s" % (nick_mode, mode))
        if nick != nick_mode:
            self.get_reply(client, expect_code=replies.ERR_USERSDONTMATCH, expect_nick=nick,
                           expect_nparams=1,
                           long_param_re="Cannot change mode for other users")
            return
        if expect_wrong_mode:
            self.get_reply(client, expect_code=replies.ERR_UMODEUNKNOWNFLAG, expect_nick=nick,
                           expect_nparams=1,
                           long_param_re="Unknown MODE flag")
        else:
            if expect_relay:
                reply = self.get_message(client, expect_prefix=True, expect_cmd="MODE",
                                         expect_nparams=2, expect_short_params=[nick_mode],
                                         long_param_re=mode)
                self._assert_equals(reply.prefix.hostname, nick,
                                    explanation="Expected MODE's prefix to be nick '{}'".format(nick),
                                    irc_msg=reply)
            else:
                self.get_reply(client, expect_timeout=True)

    def set_channel_mode(self, client, nick, channel, mode=None, nick_mode=None, expect_mode=None,
                         expect_wrong_channel=False, expect_wrong_mode=False, expect_ops_needed=False,
                         expect_not_on_channel=False):
        """Send a channel MODE (query or change) and verify the outcome.

        With no ``mode``, queries the channel mode and checks it against
        ``expect_mode``; otherwise sets the mode (optionally on ``nick_mode``)
        and checks the expected success reply or error numeric.
        """
        if mode is None and nick_mode is None:
            client.send_cmd("MODE %s" % channel)
        elif nick_mode is None:
            client.send_cmd("MODE %s %s" % (channel, mode))
        else:
            client.send_cmd("MODE %s %s %s" % (channel, mode, nick_mode))
        if expect_wrong_channel:
            self.get_reply(client, expect_code=replies.ERR_NOSUCHCHANNEL, expect_nick=nick,
                           expect_nparams=2, expect_short_params=[channel],
                           long_param_re="No such channel")
            return
        if mode is None and nick_mode is None:
            reply = self.get_reply(client, expect_code=replies.RPL_CHANNELMODEIS, expect_nick=nick,
                                   expect_nparams=2, expect_short_params=[channel])
            mode_string = reply.params[-1]
            self._assert_equals(mode_string[0], "+",
                                explanation="Returned mode string does not start with '+'",
                                irc_msg=reply)
            mode_string = mode_string[1:]
            if expect_mode is not None:
                self._assert_equals(len(mode_string), len(expect_mode),
                                    explanation="Expected mode string to have length {}".format(len(expect_mode)),
                                    irc_msg=reply)
                for m in expect_mode:
                    self._assert_in(m, mode_string,
                                    explanation="Expected mode string to have '{}', got this instead: {}".format(m,
                                                                                                                 mode_string),
                                    irc_msg=reply)
        else:
            if expect_wrong_mode:
                self.get_reply(client, expect_code=replies.ERR_UNKNOWNMODE, expect_nick=nick,
                               expect_nparams=2, expect_short_params=[mode[1]],
                               long_param_re="is unknown mode char to me for (?P<channel>.+)",
                               long_param_values={"channel": channel})
            if expect_ops_needed:
                self.get_reply(client, expect_code=replies.ERR_CHANOPRIVSNEEDED, expect_nick=nick,
                               expect_nparams=2, expect_short_params=[channel],
                               long_param_re="You're not channel operator")
            if nick_mode is not None and expect_not_on_channel:
                self.get_reply(client, expect_code=replies.ERR_USERNOTINCHANNEL, expect_nick=nick,
                               expect_nparams=3, expect_short_params=[nick_mode, channel],
                               long_param_re="They aren't on that channel")

    # Message/reply getters
    def get_reply(self, client, expect_code=None, expect_nick=None, expect_nparams=None,
                  expect_short_params=None, long_param_re=None, long_param_values=None,
                  expect_timeout=False):
        """Receive a numeric reply and verify it; fail on EOF or bad timing.

        With ``expect_timeout`` set, passes only when no reply arrives.
        """
        try:
            msg = client.get_message()
            if expect_timeout:
                pytest.fail("Was not expecting a reply, but got one:\n" + msg.raw(bookends=True))
        except EOFError:
            pytest.fail("Server closed connection unexpectedly. Possible segfault in server?")
        except ReplyTimeoutException as rte:
            if expect_timeout:
                return None
            if len(rte.bytes_received) == 0:
                failmsg = "Expected a reply but got none (no bytes received)"
            else:
                failmsg = "Expected a reply but did not get valid reply terminated with \\r\\n. Bytes received:\n|||{}|||".format(
                    rte.bytes_received)
            pytest.fail(failmsg)
        self.verify_reply(msg, expect_code, expect_nick, expect_nparams, expect_short_params, long_param_re,
                          long_param_values)
        return msg

    def get_message(self, client, expect_prefix=None, expect_cmd=None, expect_nparams=None,
                    expect_short_params=None, long_param_re=None, long_param_values=None):
        """Receive any message and verify it against the given expectations."""
        try:
            msg = client.get_message()
        except EOFError:
            pytest.fail("Server closed connection unexpectedly. Possible segfault in server?")
        self.verify_message(msg, expect_prefix, expect_cmd,
                            expect_nparams, expect_short_params,
                            long_param_re, long_param_values)
        return msg

    # Verifiers
    def verify_message(self, msg, expect_prefix=None, expect_cmd=None,
                       expect_nparams=None, expect_short_params=None,
                       long_param_re=None, long_param_values=None):
        """Check a message's prefix, command, parameter count/values, and
        long parameter (anchored regex, with optional named-group values)."""
        if expect_prefix != None and expect_prefix:
            assert msg.prefix is not None, "Expected a prefix, but got none.\nMessage: {}".format(
                msg.raw(bookends=True))
        if expect_cmd != None:
            self._assert_equals(msg.cmd, expect_cmd,
                                "Expected command {}, got {} instead".format(expect_cmd, msg.cmd),
                                irc_msg=msg)
        if expect_nparams != None:
            nparams = len(msg.params)
            self._assert_equals(nparams, expect_nparams,
                                "Expected {} parameters, got {} instead".format(expect_nparams, nparams),
                                irc_msg=msg)
        if expect_short_params != None:
            for i, expect_p, p in zip(range(len(expect_short_params)), expect_short_params, msg.params):
                if expect_p is not None:
                    self._assert_equals(str(p), str(expect_p),
                                        "Expected parameter #{} to be {}, got {} instead".format(str(i + 1),
                                                                                                 str(expect_p), str(p)),
                                        irc_msg=msg)
        if long_param_re != None:
            lpre = "^:%s$" % long_param_re
            lp = msg.params[-1]
            match = re.match(lpre, lp)
            self._assert_is_not_none(match,
                                     "|||%s||| <-- Long parameter does not match regular expression: %s" % (lp, lpre),
                                     irc_msg=msg)
            if long_param_values != None:
                for k, v in long_param_values.items():
                    self._assert_equals(match.group(k), str(v),
                                        "Expected <{}> in long parameter to be {}, not {} (long parameter regex: {})".format(
                                            k, v, match.group(k), lpre),
                                        irc_msg=msg)

    def verify_reply(self, msg, expect_code=None, expect_nick=None, expect_nparams=None,
                     expect_short_params=None, long_param_re=None, long_param_values=None):
        """Like verify_message, but accounts for the leading nick parameter
        that numeric replies carry."""
        if expect_nparams is not None:
            nparams = expect_nparams + 1
        else:
            nparams = expect_nparams
        if expect_short_params is not None:
            if expect_nick is not None:
                short_params = [expect_nick] + expect_short_params
            else:
                short_params = [None] + expect_short_params
        else:
            if expect_nick is not None:
                short_params = [expect_nick]
            else:
                short_params = None
        self.verify_message(msg, expect_prefix=True, expect_cmd=expect_code,
                            expect_nparams=nparams, expect_short_params=short_params,
                            long_param_re=long_param_re, long_param_values=long_param_values)

    def verify_welcome_messages(self, client, nick, user=None):
        """Verify the RPL_WELCOME..RPL_MYINFO registration burst; return replies."""
        r = []
        if user is None:
            user = nick
        reply = self.get_reply(client, expect_code=replies.RPL_WELCOME, expect_nick=nick, expect_nparams=1,
                               long_param_re="Welcome to the Internet Relay Network {}!{}.*".format(nick, user))
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_YOURHOST, expect_nick=nick, expect_nparams=1)
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_CREATED, expect_nick=nick, expect_nparams=1)
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_MYINFO, expect_nick=nick, expect_nparams=4)
        r.append(reply)
        return r

    def verify_lusers(self, client, nick, expect_users=None, expect_ops=None, expect_unknown=None, expect_channels=None,
                      expect_clients=None):
        """Verify the five LUSERS replies, optionally checking their counts."""
        r = []
        reply = self.get_reply(client, expect_code=replies.RPL_LUSERCLIENT, expect_nick=nick, expect_nparams=1)
        if expect_users is not None:
            self.verify_reply(reply,
                              long_param_re="There are (?P<users>\d+) users and 0 services on 1 servers",
                              long_param_values={"users": expect_users})
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_LUSEROP, expect_nick=nick,
                               expect_nparams=2, long_param_re="operator\(s\) online")
        if expect_ops is not None:
            self.verify_reply(reply, expect_short_params=[expect_ops])
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_LUSERUNKNOWN, expect_nick=nick,
                               expect_nparams=2, long_param_re="unknown connection\(s\)")
        if expect_unknown is not None:
            self.verify_reply(reply, expect_short_params=[expect_unknown])
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_LUSERCHANNELS, expect_nick=nick,
                               expect_nparams=2, long_param_re="channels formed")
        if expect_channels is not None:
            self.verify_reply(reply, expect_short_params=[expect_channels])
        r.append(reply)
        reply = self.get_reply(client, expect_code=replies.RPL_LUSERME, expect_nick=nick, expect_nparams=1)
        if expect_clients is not None:
            self.verify_reply(reply,
                              long_param_re="I have (?P<clients>\d+) clients and (?P<servers>\d+) servers",
                              long_param_values={"clients": expect_clients})
        r.append(reply)
        return r

    def verify_motd(self, client, nick, expect_motd=None):
        """Verify either ERR_NOMOTD or a full MOTD matching ``expect_motd``."""
        r = []
        if expect_motd is None:
            reply = self.get_reply(client, expect_code=replies.ERR_NOMOTD, expect_nick=nick,
                                   expect_nparams=1, long_param_re="MOTD File is missing")
            r.append(reply)
        else:
            reply = self.get_reply(client, expect_code=replies.RPL_MOTDSTART, expect_nick=nick,
                                   expect_nparams=1, long_param_re="- .* Message of the day - ")
            r.append(reply)
            motd_lines = expect_motd.strip().split("\n")
            for l in motd_lines:
                reply = self.get_reply(client, expect_code=replies.RPL_MOTD, expect_nick=nick,
                                       expect_nparams=1, long_param_re="- " + l)
                r.append(reply)
            reply = self.get_reply(client, expect_code=replies.RPL_ENDOFMOTD, expect_nick=nick,
                                   expect_nparams=1, long_param_re="End of MOTD command")
            r.append(reply)
        return r

    def verify_disconnect(self, client):
        """Verify the server closed the client's connection (e.g. after QUIT)."""
        try:
            client.get_message()
        except EOFError:
            return
        except ReplyTimeoutException:
            pytest.fail("Server did not close connection after QUIT")
        else:
            pytest.fail("Server did not close connection after QUIT")

    def verify_join(self, client, nick, channel, expect_topic=None, expect_names=None):
        """Verify the joining client's own JOIN echo, topic (if any), and NAMES."""
        self.verify_relayed_join(client, nick, channel)
        if expect_topic != None:
            self.get_reply(client, expect_code=replies.RPL_TOPIC, expect_nick=nick,
                           expect_nparams=2, expect_short_params=[channel], long_param_re=expect_topic)
        self.verify_names(client, nick, expect_names=expect_names)

    def verify_relayed_join(self, client, from_nick, channel):
        """Verify a relayed JOIN for ``channel`` (optionally from ``from_nick``)."""
        reply = self.get_message(client, expect_prefix=True, expect_cmd="JOIN",
                                 expect_nparams=1, expect_short_params=[channel])
        if from_nick != None:
            self._assert_equals(reply.prefix.nick, from_nick,
                                explanation="Expected JOIN's prefix to have nick '{}'".format(from_nick),
                                irc_msg=reply)

    def verify_relayed_part(self, client, from_nick, channel, msg):
        """Verify a relayed PART from ``from_nick`` with optional part message."""
        if msg != None:
            expect_nparams = 2
        else:
            expect_nparams = 1
        reply = self.get_message(client, expect_prefix=True, expect_cmd="PART",
                                 expect_nparams=expect_nparams, expect_short_params=[channel],
                                 long_param_re=msg)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected PART's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_relayed_quit(self, client, from_nick, msg):
        """Verify a relayed QUIT from ``from_nick`` with message ``msg``."""
        reply = self.get_message(client, expect_prefix=True, expect_cmd="QUIT",
                                 expect_nparams=1, long_param_re=msg)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected QUIT's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_relayed_nick(self, client, from_nick, newnick):
        """Verify a relayed NICK change from ``from_nick`` to ``newnick``."""
        reply = self.get_message(client, expect_prefix=True, expect_cmd="NICK",
                                 expect_nparams=1, long_param_re=newnick)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected NICK's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_relayed_privmsg(self, client, from_nick, recip, msg):
        """Verify a relayed PRIVMSG from ``from_nick`` to ``recip``."""
        reply = self.get_message(client, expect_prefix=True, expect_cmd="PRIVMSG",
                                 expect_nparams=2, expect_short_params=[recip],
                                 long_param_re=msg)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected PRIVMSG's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_relayed_topic(self, client, from_nick, channel, topic):
        """Verify a relayed TOPIC change on ``channel`` from ``from_nick``."""
        reply = self.get_message(client, expect_prefix=True, expect_cmd="TOPIC",
                                 expect_nparams=2, expect_short_params=[channel],
                                 long_param_re=topic)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected TOPIC's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_relayed_mode(self, client, from_nick, channel, mode, mode_nick=None):
        """Verify a relayed channel MODE change, optionally targeting a nick."""
        if mode_nick is not None:
            expect_nparams = 3
            expect_short_params = [channel, mode, mode_nick]
        else:
            expect_nparams = 2
            expect_short_params = [channel, mode]
        reply = self.get_message(client, expect_prefix=True, expect_cmd="MODE",
                                 expect_nparams=expect_nparams, expect_short_params=expect_short_params)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected MODE's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_relayed_notice(self, client, from_nick, recip, msg):
        """Verify a relayed NOTICE from ``from_nick`` to ``recip``."""
        reply = self.get_message(client, expect_prefix=True, expect_cmd="NOTICE",
                                 expect_nparams=2, expect_short_params=[recip],
                                 long_param_re=msg)
        self._assert_equals(reply.prefix.nick, from_nick,
                            explanation="Expected NOTICE's prefix to have nick '{}'".format(from_nick),
                            irc_msg=reply)

    def verify_names_single(self, reply, nick, expect_channel=None, expect_names=None):
        """Verify one RPL_NAMREPLY line's channel marker and name list."""
        if expect_channel is not None:
            if expect_channel == "*":
                self._assert_equals(reply.params[1], "*",
                                    explanation="Expected first parameter to be '*'",
                                    irc_msg=reply)
                self._assert_equals(reply.params[2], "*",
                                    explanation="Expected second parameter to be '*'",
                                    irc_msg=reply)
            else:
                self._assert_equals(reply.params[1], "=",
                                    explanation="Expected first parameter to be '='",
                                    irc_msg=reply)
                self._assert_equals(reply.params[2], expect_channel,
                                    explanation="Expected channel in NAMES to be {}".format(expect_channel),
                                    irc_msg=reply)
        if expect_names is not None:
            names = reply.params[3][1:].split(" ")
            self._assert_equals(len(names), len(expect_names),
                                explanation="Expected list of names to have {} entries".format(len(expect_names)),
                                irc_msg=reply)
            for name in expect_names:
                self._assert_in(name, names,
                                explanation="Expected {} in NAMES".format(name),
                                irc_msg=reply)

    def verify_names(self, client, nick, expect_channel=None, expect_names=None):
        """Verify an RPL_NAMREPLY followed by RPL_ENDOFNAMES."""
        reply = self.get_reply(client, expect_code=replies.RPL_NAMREPLY, expect_nick=nick,
                               expect_nparams=3)
        self.verify_names_single(reply, nick, expect_channel, expect_names)
        if expect_channel is not None:
            expect_short_params = [expect_channel]
        else:
            expect_short_params = None
        self.get_reply(client, expect_code=replies.RPL_ENDOFNAMES, expect_nick=nick,
                       expect_short_params=expect_short_params, expect_nparams=2)
| 2.296875 | 2 |
krankenfinder/preprocessor.py | tuxedocat/ntcir13-medweb | 0 | 12768202 | <gh_stars>0
"""Perform pre-process such as normalization by rules"""
import pandas as pd
import re
import functools
def apply_ja(s: str) -> str:
    """Japanese-text preprocessing entry point (currently an identity stub)."""
    return s
def _ja_split_sentence(s: str) -> str:
    """Sentence splitting for Japanese text -- not yet implemented (identity stub)."""
    return s
def _ja_normalize(s: str) -> str:
    """Text normalization for Japanese text -- not yet implemented (identity stub)."""
    return s
| 2.734375 | 3 |
api/lib/mysqlclient/MySQL.py | NLeRoy917/indcovid | 0 | 12768203 | <gh_stars>0
import MySQLdb
import sys
import time
class MySQL():
    """
    Python interface for the mysql database running on AWS EC2.
    """
    def __init__(self, server, user, password, database='indcovid', port=3306):
        """
        Connect to the MySQL server and open a cursor.

        :server - hostname of the MySQL server
        :user - username to authenticate with
        :password - password to authenticate with
        :database [Optional] - name of database to connect to - defaults to 'indcovid'
        :port [Optional] - TCP port - defaults to 3306

        NOTE(review): on any connection failure this calls exit(1), which
        kills the whole process -- consider re-raising instead so library
        users can handle the error.
        """
        self._server = server
        self.database_name = database
        self._user = user
        self._password = password
        self.timeout = 1000
        self._port = port
        self._cursor = None
        self._db = None
        try:
            self._db = MySQLdb.connect(
                host=self._server,
                user=self._user,
                passwd=self._password,
                db=self.database_name,
                port=self._port
            )
            self._cursor = self._db.cursor()
            print("Connected to database.")
        except Exception as err:
            print("Unable to connect to database.")
            print(err)
            exit(1)
    def __del__(self):
        # NOTE(review): only the cursor is closed here; the underlying
        # connection (self._db) is never closed explicitly.
        if self._cursor:
            self._cursor.close()
    def _query(self,query,data=None):
        '''
        execute a query against the database
        :query - the SQL query to execute (can be parameterized)
        :data - a list of data that needs to be passed to the query if parameterized
        return - the result of the query. A list of lists for each row returned
        '''
        # run query
        if data:
            self._cursor.execute(query,data)
        else:
            self._cursor.execute(query)
        # Fetch the data
        data = self._cursor.fetchall()
        # Convert data to list for easier manipulation
        result = []
        for row in data:
            row = list(row)
            result.append(row)
        return result
    def get_county_hospitals(self):
        """
        Query to get the county hospital data on inpatient v outpatient facilities that are available
        """
        query = '''
        SELECT * FROM indcovid.CountyHospitals
        '''
        result = self._query(query)
        return result
    def get_education_geographic(self,year=2016):
        """
        Query to get the education attainment statistics based on geographic reagion in indiana.
        The data is so large, that a year must be specified from the following:
        2012, 2013, 2014, 2015, 2016
        It defaults to 2016 - the most recent data
        """
        query = '''
        SELECT * FROM indcovid.EducationGeographic WHERE year = %s'''
        result = self._query(query, data=[year])
        return result
    def get_expenditure(self):
        """
        Get the expenditure table days - it is a lot of data. Non-performant and shouldnt be used on the UI
        """
        query = '''
        SELECT * FROM Expenditure
        '''
        result = self._query(query)
        return result
    def get_demographics(self):
        """
        Get the most recent demographic data for Indiana on race
        """
        query = '''
        SELECT * FROM indcovid.IndianaDemographics
        '''
        result = self._query(query)
        return result
    def get_median_income(self, year=2016):
        """
        Get the median houshold income for a specific year. Defaults to 2016
        """
        query = '''
        SELECT * FROM indcovid.MedianHouseholdIncome
        WHERE year = %s
        '''
        result = self._query(query, data=[year])
        return result
    def get_medicaid_funding(self):
        """
        Get the most common funding for medicaid claims
        """
        query = '''
        SELECT * FROM indcovid.MedicaidFundingSource
        '''
        result = self._query(query)
        return result
    def get_medicaid_race(self):
        """
        Get the demographics data for mediaid use
        """
        query = '''
        SELECT * FROM indcovid.MedicaidRace
        '''
        result = self._query(query)
        return result
    def get_median_rent(self):
        """
        Gets the median rent for specific geographic regions in Indiana. All for the year 2016

        NOTE(review): the query below selects from MedianHouseholdIncome,
        not a rent table -- looks like a copy/paste slip; confirm the
        intended table name before relying on this method.
        """
        query = '''
        SELECT * FROM indcovid.MedianHouseholdIncome
        '''
        result = self._query(query)
        return result
    def bulk_insert(self,query,data):
        """Execute a parameterized query once per row in ``data``."""
        self._cursor.executemany(query,data)
        return
    def test_cnx(self):
        """Sanity-check the connection with a trivial SELECT."""
        result = self._query('''SELECT 1;''')
        return result
if __name__ == '__main__':
    start = time.time()
    # NOTE(review): the constructor signature is (server, user, password, ...);
    # passing a config-file path here looks like a leftover from an earlier
    # version and would be treated as the host name -- confirm before running.
    mysql = MySQL('../../config/config.ini')
    result = mysql.get_median_rent()
    end = time.time()
    print('{} results returned'.format(len(result)))
    print('Elapsed Time: %10s' % format(end-start))
| 3.40625 | 3 |
optim/optimizer.py | aiwizzard/text2text | 0 | 12768204 | <reponame>aiwizzard/text2text
import torch.optim as optim
class ScheduledOptimizer:
    """Inverse-square-root ("Noam") learning-rate schedule around a base optimizer.

    The rate rises linearly for ``warmup`` steps, then decays with
    ``step ** -0.5``, the whole curve scaled by ``factor * model_dim ** -0.5``.
    """

    def __init__(
        self, optimizer: optim.Optimizer, factor=2, model_dim=2048, warmup=4000
    ):
        super(ScheduledOptimizer, self).__init__()
        self.optimizer = optimizer
        self.factor = factor
        self.model_dim = model_dim
        self.warmup = warmup
        self.n_steps = 0
        self.learning_rate = 0

    def zero_grad(self):
        """Clear gradients on the wrapped optimizer."""
        self.optimizer.zero_grad()

    def step(self):
        """Advance the schedule by one step, then apply the parameter update."""
        self._update_learning_rate()
        self.optimizer.step()

    def _update_learning_rate(self):
        # lr = factor * d_model^-0.5 * min(step^-0.5, step * warmup^-1.5)
        self.n_steps += 1
        scale = self.model_dim ** -0.5
        lr = self.factor * (
            scale * min(self.n_steps ** -0.5, self.n_steps * self.warmup ** -1.5)
        )
        for group in self.optimizer.param_groups:
            group["lr"] = lr
        self.learning_rate = lr

    def load(self, opt_state_dict, parameters):
        """Restore both the wrapped optimizer state and the schedule state."""
        self.load_state_dict(opt_state_dict)
        self.load_parameters(parameters)

    def state_dict(self):
        return self.optimizer.state_dict()

    def load_state_dict(self, obj):
        self.optimizer.load_state_dict(obj)

    def parameters(self):
        """Snapshot of the schedule state, suitable for checkpointing."""
        return {
            "warmup": self.warmup,
            "n_steps": self.n_steps,
            "factor": self.factor,
            "learning_rate": self.learning_rate,
        }

    def load_parameters(self, obj):
        for key in ("warmup", "n_steps", "factor", "learning_rate"):
            setattr(self, key, obj[key])
| 2.5 | 2 |
tests/web/user_api_test.py | Darkheir/TibetanBrownBear | 9 | 12768205 | """Tests for the User API."""
import json
import time
import jwt
import pytest
from yeti.auth.local import user_management
from yeti.common.config import yeti_config
from yeti.webapp import app
# Shared module-level Flask test client; tests that exercise unauthenticated
# endpoints use it directly, others use the `authenticated_client` fixture.
app.testing = True
client = app.test_client()
# pylint: disable=fixme
# TODO: Consider using pytest-flask for easier testing flask stuff, e.g.:
# - Access to url_for objects to test routes
# - Access to .json attribute of request
@pytest.mark.usefixtures('clean_db')
def test_index(populate_users, authenticated_client):
    """Test that fetched User objects are well-formed"""
    emails = [user.email for user in populate_users]
    for email in emails:
        query_json = {'email': email}
        rv = authenticated_client.post('/api/users/filter/',
                                       data=json.dumps(query_json),
                                       content_type='application/json')
        response = json.loads(rv.data)
        for item in response:
            assert item['email']
            # Password hashes must never leak through the API; the
            # api_key field, however, is expected to be serialized.
            assert 'password' not in item
            assert 'api_key' in item
@pytest.mark.usefixtures('clean_db', 'populate_users')
def test_login():
    """Test that a user gets a valid JWT on successful log-in."""
    query_json = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>'
    }
    rv = client.post('/api/users/login/',
                     data=json.dumps(query_json),
                     content_type='application/json')
    response = json.loads(rv.data)
    assert 'token' in response
    assert response
    # The JWT must verify against the app secret and carry the login
    # email as its subject claim.
    decoded = jwt.decode(response['token'], yeti_config.core.secret_key)
    assert decoded['sub'] == '<EMAIL>'
@pytest.mark.usefixtures('clean_db', 'populate_users')
def test_failed_login():
    """Test that log-in attempts with wrong credentials fail."""
    query_json = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>'
    }
    rv = client.post('/api/users/login/',
                     data=json.dumps(query_json),
                     content_type='application/json')
    response = json.loads(rv.data)
    # The error is generic on purpose (same as the nonexistent-user case).
    assert response == {'error': 'Invalid credentials for <EMAIL>.'}
@pytest.mark.usefixtures('clean_db')
def test_nonexistent_user():
    """Test that logging-in as a nonexistent user returns a generic error."""
    query_json = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>'
    }
    rv = client.post('/api/users/login/',
                     data=json.dumps(query_json),
                     content_type='application/json')
    response = json.loads(rv.data)
    # Identical message to a bad-password attempt, so the endpoint does
    # not reveal whether the account exists.
    assert response == {'error': 'Invalid credentials for <EMAIL>.'}
@pytest.mark.usefixtures('clean_db', 'populate_users')
def test_protected_resource_access_granted():
    """Tests that an authenticated client has access to protected resources."""
    query_json = {
        'email': '<EMAIL>',
        'password': '<PASSWORD>'
    }
    # Step 1: log in to obtain a fresh JWT.
    rv = client.post('/api/users/login/',
                     data=json.dumps(query_json),
                     content_type='application/json')
    response = json.loads(rv.data)
    assert rv.status_code == 200
    token = response['token']
    # Step 2: present the JWT as a Bearer token on a protected endpoint.
    rv = client.get('/api/users/protected/',
                    headers={'Authorization': f'Bearer: {token}'},
                    content_type='application/json')
    assert rv.status_code == 200
    response = json.loads(rv.data)
    assert response['msg'] == "You're in!"
@pytest.mark.usefixtures('clean_db')
def test_api_key_access_granted(populate_users):
    """Tests that a user using an API key has access to protected resources."""
    user = populate_users[0]
    # API-key auth uses the X-Yeti-API header instead of a JWT.
    rv = client.get('/api/users/protected/',
                    headers={'X-Yeti-API': user.api_key},
                    content_type='application/json')
    assert rv.status_code == 200
    response = json.loads(rv.data)
    assert response['msg'] == "You're in!"
@pytest.mark.usefixtures('clean_db', 'populate_users')
def test_invalid_api_key():
    """Tests that an invalid API key is rejected with 401."""
    rv = client.get('/api/users/protected/',
                    headers={'X-Yeti-API': 'INVALID'},
                    content_type='application/json')
    assert rv.status_code == 401
    response = json.loads(rv.data)
    assert not response['authenticated']
    assert response['message'] == 'Invalid API key.'
@pytest.mark.usefixtures('clean_db', 'populate_users')
def test_protected_resource_access_denied():
    """Tests that an unauthenticated client can't access a protected
    resource."""
    # No Authorization header and no API key at all.
    rv = client.get('/api/users/protected/',
                    content_type='application/json')
    assert rv.status_code == 401
    response = json.loads(rv.data)
    assert not response['authenticated']
    assert response['message'] == ('Invalid or nonexistent token. '
                                   'Please get a new token.')
@pytest.mark.usefixtures('clean_db')
def test_password_reset_expires_token(populate_users, authenticated_client):
    """Tests a password reset expires a user's JWT."""
    rv = authenticated_client.get('/api/users/protected/',
                                  content_type='application/json')
    assert rv.status_code == 200
    # Sleep so the password-reset timestamp is strictly newer than the
    # token's issue time (second-level resolution).
    time.sleep(2)
    admin = populate_users[0]
    # NOTE(review): "<PASSWORD>" is a redaction placeholder in this snapshot.
    user_management.set_password(<PASSWORD>)
    admin.save()
    # The previously valid token must now be rejected.
    rv = authenticated_client.get('/api/users/protected/',
                                  content_type='application/json')
    assert rv.status_code == 401
| 2.390625 | 2 |
NPTEL-Course-Lecture Programmes/KmaxKmin.py | Slow-Rain/NPTEL-The-Joy-of-Computing-using-Python | 29 | 12768206 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 19 13:45:46 2021
@author: <NAME>
"""
def kth_extreme_sum(values, k):
    """Return (k-th largest distinct value) + (k-th smallest distinct value).

    Duplicates count once, matching the original script's
    remove-all-copies-of-the-extreme loops.
    """
    distinct = sorted(set(values))
    return distinct[-k] + distinct[k - 1]


def main():
    # BUG FIX: the original compared the raw input tokens as *strings*
    # (so e.g. max("9", "10") == "9"); convert to int before comparing.
    # It also aliased mylist1 = mylist, which only worked by accident.
    values = [int(token) for token in input().split()]
    k = int(input())
    print(kth_extreme_sum(values, k))


if __name__ == '__main__':
    main()
sila_library/sila2lib/sila_service_detection.py | lemmi25/sila2lib | 0 | 12768207 | <reponame>lemmi25/sila2lib
"""
________________________________________________________________________
:PROJECT: SiLA2_python
*SiLA device detection library*
:details: SiLA Device detection is based on zeroconf / bonjour.
s. zeroconf documentation for details.
server looks for services in local network
server chooses a free port
server receives a logical name
client can use this logicial name to address server
:file: sila_device_detection.py
:authors: <NAME> (<EMAIL>)
<NAME>
<NAME>
:date: (creation) 20180530
:date: (last modification) 2019-11-09
.. note:: -
.. todo:: - check available ports and select first free port available
________________________________________________________________________
**Copyright**:
This file is provided "AS IS" with NO WARRANTY OF ANY KIND,
INCLUDING THE WARRANTIES OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
For further Information see LICENSE file that comes with this distribution.
________________________________________________________________________
"""
__version__ = "0.0.6"
import socket
from zeroconf import ServiceInfo, Zeroconf
import logging
class SiLA2ServiceDetection():
    """Registers/unregisters a SiLA2 service on the local network via zeroconf."""

    def __init__(self):
        """Prepare the zeroconf handle used for (un)registration."""
        self.service_tag = "_sila._tcp.local."  # specified in SiLA standard, Part B
        self.zc: Zeroconf = Zeroconf()

    def register_service(self,
                         service_name="SiLATestDevice",
                         IP: str = "127.0.0.1",
                         port: int = None,
                         description: dict = None,
                         server_uuid: str = None,
                         server_hostname: str = None) -> None:
        """Register the SiLA2 service in the given network with zeroconf.

        :param service_name: logical service name (currently unused in the record).
        :param IP: address the service listens on.
        :param port: desired port; when None or already occupied, the first
            free port from 55001 upwards is used instead.
        :param description: TXT-record properties. Defaults to a small stub
            description built per call (FIX: the original used a mutable
            default argument, shared across calls).
        :param server_uuid: server UUID, used as the zeroconf instance name.
        :param server_hostname: host name; defaults to the local FQDN.
        :raises OSError: for bind failures other than "address in use".
        """
        if description is None:
            description = {'version': __version__,
                           'descr1': 'zeroconf def. description 1',
                           'descr2': 'zeroconf def. description 2'}
        if server_hostname is None:
            server_hostname = socket.getfqdn()
        # This code only checks whether a port is open or closed.
        # Todo: Implement code that checks if the used open port is used
        # otherwise to avoid multiple use of ports.
        if port is not None:
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            try:
                s.bind(('', port))  # Probe: can we open the requested port?
            except OSError as err:
                # FIX: compare errno with ==, not "is" (identity of ints is
                # an implementation detail). errno 98 == EADDRINUSE.
                if err.errno == 98:
                    logging.error(f"Specified port {port} is already occupied - ({err})")
                    port = None
                else:
                    raise err
            finally:
                # FIX: close the probe socket on every path, not only success.
                s.close()
        if port is None:
            port = 55001  # range(55001, ....)
            while True:
                s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                try:
                    s.bind(('', port))  # Try to open port
                except OSError as err:
                    if err.errno == 98:  # address already bound
                        port += 1
                        continue
                    raise err
                finally:
                    s.close()
                logging.info(
                    f"No port specified or specified port is occupied. Registering Service on next free port: {port}")
                break
        self.service_info = ServiceInfo(type_=self.service_tag,
                                        name=f"{server_uuid}.{self.service_tag}",
                                        addresses=[socket.inet_aton(IP)], port=port, weight=0, priority=0,
                                        properties=description, server=f"{server_hostname}.local.")  # .local is necessary, LB
        logging.info(
            f"registering the SiLA2 service '{server_uuid}.{self.service_tag}' in the network at {IP} with port {port} as hostname '{server_hostname}''")
        try:
            self.zc.register_service(self.service_info)
        except Exception as err:  # zeroconf does not expose a narrower type here
            logging.error(f"{err}")
            raise
        logging.info("Zeroconf registration done ...")

    def unregister_service(self):
        """Remove the previously registered service from the network."""
        logging.info(f"unregistering the SiLA2 service {self.service_info.name} in the network {self.service_info.server}")
        try:
            self.zc.unregister_service(self.service_info)
        except Exception as err:  # zeroconf does not expose a narrower type here
            logging.error(f"{err}")
            raise
        logging.info("Zeroconf service removed ...")

    def find_service_by_name(self, service_name=""):
        """Find a given service by name and return its connection parameters.

        Placeholder: always returns default values for now.

        :param service_name: service name to search for (currently ignored).
        """
        logging.info("not implemented yet ... just sending default values..")
        return { 'server_name':'localhost', 'port':50001 }

    def watch_for_new_services(self):
        """Placeholder: periodic watcher for appearing/disappearing services.

        If a server drops off the list, a gRPC ping should confirm it is
        really gone (see the unitelabs implementation).
        """
        pass
| 2.1875 | 2 |
activeClassifier/tools/test_tf_tools.py | dHonerkamp/ActiveClassifier | 0 | 12768208 | import numpy as np
import tensorflow as tf
from tools.tf_tools import binary_entropy, repeat_axis
class EntropyTest(tf.test.TestCase):
    """binary_entropy must hit its extremes: H=1 at p=0.5, H=0 at p in {0, 1}."""

    def test_binary_entropy_logits(self):
        # sigmoid(0) == 0.5 -> maximum entropy; sigmoid(+-100) saturates.
        max_entropy = binary_entropy(logits=[0., 0.])
        min_entropy = binary_entropy(logits=[100., -100.])
        with self.test_session():
            self.assertAllEqual(max_entropy.eval(), [1., 1.])
            self.assertAllClose(min_entropy.eval(), [0., 0.])

    def test_binary_entropy_probs(self):
        max_entropy = binary_entropy(probs=tf.constant([0.5, 0.5]))
        min_entropy = binary_entropy(probs=tf.constant([0., 1.]))
        with self.test_session():
            self.assertAllEqual(max_entropy.eval(), [1., 1.])
            self.assertAllEqual(min_entropy.eval(), [0., 0.])
class RepeatsTest(tf.test.TestCase):
    """repeat_axis must agree with numpy's np.repeat."""

    def test_repeat_axis(self):
        base = np.random.rand(10, 10)
        expected = np.repeat(base, repeats=5, axis=1)
        actual = repeat_axis(tf.constant(base), axis=1, repeats=5)
        with self.test_session():
            self.assertAllEqual(expected, actual.eval())
if __name__ == '__main__':
    # Run the test classes above via TensorFlow's test runner.
    tf.test.main()
users/migrations/0005_auto_20210404_0058.py | hanahamberno/project-a-23 | 0 | 12768209 | <filename>users/migrations/0005_auto_20210404_0058.py
# Generated by Django 3.1.7 on 2021-04-04 00:58
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: sets the human-readable plural name
    ("Amenities") on the Amenity model. Do not edit by hand."""

    dependencies = [
        ('users', '0004_auto_20210404_0050'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='amenity',
            options={'verbose_name_plural': 'Amenities'},
        ),
    ]
| 1.53125 | 2 |
uniformConfig/UCStructure/DSField.py | apointea/config-normalizer | 0 | 12768210 | # @Author: <NAME> <kalif>
# @Date: 2017-03-27T01:27:02+02:00
# @Email: <EMAIL>
# @Filename: UCSField.py
# @Last modified by: kalif
# @Last modified time: 2017-04-04T00:51:36+02:00
from ..UCException import *
from ..UCValidators import *
from .Interface import *
class DSField(Interface):
    """Leaf node of the config structure: one value plus its validator chain."""

    def initDS(self, cnt):
        # *cnt* is either a raw value, or a dict carrying optional
        # "default" and "validator" entries.
        self.validators = []
        if isinstance(cnt, dict):
            self.__initValidators(cnt)
            self.value = cnt.get("default", None)
        else:
            self.value = cnt
        self.default = self.value

    def __initValidators(self, cnt):
        # Build the validator instances declared under "validator", if any.
        if "validator" not in cnt:
            return
        try:
            self.validators += UCValidatorFactory.buildArray(cnt["validator"])
        except Exception as e:
            raise UCException("in field - %s" % str(e))

    def get(self):
        """Return the current value."""
        return self.value

    def set(self, value):
        """Run every validator against *value*, then store it."""
        for validator in self.validators:
            validator.check(value)
        self.value = value

    def extract(self):
        """Return the plain value (leaf extraction)."""
        return self.value
| 2.46875 | 2 |
api/serializers.py | IronTooch/bounca | 0 | 12768211 | """Serializers for Certificate API"""
import django_countries
from dj_rest_auth.serializers import UserDetailsSerializer
from django.contrib.auth import password_validation
from django_countries.serializers import CountryFieldMixin
from rest_framework import serializers
from certificate_engine.types import CertificateTypes
from x509_pki.models import Certificate, DistinguishedName, KeyStore
# Shared ISO-3166 country list backing the CountryField mixin.
countries = django_countries.Countries()


class DistinguishedNameSerializer(CountryFieldMixin, serializers.ModelSerializer):
    """Serializes the X.509 subject (distinguished name) of a certificate."""

    class Meta:
        fields = (
            "commonName",
            "countryName",
            "stateOrProvinceName",
            "localityName",
            "organizationName",
            "organizationalUnitName",
            "emailAddress",
            "subjectAltNames",
        )
        model = DistinguishedName
class CertificateSerializer(serializers.ModelSerializer):
    """Creates and reads certificates.

    All passphrase fields are write-only: they are validated on input but
    never serialized back to the client.
    """

    dn = DistinguishedNameSerializer()
    passphrase_issuer = serializers.CharField(max_length=200, required=False, allow_null=True, allow_blank=True)
    passphrase_out = serializers.CharField(max_length=200, required=False, allow_null=True, allow_blank=True)
    passphrase_out_confirmation = serializers.CharField(
        max_length=200, required=False, allow_null=True, allow_blank=True
    )
    # The owner is always the requesting user, never client-supplied.
    owner = serializers.HiddenField(default=serializers.CurrentUserDefault())

    class Meta:
        fields = (
            "id",
            "name",
            "owner",
            "parent",
            "type",
            "dn",
            "created_at",
            "expires_at",
            "revoked_at",
            "days_valid",
            "expired",
            "revoked",
            "crl_distribution_url",
            "ocsp_distribution_host",
            "passphrase_issuer",
            "passphrase_out",
            "passphrase_out_confirmation",
        )
        model = Certificate
        extra_kwargs = {
            "passphrase_out": {"write_only": True},
            "passphrase_out_confirmation": {"write_only": True},
            "passphrase_issuer": {"write_only": True},
        }

    def validate_passphrase_out(self, passphrase_out):
        # Enforce Django's password policy on the new key's passphrase.
        if passphrase_out:
            password_validation.validate_password(passphrase_out, self.instance)
            return passphrase_out
        return None

    def validate_passphrase_issuer(self, passphrase_issuer):
        # An issuer passphrase only makes sense together with a parent CA,
        # and it must actually unlock that parent's key store.
        if passphrase_issuer:
            if not self.initial_data.get("parent"):
                raise serializers.ValidationError(
                    "You should provide a parent certificate if you provide an issuer passphrase"
                )
            parent = Certificate.objects.get(pk=self.initial_data.get("parent"))
            try:
                if not parent.is_passphrase_valid(passphrase_issuer):
                    # NOTE(review): the message says "revoke" although this
                    # serializer creates certificates -- looks copied from
                    # CertificateRevokeSerializer; confirm intended wording.
                    raise serializers.ValidationError("Passphrase incorrect. Not allowed " "to revoke your certificate")
            except KeyStore.DoesNotExist:
                raise serializers.ValidationError("Certificate has no cert, something went " "wrong during generation")
            return passphrase_issuer
        return None

    def validate_passphrase_out_confirmation(self, passphrase_out_confirmation):
        # The confirmation must match the original and itself satisfy the
        # password policy.
        if passphrase_out_confirmation:
            passphrase_out = self.initial_data.get("passphrase_out")
            if passphrase_out and passphrase_out_confirmation and passphrase_out != passphrase_out_confirmation:
                raise serializers.ValidationError("The two passphrase fields didn't match.")
            password_validation.validate_password(passphrase_out_confirmation, self.instance)
            return passphrase_out_confirmation
        return None

    def validate(self, data):
        # Fall back to the DN commonName when no explicit name was given,
        # then enforce per-owner uniqueness of (name, type).
        name = data.get("name")
        if not name:
            name = str(data.get("dn").get("commonName"))
        cert_type = data.get("type")
        owner = data.get("owner")
        if Certificate.objects.filter(name=name, owner=owner, type=cert_type).count() > 0:
            raise serializers.ValidationError(f"{dict(Certificate.TYPES)[cert_type]} " f'"{name}" already exists.')
        return data

    def create(self, validated_data):
        # The nested DN must be persisted first so the certificate can
        # reference it.
        dn_data = validated_data.pop("dn")
        dn = DistinguishedName.objects.create(**dn_data)
        certificate = Certificate.objects.create(dn=dn, **validated_data)
        return certificate
class CertificateRevokeSerializer(serializers.ModelSerializer):
    """Validates the issuer passphrase required to revoke a certificate."""

    passphrase_issuer = serializers.CharField(max_length=200, required=True)

    class Meta:
        fields = ("passphrase_issuer",)
        model = Certificate
        extra_kwargs = {"passphrase_issuer": {"write_only": True}}

    def validate_passphrase_issuer(self, passphrase_issuer):
        if passphrase_issuer:
            # A root CA is self-signed, so it acts as its own issuer.
            if self.instance.type == CertificateTypes.ROOT:
                revoke_issuer = self.instance
            else:
                revoke_issuer = self.instance.parent
            try:
                if not revoke_issuer.is_passphrase_valid(passphrase_issuer):
                    raise serializers.ValidationError("Passphrase incorrect. Not allowed " "to revoke your certificate")
            except KeyStore.DoesNotExist:
                raise serializers.ValidationError("Certificate has no cert, something went " "wrong during generation")
            return passphrase_issuer
        return None
class CertificateCRLSerializer(serializers.ModelSerializer):
    """Checks the issuer passphrase and (re)generates the certificate's CRL file."""

    passphrase_issuer = serializers.CharField(max_length=200, required=True)

    class Meta:
        fields = ("passphrase_issuer",)
        model = Certificate
        extra_kwargs = {"passphrase_issuer": {"write_only": True}}

    def validate_passphrase_issuer(self, passphrase_issuer):
        if passphrase_issuer:
            # Stash the passphrase on the instance so is_passphrase_valid()
            # can check it against the key store.
            self.instance.passphrase_issuer = passphrase_issuer
            if not self.instance.is_passphrase_valid():
                raise serializers.ValidationError("Passphrase issuer incorrect. No permission to create CRL File")
            return passphrase_issuer
        return None

    def update(self, instance, validated_data):
        # "Updating" via this serializer means regenerating the CRL.
        instance.passphrase_issuer = validated_data["passphrase_issuer"]
        instance.generate_crl()
        return instance
class UserSerializer(UserDetailsSerializer):
    """User details with a read-only username (cannot be changed via the API)."""

    class Meta(UserDetailsSerializer.Meta):
        fields = ("username", "email", "first_name", "last_name")
        read_only_fields = ("username",)
| 2.203125 | 2 |
dota2crawler/__init__.py | co2y/Wallpaper-Crawler | 0 | 12768212 | # coding=utf-8
# Package metadata for dota2crawler (the email is redacted in this snapshot).
__author__ = 'co2y'
__email__ = '<EMAIL>'
__version__ = '0.0.1'
| 0.972656 | 1 |
parse.py | Algy/Attrobject | 0 | 12768213 | # coding: utf-8
def parse_pattern(s):
    """Classify an attribute-name pattern.

    Returns a ``(category, index, name)`` triple where category is one of
    u"varkwd" (``**name``), u"vararg" (``*name``), u"argument"
    (``name#i`` with positional index *i*) or u"keyword" (plain name).
    *index* is None except for u"argument".

    Raises ValueError when the ``#`` suffix is not a valid integer.
    """
    try:
        if s.startswith(u"**"):
            return u"varkwd", None, s[2:]
        elif s.startswith(u"*"):
            return u"vararg", None, s[1:]
        elif u"#" in s:
            # rindex raises ValueError on a miss and never returns a
            # negative position, so the original "if idx < 0" guard was
            # dead code and has been removed.
            idx = s.rindex(u"#")
            return u"argument", int(s[idx + 1:]), s[:idx]
        else:
            return u"keyword", None, s
    except ValueError:
        raise ValueError("Invalid attribute name pattern: %s" % s)
ordinal = lambda n: "%d%s" % (n,"tsnrhtdd"[(n/10%10!=1)*(n%10<4)*n%10::4])
class AttributeSignature(dict):
    """Maps attribute names to default values, parsed from a signature dict.

    Keys of *signature* follow the grammar of ``parse_pattern``:
    positional (``name#i``), keyword (``name``), var-positional
    (``*name``) and var-keyword (``**name``) entries. The dict itself
    stores attrname -> declared default value.
    """

    def __init__(self, signature):
        # FIX: the original called super(dict, self).__init__(), which
        # skips dict's own initializer; name the subclass instead.
        super(AttributeSignature, self).__init__()
        self._args = []        # positional slot names, indexed by position
        self._maxarg = 0       # highest positional index seen
        self._kwds = []        # plain keyword parameter names
        self._varkwd = None    # name of the **kwargs-style slot, if any
        self._vararg = None    # name of the *args-style slot, if any
        for key, value in signature.items():
            category, index, attrname = parse_pattern(key)
            if category == u'varkwd':
                self._varkwd = attrname
            elif category == u'vararg':
                self._vararg = attrname
            elif category == u'argument':
                if self._maxarg < index:
                    self._maxarg = index
                # Grow the positional slot list so slot *index* exists.
                self._args.extend([None] * max(0, index - len(self._args) + 1))
                self._args[index] = attrname
            else:
                self._kwds.append(attrname)
            self[attrname] = value
        # Every positional slot up to the highest index must be named.
        for idx, arg_attrname in enumerate(self._args):
            if arg_attrname is None:
                raise TypeError(
                    "<argname>#" + str(idx) + " required."
                    " (the %s positional argument out of %s)" % (
                        ordinal(idx + 1),
                        self._maxarg
                    )
                )

    def has_var_kwds(self):
        """Return True when a **kwargs-style slot was declared."""
        return self._varkwd is not None

    def apply_arguments(self, args, kwargs):
        """Bind *args*/*kwargs* to the declared slots.

        Returns a dict mapping attribute name -> bound value. Raises
        TypeError for missing positional arguments, or for surplus
        positional/keyword arguments when no var slot absorbs them.
        """
        result = {}
        params = self._args
        # FIX: removed the original's dead "maxarg = self._args"
        # assignment (wrong value, never used).
        param_idx = 0
        arg_idx = 0
        if self._vararg:
            result[self._vararg] = []
        if self._varkwd:
            result[self._varkwd] = {}
        # Consume positional arguments left to right.
        while param_idx < len(params) and arg_idx < len(args):
            result[params[param_idx]] = args[arg_idx]
            param_idx += 1
            arg_idx += 1
        poskeyword = []  # positional parameters applied by keyword ones
        if param_idx < len(params):  # positional slots left unfilled
            for idx in range(param_idx, len(params)):
                param = params[idx]
                try:
                    result[param] = kwargs[param]
                    poskeyword.append(param)
                except KeyError:
                    raise TypeError("The %s positional argument '%s' is not applied" % (ordinal(idx + 1), param))
        elif arg_idx < len(args):  # surplus positional arguments
            if self._vararg:
                result[self._vararg] = args[arg_idx:]
            else:
                raise TypeError('Unexpected %s positional argument' % ordinal(arg_idx + 1))
        for keyparam in self._kwds:
            if keyparam in poskeyword:
                continue
            try:
                result[keyparam] = kwargs[keyparam]
            except KeyError:
                # Deliberately best-effort: a missing plain keyword keeps
                # its declared default (stored on the dict itself).
                pass
        redundant_keywords = [applied_keyword
                              for applied_keyword in kwargs
                              if applied_keyword not in result]
        if redundant_keywords:
            if self._varkwd:
                result[self._varkwd] = {key: kwargs[key] for key in redundant_keywords}
            else:
                raise TypeError(
                    "Unexpected keyword arguments: %s"
                    % (", ".join(map(repr, redundant_keywords)))
                )
        return result
return result
if __name__ == '__main__':
    # Ad-hoc demos of the pattern parser and signature binding.
    # NOTE(review): these use Python 2 print statements; this demo block
    # will not run under Python 3 as written.
    print parse_pattern("keyword")
    print parse_pattern("attrname#2")
    print parse_pattern("attrname#4")
    print parse_pattern("*args")
    print parse_pattern("**kwds")
    signature = AttributeSignature({
        "a#0": 0,
        "b#1": 0,
        "c": 0,
        "d": 0
    })
    # print signature.apply_arguments([1], {"c": 1, "d": 1})
    # print signature.apply_arguments([1, 1], {'b': 1, "c": 1, "d": 1})
    # print signature.apply_arguments([1, 1], {'b': 1, "d": 1})
    # print signature.apply_arguments([1, 1], {"c": 1, "d": 1, "f": 1})
    signature_var = AttributeSignature({
        "a#0": 0,
        "b#1": 0,
        "c": 0,
        "d": 0,
        "*list": 0,
        "**dict": 0,
    })
    print signature_var.apply_arguments([1, 1, 'z', 't'], {"c": 1, "d": 1, "f": 1})
| 2.890625 | 3 |
testapp/tests.py | tomi77/django-evostream | 6 | 12768214 | <reponame>tomi77/django-evostream<filename>testapp/tests.py<gh_stars>1-10
import json
import os
import django
from pyems.protocols import HTTPProtocol
from evostream.default import api
# The settings module must be configured (and django.setup() run, where
# available) before any code touching Django models is imported below.
os.environ['DJANGO_SETTINGS_MODULE'] = 'testapp.settings'

if hasattr(django, 'setup'):
    django.setup()

try:
    from unittest import mock  # Python 3
except ImportError:
    import mock  # Python 2 fallback: the external "mock" package
from django.core.management import call_command
from django.test import TestCase
class TestHTTPProtocol(HTTPProtocol):
    """Stub protocol that answers every request with one canned JSON payload."""

    def __init__(self, result):
        self.result = json.dumps(result)

    def get_result(self, command, **params):
        # The command and params are ignored; every call gets the fixture.
        return self.result
def load_test_data(filename):
    """Read and parse a JSON fixture from the sibling ``testdata`` directory."""
    path = os.path.join(os.path.dirname(__file__), 'testdata', filename)
    # FIX: use a context manager so the handle is closed even when
    # json.load raises (the original closed only on success).
    with open(path, 'r') as fh:
        return json.load(fh)
class EmsTestCase(TestCase):
    """Base case: loads the JSON fixture named by ``data_file`` and installs
    it as the fake EMS protocol before every test."""
    data_file = None
    data = None

    def setUp(self):
        self.data = load_test_data(self.data_file)
        api.protocol = TestHTTPProtocol(self.data)
@mock.patch('pyems.utils.logger', mock.Mock())
class PullStreamTestCase(EmsTestCase):
    """CLI test for the ``pullstream`` management command."""
    data_file = 'pull_stream.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('pullstream', 'rtmp://s2pchzxmtymn2k.cloudfront.net/cfx/st/mp4:sintel.mp4',
                         localStreamName='testpullstream')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten every positional argument ever passed to write().
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['localStreamName']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class PushStreamTestCase(EmsTestCase):
    """CLI test for the ``pushstream`` management command."""
    data_file = 'push_stream.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('pushstream', 'rtmp://DestinationAddress/live',
                         localStreamName='testpullstream', targetStreamName='testpushStream')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten every positional argument ever passed to write().
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['localStreamName']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListStreamsIdsTestCase(EmsTestCase):
    """CLI test for ``liststreamsids``: the fixture IDs must be printed."""
    data_file = 'list_streams_ids.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('liststreamsids')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for _id in ['205', '206', '207']:
                try:
                    out.index(_id)
                except ValueError:
                    self.fail('ID %s not found' % _id)
@mock.patch('pyems.utils.logger', mock.Mock())
class GetStreamInfoTestCase(EmsTestCase):
    """CLI tests for ``getstreaminfo`` (verbose and default output)."""
    data_file = 'get_stream_info.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            # verbosity=2 should print every field of the fixture record.
            call_command('getstreaminfo', '1', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data'].keys():
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            # Default verbosity prints only the summary columns.
            call_command('getstreaminfo', '1')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['uniqueId', 'name']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListStreamsTestCase(EmsTestCase):
    """CLI tests for ``liststreams`` (verbose and default output)."""
    data_file = 'list_streams.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            # verbosity=2 should print every field of every fixture record.
            call_command('liststreams', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for data in self.data['data']:
                for key in data.keys():
                    try:
                        out.index(key)
                    except ValueError:
                        self.fail('Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            # Default verbosity prints only the summary columns.
            call_command('liststreams')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['uniqueId', 'name']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class GetStreamsCountTestCase(EmsTestCase):
    """CLI test for ``getstreamscount``: every fixture field must appear."""
    data_file = 'get_streams_count.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('getstreamscount')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data'].keys():
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ShutdownStreamTestCase(EmsTestCase):
    """CLI test for ``shutdownstream``: every fixture field must appear."""
    data_file = 'shutdown_stream.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('shutdownstream', '55')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data'].keys():
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListConfigTestCase(EmsTestCase):
    """CLI test for ``listconfig``: every fixture field must appear."""
    data_file = 'list_config.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('listconfig')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data'].keys():
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class RemoveConfigTestCase(EmsTestCase):
    """CLI tests for ``removeconfig`` (verbose and default output)."""
    data_file = 'remove_config.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            call_command('removeconfig', '555', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('removeconfig', '555')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['configId']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class GetConfigInfoTestCase(EmsTestCase):
    """CLI tests for ``getconfiginfo`` (verbose and default output)."""
    data_file = 'get_config_info.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            call_command('getconfiginfo', '1', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('getconfiginfo', '1')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['configId', 'localStreamName']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class AddStreamAliasTestCase(EmsTestCase):
    """CLI test for ``addstreamalias`` (with a negative expirePeriod)."""
    data_file = 'add_stream_alias.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('addstreamalias', 'MyStream', 'video1', expirePeriod=-300)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListStreamAliasesTestCase(EmsTestCase):
    """CLI tests for ``liststreamaliases`` (verbose and default output)."""
    data_file = 'list_stream_aliases.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            call_command('liststreamaliases', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for data in self.data['data']:
                for key in data:
                    try:
                        out.index(key)
                    except ValueError:
                        self.fail('Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            call_command('liststreamaliases')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['aliasName', 'localStreamName']:
                try:
                    out.index(key)
                except ValueError:
                    self.fail('Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class RemoveStreamAliasTestCase(EmsTestCase):
    """CLI tests for the ``removestreamalias`` management command."""
    data_file = 'remove_stream_alias.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of the fixture data."""
            call_command('removestreamalias', 'video1')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class FlushStreamAliasesTestCase(EmsTestCase):
    """CLI tests for the ``flushstreamaliases`` management command."""
    data_file = 'flush_stream_aliases.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """The command reports 'No data' after flushing."""
            call_command('flushstreamaliases')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            # assertIn replaces the original try/except ValueError idiom.
            self.assertIn('No data', out, 'Key "No data" not found')
@mock.patch('pyems.utils.logger', mock.Mock())
class AddGroupNameAliasTestCase(EmsTestCase):
    """CLI tests for the ``addgroupnamealias`` management command."""
    data_file = 'add_group_name_alias.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            """Verbose output must contain every key of the fixture data."""
            call_command('addgroupnamealias', 'MyGroup', 'TestGroupAlias', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Default output must contain the summary keys."""
            call_command('addgroupnamealias', 'MyGroup', 'TestGroupAlias')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['aliasName', 'groupName']:
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class FlushGroupNameAliasesTestCase(EmsTestCase):
    """CLI tests for the ``flushgroupnamealiases`` management command."""
    data_file = 'flush_group_name_aliases.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """The command reports 'No data' after flushing."""
            call_command('flushgroupnamealiases')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            # assertIn replaces the original try/except ValueError idiom.
            self.assertIn('No data', out, 'Key "No data" not found')
@mock.patch('pyems.utils.logger', mock.Mock())
class GetGroupNameByAliasTestCase(EmsTestCase):
    """CLI tests for the ``getgroupnamebyalias`` management command."""
    data_file = 'get_group_name_by_alias.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            """Verbose output must contain every key of the fixture data."""
            call_command('getgroupnamebyalias', 'TestGroupAlias', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Default output must contain the summary keys."""
            call_command('getgroupnamebyalias', 'TestGroupAlias')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['aliasName', 'groupName']:
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListGroupNameAliasesTestCase(EmsTestCase):
    """CLI tests for the ``listgroupnamealiases`` management command."""
    data_file = 'list_group_name_aliases.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of every fixture record."""
            call_command('listgroupnamealiases')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for data in self.data['data']:
                for key in data:
                    # assertIn replaces the original try/except ValueError idiom.
                    self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class RemoveGroupNameAliasTestCase(EmsTestCase):
    """CLI tests for the ``removegroupnamealias`` management command."""
    data_file = 'remove_group_name_alias.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of the fixture data."""
            call_command('removegroupnamealias', 'video1')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListHttpStreamingSessionsTestCase(EmsTestCase):
    """CLI tests for the ``listhttpstreamingsessions`` management command."""
    data_file = 'list_http_streaming_sessions.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of every fixture record."""
            call_command('listhttpstreamingsessions')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for data in self.data['data']:
                for key in data:
                    # assertIn replaces the original try/except ValueError idiom.
                    self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class CreateIngestPointTestCase(EmsTestCase):
    """CLI tests for the ``createingestpoint`` management command."""
    data_file = 'create_ingest_point.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of the fixture data."""
            call_command('createingestpoint', 'theIngestPoint', 'useMeToViewStream')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class RemoveIngestPointTestCase(EmsTestCase):
    """CLI tests for the ``removeingestpoint`` management command."""
    data_file = 'remove_ingest_point.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of the fixture data."""
            call_command('removeingestpoint', 'theIngestPoint')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class ListIngestPointsTestCase(EmsTestCase):
    """CLI tests for the ``listingestpoints`` management command."""
    data_file = 'list_ingest_points.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Output must contain every key of every fixture record."""
            call_command('listingestpoints')
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for data in self.data['data']:
                for key in data:
                    # assertIn replaces the original try/except ValueError idiom.
                    self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class CreateHLSStreamTestCase(EmsTestCase):
    """CLI tests for the ``createhlsstream`` management command."""
    data_file = 'create_hls_stream.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            """Verbose output must contain every key of the fixture data."""
            call_command('createhlsstream', 'hlstest', '/MyWebRoot/', bandwidths=128, groupName='hls',
                         playlistType='rolling', playlistLength=10, chunkLength=5, verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Default output must contain the summary keys."""
            call_command('createhlsstream', 'hlstest', '/MyWebRoot/', bandwidths=128, groupName='hls',
                         playlistType='rolling', playlistLength=10, chunkLength=5)
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['localStreamNames', 'targetFolder']:
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class CreateHDSStreamTestCase(EmsTestCase):
    """CLI tests for the ``createhdsstream`` management command."""
    data_file = 'create_hds_stream.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            """Verbose output must contain every key of the fixture data."""
            call_command('createhdsstream', 'testpullStream', '../evo-webroot', groupName='hds',
                         playlistType='rolling', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data']:
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)

        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli(self, mock_write):
            """Default output must contain the summary keys."""
            call_command('createhdsstream', 'testpullStream', '../evo-webroot', groupName='hds',
                         playlistType='rolling')
            self.assertGreaterEqual(mock_write.call_count, 1)
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in ['localStreamNames', 'targetFolder']:
                self.assertIn(key, out, 'Key %s not found' % key)
@mock.patch('pyems.utils.logger', mock.Mock())
class IsStreamRunningTestCase(EmsTestCase):
    """CLI tests for the ``isstreamrunning`` management command."""
    data_file = 'is_stream_running.json'

    if django.VERSION >= (1, 5):
        @mock.patch('django.core.management.base.OutputWrapper.write')
        def test_cli_verbose(self, mock_write):
            """Verbose output must contain every key of the fixture data."""
            # verbosity=2 was missing in the original despite the method name
            # and the full-fixture key check; every sibling verbose test
            # passes it, so align with that pattern.
            call_command('isstreamrunning', '1', verbosity=2)
            self.assertGreaterEqual(mock_write.call_count, 1)
            # Flatten all args passed to write() into one searchable string.
            out = ''.join([z for x in mock_write.call_args_list for y in x for z in y])
            for key in self.data['data'].keys():
                # assertIn replaces the original try/except ValueError idiom.
                self.assertIn(key, out, 'Key %s not found' % key)
| 2.140625 | 2 |
char_samples_gen.py | Baiy97/Ultra_light_OCR_No.5 | 1 | 12768215 | <reponame>Baiy97/Ultra_light_OCR_No.5
import os
import cv2
import numpy as np
from tqdm import tqdm
# Build a per-character label file from a previous recognition pass.
# Inputs: the training images, the line-level ground truth and a
# "details.txt" produced earlier that holds, per image, the recognized
# text and a per-column character-location vector.
image_dir = 'data/train_data/TrainImages/'
gt_txt = 'data/train_data/LabelTrain.txt'

# Map each image file name to its ground-truth transcription.
gt_dict = dict()
with open(gt_txt, 'r') as f:
    lines = f.readlines()
    for line in lines:
        line = line.strip().split('\t')
        if len(line) > 1:
            gt_dict[line[0]] = line[1]
        else:
            # Images without a transcription get an empty label.
            gt_dict[line[0]] = ''

with open('details.txt', 'r') as f:
    lines = f.readlines()

char_gts = list()
for line in tqdm(lines):
    line = line.strip().split('\t')
    img_name, text, locs = line
    # NOTE(review): eval() on file content — assumes details.txt is trusted.
    locs = eval(locs)
    # block size
    image = cv2.imread(image_dir + img_name)
    h, w, _ = image.shape
    # Recognition resized images to height 32 with width capped at 160;
    # only keep the location entries that fall inside that resized width.
    textW = min(int(w / h * 32), 160)
    locs = locs[:int(textW / 160 * len(locs))]
    block_size = w / len(locs)
    # gt word
    gt_word = gt_dict[img_name]
    # ignore spaces for now
    # Walk the location vector and emit one [start, end] pixel span per
    # run of identical labels (i.e. per recognized character).
    scape_list = []
    prev = -1
    for i in range(len(locs)):
        if locs[i] == 0:
            continue
        if prev == -1:
            prev = int(i * block_size)
            continue
        if locs[i] != locs[i-1]:
            scape_list.append([prev, int(i*block_size)])
            prev = int(i*block_size)
    if prev != -1:
        scape_list.append([prev, w])

    # Only keep images where the span count matches the ground-truth
    # length, so spans can be paired with characters one-to-one.
    if len(scape_list) == len(gt_word):
        '''
        TODO one by one
        '''
        for i in range(len(gt_word)):
            char_gts.append([img_name, gt_word[i], scape_list[i]])
    else:
        pass
        # if ' ' in gt_word and ' ' in text:
        #     '''
        #     split blank and compare one by one
        #     '''
        #     gt_word = gt_word.split()
        #     text = text.split()
        #     if len(gt_word) == len(text):
        #         for i in range(len(gt_word)):
        #             if len(gt_word[i]) == len(text[i]):
        #                 for k in range(len(gt_word[i])):
        #                     char_gts.append([img_name, gt_word[i][k], scape_list[i][k]])

# Write: image name <TAB> character <TAB> start,end
with open('data/train_data/LabelTrain_char_.txt', 'w') as f:
    for item in char_gts:
        f.write(item[0] + '\t' + item[1] + '\t' + str(item[2][0]) + ',' + str(item[2][1]) + '\n')
print('Finished...')
| 2.375 | 2 |
import mss
import cv2
import numpy as np
class FrameData():
    """Container pairing one captured image with its controller state."""

    def __init__(self, fileName: str, controllerState: list, imageData):
        """Store the destination file name, controller values and image."""
        self.fileName = fileName
        self.controllerState = controllerState
        self.imageData = imageData

    def save(self, outfile):
        """Save the image to ``fileName`` and append a CSV row to *outfile*.

        The row is ``<fileName>,<state0>,<state1>,...`` followed by a newline.
        """
        self.imageData.save(self.fileName)
        state_csv = ','.join(str(value) for value in self.controllerState)
        outfile.write('%s,%s\n' % (self.fileName, state_csv))
| 2.6875 | 3 |
apcsp/labs/bank/util.py | wiisportsresort/apcsp-labs | 0 | 12768217 | <reponame>wiisportsresort/apcsp-labs<gh_stars>0
from typing import Dict, Literal
# Per-namespace counters backing create_id(); grows one entry per namespace.
id_counters: Dict[str, int] = {}


def create_id(namespace: str) -> str:
    """Return the next sequential id for *namespace*, e.g. ``acct0``, ``acct1``.

    :raises AssertionError: if *namespace* is empty.
    """
    assert len(namespace) > 0, "Namespace cannot be empty"
    next_value = id_counters.get(namespace, -1) + 1
    id_counters[namespace] = next_value
    return f"{namespace}{next_value}"
def sign(amount: float | int) -> str:
"""Returns a string representing the sign of the given amount."""
if amount < 0:
return "-"
elif amount > 0:
return "+"
else:
return ""
def format_amount(amount: int, include_sign: bool | Literal["negative"] = True) -> str:
"""Returns a string representing the given amount."""
if include_sign == "negative":
if amount < 0:
sign_str = sign(amount)
else:
sign_str = ""
else:
sign_str = sign(amount) if include_sign else ""
return f"{sign_str}${abs(amount) // 100:,}.{abs(amount) % 100:02}"
| 3.3125 | 3 |
userbot/modules/utils/user.py | ZJRDroid/PaperplaneRemix | 0 | 12768218 | <reponame>ZJRDroid/PaperplaneRemix<gh_stars>0
""" Userbot module for getting info
about any user on Telegram(including you!). """
from telethon.events import NewMessage
# from userbot import spamwatch
from userbot.events import register
from userbot.utils import parse_arguments, get_user_from_event
from userbot.utils.tgdoc import *
from ..help import add_help_item
TMP_DOWNLOAD_DIRECTORY = "./"
@register(pattern=r"^\.u(?:ser)?(\s+[\S\s]+|$)", outgoing=True)
async def who(event: NewMessage.Event):
""" For .user command, get info about a user. """
if event.fwd_from:
return
args, user = parse_arguments(event.pattern_match.group(1), [
'id', 'forward', 'general', 'bot', 'misc', 'all', 'mention'
])
args['forward'] = args.get('forward', True)
args['user'] = user
replied_user = await get_user_from_event(event, **args)
if not replied_user:
await event.edit("**Failed to get information for user**")
return
user_info = await fetch_info(replied_user, **args)
message_id_to_reply = event.message.reply_to_msg_id
if not message_id_to_reply:
pass
await event.edit(str(user_info), parse_mode="markdown")
async def fetch_info(replied_user, **kwargs):
    """Build a tgdoc document describing *replied_user*.

    :param replied_user: full user object (exposes ``.user`` and
        ``.common_chats_count``).
    :param kwargs: display flags — ``id`` (id only), ``general``, ``bot``,
        ``misc``, ``all`` (enables the previous three) and ``mention``
        (inline-mention the name instead of bolding it).
    :return: a tgdoc ``Section`` (or a single ``KeyValueItem`` when
        ``id`` is set) ready to be rendered as markdown.
    """
    user = replied_user.user
    id_only = kwargs.get('id', False)
    show_general = kwargs.get('general', True)
    show_bot = kwargs.get('bot', False)
    show_misc = kwargs.get('misc', False)
    show_all = kwargs.get('all', False)
    mention_name = kwargs.get('mention', False)

    # 'all' is a shorthand that turns on every detail section.
    if show_all:
        show_general = True
        show_bot = True
        show_misc = True

    full_name = str(user.first_name + ' ' + (user.last_name or ''))
    if mention_name:
        title = Link(full_name, f'tg://user?id={user.id}')
    else:
        title = Bold(full_name)

    # Short-circuit: only the id was requested.
    if id_only:
        return KeyValueItem(title, Code(user.id))

    general = SubSection(Bold('general'),
                         KeyValueItem('id', Code(user.id)),
                         KeyValueItem('first_name', Code(user.first_name)),
                         KeyValueItem('last_name', Code(user.last_name)),
                         KeyValueItem('username', Code(user.username)),
                         KeyValueItem('mutual_contact', Code(user.mutual_contact)),
                         KeyValueItem('common groups', Code(replied_user.common_chats_count)))

    # Optional SpamWatch global-ban lookup, kept disabled.
    # if spamwatch:
    #     banobj = spamwatch.get_ban(user.id)
    #     if banobj:
    #         general.items.append(KeyValueItem('gbanned', f'True / {banobj.reason}'))
    #     else:
    #         general.items.append(KeyValueItem('gbanned', 'False'))

    bot = SubSection(Bold('bot'),
                     KeyValueItem('bot', Code(user.bot)),
                     KeyValueItem('bot_chat_history', Code(user.bot_chat_history)),
                     KeyValueItem('bot_info_version', Code(user.bot_info_version)),
                     KeyValueItem('bot_inline_geo', Code(user.bot_inline_geo)),
                     KeyValueItem('bot_inline_placeholder',
                                  Code(user.bot_inline_placeholder)),
                     KeyValueItem('bot_nochats', Code(user.bot_nochats)))
    misc = SubSection(Bold('misc'),
                      KeyValueItem('restricted', Code(user.restricted)),
                      KeyValueItem('restriction_reason', Code(user.restriction_reason)),
                      KeyValueItem('deleted', Code(user.deleted)),
                      KeyValueItem('verified', Code(user.verified)),
                      KeyValueItem('min', Code(user.min)),
                      KeyValueItem('lang_code', Code(user.lang_code)))

    # Sections the caller did not request collapse to None and are omitted.
    return Section(title,
                   general if show_general else None,
                   misc if show_misc else None,
                   bot if show_bot else None)
add_help_item(
".user",
"Utilities",
"List information about a particular user.",
"""
`.u(ser) [options] (username|id)`
Or, in response to a message
`.u(ser) [options]`
Options:
`.id`: Show only the user's ID
`.general`: Show general user info
`.bot`: Show bot related info
`.misc`: Show miscelanious info
`.all`: Show all info (overrides other options)
`.mention`: Inline mention the user
`.forward`: Follow forwarded message
"""
)
| 2.359375 | 2 |
brain/src/common/mqtt_client.py | siddharthkundu/raspberry-pi-os-image-builder | 0 | 12768219 | <filename>brain/src/common/mqtt_client.py
from typing import Any, Callable, Dict, Tuple
from concurrent.futures import Future
import os
import threading
import re
import logging
from awscrt import io, mqtt
from awsiot import mqtt_connection_builder
from common.log_event import Logger
from common.config import Config
class MQTT:
    """Thin wrapper around an AWS IoT Core MQTT connection.

    Reads the endpoint, client id and certificate paths from the
    stage-specific client directory and connects on construction.
    """

    def __init__(self, config: Config, logger: Logger) -> None:
        """Load connection settings from *config* and connect to AWS IoT.

        :param config: configuration providing ``root`` and ``stage``.
        :param logger: logger used for system messages.
        """
        self._logger: Logger = logger
        self._logger.log_system(logging.INFO, 'Start Init MQTT')

        path_to_client: str = f'{config.root}/client_{config.stage}/'

        self._endpoint: str = ''
        self._path_to_cert: str = ''
        self._path_to_key: str = ''
        self._path_to_root: str = ''
        self._client_id: str = ''
        # config.ini lines look like "KEY = value"; split(' ') puts the
        # value (possibly with a trailing newline) in index 2.
        with open(os.path.join(path_to_client, 'config.ini'), 'r') as c_file:
            c_lines = c_file.readlines()
            for line in c_lines:
                line_list = line.split(' ')
                if line_list[0] == 'IOT_ENDPOINT':
                    # Strip a trailing newline only if present. The original
                    # unconditional [:-1] dropped the last character of the
                    # value whenever the line had no newline (e.g. the last
                    # line of the file).
                    self._endpoint = line_list[2].rstrip('\n')
                if line_list[0] == 'CLIENT_ID':
                    # Bug fix: the original tested `line_list[2][:-1] == '\n'`,
                    # which is never the intended "ends with newline" check,
                    # so the client id frequently kept its trailing newline.
                    self._client_id = line_list[2].rstrip('\n')

        # Locate the device certificate, private key and CA file, skipping
        # any bootstrap-* files left over from provisioning.
        certs_path = os.path.join(path_to_client, 'certs')
        all_files = [f for f in os.listdir(certs_path) if os.path.isfile(os.path.join(certs_path, f))]
        self._path_to_cert = os.path.join(certs_path,
                                          [f for f in all_files if re.match(r'^(?!bootstrap).*\.pem\.crt$', f)][0])
        self._path_to_key = os.path.join(certs_path,
                                         [f for f in all_files if re.match(r'^(?!bootstrap).*\.pem\.key$', f)][0])
        self._path_to_root = os.path.join(certs_path,
                                          [f for f in all_files if re.match(r'^(?!bootstrap).*\.ca\.pem$', f)][0])

        # Log any setting that is still empty so misconfiguration is visible.
        for i in [(self._endpoint, 'ENDPOINT'),
                  (self._path_to_cert, 'PATH_TO_CERT'),
                  (self._path_to_key, 'PATH_TO_KEY'),
                  (self._path_to_root, 'PATH_TO_ROOT'),
                  (self._client_id, 'CLIENT_ID')]:
            if i[0] == '':
                self._logger.log_system(logging.ERROR, f"_____ No IoT {i[1]} found _____")

        # Spin up resources
        event_loop_group = io.EventLoopGroup(1)
        host_resolver = io.DefaultHostResolver(event_loop_group)
        client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver)
        self._mqtt_connection: mqtt.Connection = mqtt_connection_builder.mtls_from_path(  # type: ignore
            endpoint=self._endpoint,
            cert_filepath=self._path_to_cert,
            pri_key_filepath=self._path_to_key,
            client_bootstrap=client_bootstrap,
            ca_filepath=self._path_to_root,
            client_id=self._client_id)

        # Make the connect() call
        connect_future: Future[Dict[str, Any]] = self._mqtt_connection.connect()  # type: ignore
        # Future.result() waits until a result is available
        connect_future.result()
        self._logger.log_system(logging.INFO, f"Connected to {self._endpoint} with client ID '{self._client_id}'...")

    def subscribe(self, topic_name: str, callback: Callable[[str, str], None]) -> None:
        """Subscribe to *topic_name* (QoS 1) and block until acknowledged.

        :param callback: invoked with (topic, payload) for each message.
        """
        mqtt_topic_subscribe_return: Tuple[Future[Dict[str, Any]], int] = self._mqtt_connection.subscribe(
            # type: ignore
            topic=topic_name,
            qos=mqtt.QoS.AT_LEAST_ONCE,
            callback=callback)

        # Wait for subscription to succeed
        mqtt_topic_subscribe_result = mqtt_topic_subscribe_return[0].result()
        self._logger.log_system(logging.INFO,
                                f"Subscribed to topic {topic_name} with {str(mqtt_topic_subscribe_result['qos'])}")

    def send(self, topic_name: str, data: str) -> None:
        """Publish *data* to *topic_name* (QoS 1) on a background thread.

        Returns immediately; delivery is awaited on a daemon thread so the
        caller is never blocked by the broker round-trip.
        """
        def _send(topic_name: str, data: str) -> None:
            mqtt_topic_publish_return: Tuple[Future[Dict[str, Any]], int] = self._mqtt_connection.publish(
                # type: ignore
                topic=topic_name,
                payload=data,
                qos=mqtt.QoS.AT_LEAST_ONCE
            )
            mqtt_topic_publish_return[0].result()

        threading.Thread(target=_send, args=[topic_name, data], daemon=True).start()
| 2.265625 | 2 |
# -*- coding: utf-8 -*-
"""fileio module."""
import pandas as pd # create_conf_file
import csv # write_conf_header, create_conf_file
import json # create_conf_file
import os # read_c3d_file
import btk # C3D class
import bmch # C3D class
import numpy as np # C3D class
def write_conf_header(metadata_path):
    """Create the csv configuration files and write their header rows.

    :param metadata_path: path to the metadata folder (trailing separator
        expected, the file name is appended directly)
    :type metadata_path: str

    Example::

        result = write_conf_header('/home/romain/Downloads/irsst/metadata/')
    """
    headers = {
        'emg': ['labels', 'publication_name'],
        'markers': ['labels'],
        'force': ['labels'],
        'participants': ['pseudo', 'process', 'laterality', 'group', 'mass', 'height', 'date'],
        'trials': ['folder', 'emg', 'markers', 'force']
    }
    for name, fieldnames in headers.items():
        with open('{}{}.csv'.format(metadata_path, name), 'w') as out:
            csv.DictWriter(out, fieldnames=fieldnames).writeheader()
def create_conf_file(metadata_path):
    """Merge the csv configuration files into a single ``config.json``.

    :param metadata_path: path to the metadata folder (trailing separator
        expected, the file names are appended directly)
    :type metadata_path: str

    Example::

        result = create_conf_file('/home/romain/Downloads/irsst/metadata/')
    """
    names = ['emg', 'markers', 'force', 'participants', 'trials']
    # Read each csv into a DataFrame and round-trip through to_json so the
    # merged structure matches pandas' JSON layout.
    json_file = {}
    for name in names:
        frame = pd.read_csv('{}{}.csv'.format(metadata_path, name))
        json_file[name] = json.loads(frame.to_json())
    # Export the merged configuration.
    json_path = '{}config.json'.format(metadata_path)
    with open(json_path, 'w') as json_data:
        json_data.write(json.dumps(json_file, indent=4))
def load_conf_file(metadata_path):
    """Load the json configuration file created by :func:`create_conf_file`.

    :param metadata_path: path to the metadata folder (trailing separator
        expected)
    :type metadata_path: str
    :return: the parsed configuration dictionary

    Example::

        result = load_conf_file('/home/romain/Downloads/irsst/metadata/')
    """
    with open('{}config.json'.format(metadata_path), 'r') as json_data:
        return json.load(json_data)
def save_conf_file(metadata_path, json_file):
    """Write *json_file* back to ``config.json`` in the metadata folder.

    :param metadata_path: path to the metadata folder (trailing separator
        expected)
    :type metadata_path: str
    :param json_file: configuration dictionary to serialize
    :type json_file: dict
    """
    with open('{}config.json'.format(metadata_path), 'w') as json_data:
        json_data.write(json.dumps(json_file, indent=4))
class C3D:
    """Read c3d files from one or more folders and extract their data.

    :param data_folders: dict with path to the data folder(s) as key and type
        (*markers and/or emg and/or force*) as value
    :type data_folders: dict
    :param conf_file: configuration dict (as returned by ``load_conf_file``)
        providing the expected channel labels per data kind

    Example::

        data_folders = {'/home/romain/Downloads/irsst/inputs/DapO/mvc/': ['emg'],
                        '/home/romain/Downloads/irsst/inputs/DapO/score/': ['markers']}
        c3d = load_conf_file(data_folders)
        c3d.read_data()
    """

    def __init__(self, data_folders, conf_file):
        """Constructor for C3D"""
        print('import c3d files from:')
        self.folders = data_folders
        self.conf_file = conf_file
        # Channel-assignment choices accumulated across files (see
        # _attribute_channels).
        self.assign = []

    def read_data(self):
        # todo complete return docstring
        """Read data from every ``.c3d`` file in `self.folders`
        :return
        """
        for ifolder, kind in self.folders.items():
            print('\t{}'.format(ifolder))
            c3d_files = [f for f in os.listdir(ifolder) if f.endswith('.c3d')]
            for ifile in c3d_files:
                print('\t\t{}'.format(ifile))
                file = os.path.join(ifolder, ifile)
                # NOTE(review): _open_file currently returns None (it has no
                # return statement), so this 3-way unpack raises TypeError
                # at runtime — looks like unfinished work.
                metadata, markers, analogs = self._open_file(file, kind)
                # NOTE(review): bare name — raises NameError at runtime;
                # presumably a placeholder for a "save assign" step.
                save_assign

    def _open_file(self, file, kind):
        """Open c3d acquisition (*private function*).

        Builds a metadata dict (frame range plus point/analog rates and
        counts) and a data dict keyed by 'markers'/'force'/'emg'.

        :param file: path to the c3d file
        :type file: str
        :param kind: type (*markers and/or emg and/or force*)
        :type kind: list

        NOTE(review): no return statement — `metadata` and `data` are built
        but discarded, and read_data expects a 3-tuple. TODO confirm intent.
        """
        reader = btk.btkAcquisitionFileReader()
        reader.SetFilename(file)
        reader.Update()
        acq = reader.GetOutput()

        metadata = {'first_frame': acq.GetFirstFrame(), 'last_frame': acq.GetLastFrame()}

        data = {}
        for i in ['markers', 'force', 'emg']:
            if i in kind:
                # NOTE(review): `is` compares identity, not equality — works
                # only because of CPython string interning; should be `==`.
                if i is 'markers':
                    metadata.update({'point_rate': acq.GetPointFrequency(), 'point_used': acq.GetPointNumber()})
                    data_temp = self._iterate(acq=acq, kind='markers')
                    n = metadata['last_frame']
                else:
                    metadata.update({'analog_rate': acq.GetAnalogFrequency(), 'analog_used': acq.GetAnalogNumber()})
                    data_temp = self._iterate(acq=acq, kind='analogs')
                    # Analog channels are sampled faster than points; scale
                    # the frame count by the rate ratio.
                    n = (metadata['last_frame'] * metadata['analog_rate']) / acq.GetPointFrequency()
                data[i] = self._attribute_channels(data_temp, kind=i, frames=n)
            else:
                data[i] = None

    def _attribute_channels(self, data_temp, kind, frames):
        """Map raw channel names to the configured target labels.

        Returns an (frames, n_targets) array with one column per target
        label from the configuration file.

        NOTE(review): work-in-progress — the channel mapping below is a
        hard-coded list (see TODELETE), `itarget` is unused, and the GUI
        assignment at the end overwrites `gui` after `output` was already
        filled. TODO confirm intended flow before relying on this.
        """
        fields = list(data_temp.keys())
        targets = list(self.conf_file[kind]['labels'].values())
        # TODELETE:
        # targets[-1] = 'Voltage.1'
        # gui = bmch.util.GuiC3D(targets, fields)
        gui = ['Delt_ant.EMG1',
               'Delt_med.EMG2',
               'Delt_post.EMG3',
               'Biceps.EMG4',
               'Triceps.EMG5',
               'Trap_sup.EMG6',
               'Pec.IM EMG12',
               'Supra.EMG9',
               'Infra.EMG10']

        output = np.zeros((int(frames), len(targets)))

        for i, iassign in enumerate(gui):
            output[:, i] = np.squeeze(data_temp[iassign])

        itarget = 'Delt_ant.EMG1'
        # check if all target are in fields

        # check if all previous assign are in fields

        # GUI
        gui = bmch.util.GuiC3D(targets, fields)
        self.assign.append(gui.assign)
        # save assign
        return output

    @staticmethod
    def _iterate(acq, kind='markers'):
        """Iterate through a btkCollection object (*private function*) and return data as dict.

        Channels whose values are all zero are skipped.

        :param acq: btkAcquisition object
        :type acq: btk.btkAcquisition
        :param kind: type of the data (*markers or analogs*)
        :type kind: str
        """
        out = {}
        if kind == 'markers':
            iterator = btk.Iterate(acq.GetPoints())
        elif kind == 'analogs':
            iterator = btk.Iterate(acq.GetAnalogs())
        else:
            # Unknown kind: nothing to iterate, return an empty dict.
            iterator = []
        for it in iterator:
            data_temp = it.GetValues()
            if data_temp.any():
                out.update({it.GetLabel(): data_temp})
        return out
| 2.59375 | 3 |
QASMParser/codegraph/errors.py | oerc0122/QASMParser | 5 | 12768221 | <reponame>oerc0122/QASMParser
"""
Errors relating to partitioning
"""
# Partitioning
# Warning emitted when the partitioner finds nothing worth splitting.
partitionWarning = ("Partitioning suggests no partitions.\n"
                    "Recommend running with different partitioning method or disable partitioning")
| 1.421875 | 1 |
import pandas as pd
def combine_reciprocal_hits(keep_df, other_df):
    """Fold reciprocal-best-hit rows from *other_df* into *keep_df*.

    Any query present only in *other_df* whose subject (``B_id``) is not
    already claimed in *keep_df* is appended as a new row with its ``B_id``
    set and every other column left as None. *keep_df* is modified in place
    and returned.
    """
    only_in_other = set(other_df.index.values) - set(keep_df.index.values)
    filler = keep_df.shape[1] - 1
    for sample in only_in_other:
        hit = other_df.loc[sample, 'B_id']
        if hit not in keep_df['B_id'].values:
            keep_df.loc[sample] = [hit] + [None] * filler
    return keep_df
def combine_single_hits(keep_df, other_df):
    """Add best one-way blast hits from *other_df* to *keep_df*.

    For each subject (SPU) in *other_df* not already present in
    *keep_df['B_id']*, the query with the highest bitscore is recorded as a
    new row indexed by that subject. *keep_df* is modified in place and
    returned.
    """
    unseen = set(other_df['subject'].unique()) - set(keep_df['B_id'].values)
    filler = keep_df.shape[1] - 1
    for spu in unseen:
        # Best-scoring query for this subject wins.
        best_query = other_df['bitscore'][other_df['subject'] == spu].idxmax()
        keep_df.loc[spu] = [best_query] + [None] * filler
    return keep_df
def add_uniprot_annotations(sample_df, uniprot):
    """Append ``UniProt.ID`` and ``UniProt.Name`` columns to *sample_df*.

    The subject field is expected in SwissProt FASTA-header form
    ``db|ID|NAME`` — assumes exactly three pipe-separated fields; TODO
    confirm against the blast output format.
    """
    gene_df = pd.DataFrame(index=uniprot.index.values,
                           columns=["UniProt.ID", "UniProt.Name"],
                           dtype=str)
    for idx in uniprot.index.values:
        # Drop the leading database tag and keep the accession + entry name.
        prot_id, prot_name = uniprot.loc[idx, 'subject'].split('|')[1:]
        if isinstance(prot_id, str) and isinstance(prot_name, str):
            gene_df.loc[idx, 'UniProt.ID'] = prot_id
            gene_df.loc[idx, 'UniProt.Name'] = prot_name
    return pd.concat([sample_df, gene_df], axis=1, join='outer', sort=False)
def add_interpro_annotations(sample_df, interpro_file):
    """Append ``IPR.IDs`` and ``IPR.Desc`` columns parsed from an ipr file.

    Each tab-separated line of *interpro_file* holds the gene id, a second
    (ignored) field, then any number of ``IPR-id; description`` entries.
    Ids and descriptions are joined with ';' per gene.
    """
    records = {'evm': [], 'IPR.IDs': [], 'IPR.Desc': []}
    with open(interpro_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            ids, descriptions = [], []
            for entry in fields[2:]:
                ipr_id, ipr_desc = entry.split(';')
                ids.append(ipr_id.strip())
                descriptions.append(ipr_desc.strip())
            records['evm'].append(fields[0])
            records['IPR.IDs'].append(';'.join(ids))
            records['IPR.Desc'].append(';'.join(descriptions))
    ipr = pd.DataFrame(records)
    ipr.set_index('evm', inplace=True)
    return pd.concat([sample_df, ipr], axis=1, join='outer', sort=False)
def add_kegg_annotations(sample_df, kegg_file):
    """Append a ``KEGG.IDs`` column parsed from a KEGG blast file.

    Each tab-separated line of *kegg_file* has the gene id in column 0 and
    the KEGG identifier in column 4.
    """
    records = {'evm': [], 'KEGG.IDs': []}
    with open(kegg_file, 'r') as handle:
        for raw_line in handle:
            fields = raw_line.strip().split('\t')
            records['evm'].append(fields[0])
            records['KEGG.IDs'].append(fields[4])
    kegg = pd.DataFrame(records)
    kegg.set_index('evm', inplace=True)
    return pd.concat([sample_df, kegg], axis=1, join='outer', sort=False)
def add_ncbi_annotations(sample_df, ncbi):
    """Append an ``NCBI.ID`` column extracted from nr blast subjects.

    Bug fix: the original built ``gene_df`` from the module-level
    ``uniprot`` frame instead of the *ncbi* argument, so the function only
    worked by accident inside the ``__main__`` script. It now uses its own
    parameter exclusively.

    The accession is taken as the second-to-last '|'-separated field of the
    subject (NCBI nr headers end with a trailing '|').
    """
    gene_df = pd.DataFrame(index=ncbi.index.values,
                           columns=["NCBI.ID"], dtype=str)
    for idx in ncbi.index.values:
        gene_df.loc[idx, 'NCBI.ID'] = ncbi.loc[idx, 'subject'].split('|')[-2]
    return pd.concat([sample_df, gene_df], axis=1, join='outer', sort=False)
def add_trembl_annotations(sample_df, tremble):
    """Append a ``TrEMBL.ID`` column extracted from TrEMBL blast subjects.

    Bug fix: the original ignored its *tremble* argument entirely and read
    the module-level ``uniprot``/``ncbi`` frames instead, producing wrong
    ids whenever the globals differed from the argument. It now uses the
    parameter exclusively (the parameter name is kept for interface
    compatibility).

    The accession is the second '|'-separated field of the subject
    (``db|ID|NAME`` header form).
    """
    gene_df = pd.DataFrame(index=tremble.index.values,
                           columns=["TrEMBL.ID"], dtype=str)
    for idx in tremble.index.values:
        gene_df.loc[idx, 'TrEMBL.ID'] = tremble.loc[idx, 'subject'].split('|')[1]
    return pd.concat([sample_df, gene_df], axis=1, join='outer', sort=False)
if __name__ == "__main__":
blast_columns = ['subject', 'perc.id', 'length', 'mismatch', 'gapopen',
'qstart', 'qend', 'sstart', 'send', 'evalue', 'bitscore']
protein_models = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/ProteinModels_SPU_BestHits_peptide.txt",
sep='\t', index_col=0)
transcripts_pep = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/SPU_BestHits_peptide.txt",
sep='\t', index_col=0)
transcripts_nuc = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/SPU_BestHits.txt",
sep='\t', index_col=0)
homologues = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/best_spu_aligns.blastn",
sep='\t', header=None, index_col=0,
names=blast_columns)
uniprot = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.SwissProt.blast",
sep='\t', header=None, index_col=0,
names=blast_columns)
interpro_file = "/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.ipr"
kegg_file = "/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.KEGG.blast"
ncbi = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.nr.blast",
sep='\t', header=None, index_col=0, names=blast_columns)
trembl = pd.read_csv("/home/dakota/SequenceData/GenomeAnnotations/Echinoderm_project/sea_urchin/5. gene_function_annotation/Lytechinus_variegatus_EVM_out_pep.TrEMBL.blast",
sep='\t', header=None, index_col=0,
names=blast_columns)
annotations = combine_reciprocal_hits(pd.DataFrame(protein_models['B_id']),
pd.DataFrame(transcripts_pep['B_id']))
annotations = combine_reciprocal_hits(annotations,
pd.DataFrame(transcripts_nuc))
annotations = combine_single_hits(annotations, homologues)
annotations.columns.values[0] = 'SPU'
annotations = add_uniprot_annotations(annotations, uniprot)
annotations = add_interpro_annotations(annotations, interpro_file)
annotations = add_kegg_annotations(annotations, kegg_file)
annotations = add_ncbi_annotations(annotations, ncbi)
annotations = add_trembl_annotations(annotations, trembl)
annotations.to_csv('/home/dakota/SequenceData/evm_annotations.csv')
import argparse
import getpass
import json
import os
import subprocess
import sys
from axolpy import configuration, logging, solidity
from web3 import Web3
def init_arg_parser() -> argparse.ArgumentParser:
    """
    Build the command-line parser for this deployment script.

    Accepts the wallet private key (``-k``), the contract name
    (``-c``, required) and an optional solidity compiler version.

    :return: The configured argument parser.
    :rtype: :class:`argparse.ArgumentParser`
    """
    parser = argparse.ArgumentParser(
        description="Deploy smart contract to Ethereum.")
    parser.add_argument("-k", "--private-key")
    parser.add_argument("-c", "--contract-name", required=True)
    parser.add_argument("--solidity-compiler-version")
    return parser
# --- CLI parsing, logging and configuration setup ---
arg_parser = init_arg_parser()
args = arg_parser.parse_args()
logger = logging.get_logger(name=os.path.basename(__file__))
logging.set_level(logging.INFO)
logging.show_milliseconds()
config = configuration.AxolpyConfigManager.get_context(name="blockchain")
base_path = config["main"]["base_path"]
# Prompt interactively for the private key unless it was passed via -k.
private_key: str = args.private_key if args.private_key else getpass.getpass(
    prompt="Private Key: ")
contract_name: str = args.contract_name
contract_filepath: str = f'{config["main"]["contracts.path"]}/{contract_name}.sol'
# CLI flag overrides the configured compiler version.
solidity_compiler_version: str = args.solidity_compiler_version \
    if args.solidity_compiler_version \
    else config["main"]["solidity.compiler.version"]
contract_content: str = None
logger.info(f"Reading contract file {contract_filepath}")
with open(contract_filepath, "r") as file:
    contract_content = file.read()
# Compile the contract and persist the full compiler output for later use.
compiled_sol = solidity.SolidityHelper.solcx_compile_standard(
    source_name=f"{contract_name}.sol",
    source_content=contract_content,
    solidity_compiler_version=solidity_compiler_version)
with open(f'{config["main"]["distribution.path"]}/{contract_name}.json', "w") as file:
    json.dump(compiled_sol, file)
# Get bytecode and abi from compiled solidity file
bytecode: str = compiled_sol["contracts"][f"{contract_name}.sol"][contract_name]["evm"]["bytecode"]["object"]
abi: str = compiled_sol["contracts"][f"{contract_name}.sol"][contract_name]["abi"]
# Connect to the provider
w3: Web3 = Web3(Web3.HTTPProvider(config["web3"]["http_provider"]))
chain_id: int = config["main"].getint("chain.id")
wallet_address: str = config["wallet"]["local.address.0"]
# The nonce must equal the wallet's current transaction count.
nonce = w3.eth.get_transaction_count(wallet_address)
w3contract = w3.eth.contract(abi=abi, bytecode=bytecode)
logger.info(f"Wallet address is {wallet_address}")
contract_txn = w3contract.constructor().buildTransaction(
    {"chainId": chain_id, "from": wallet_address, "nonce": nonce})
# Sign the transaction and send it to the network
signed_contract_txn = w3.eth.account.sign_transaction(
    contract_txn, private_key)
logger.info("Deploying contract ...")
tx_hash = w3.eth.send_raw_transaction(
    transaction=signed_contract_txn.rawTransaction)
logger.info("Waiting for transaction receipt ...")
tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
logger.info(f"Contract is deployed to {tx_receipt.contractAddress}")
# Run the corresponding script we built for trial run
# Stream the child script's combined stdout/stderr to our stdout.
with subprocess.Popen([sys.executable,
                       f"{base_path}/bin/blockchain/call-partner-agreement.py",
                       "--contract-address",
                       tx_receipt.contractAddress],
                      stdout=subprocess.PIPE,
                      stderr=subprocess.STDOUT) as proc:
    while True:
        line = proc.stdout.readline()
        if not line:
            break
        print(line.decode("utf-8").rstrip())
    proc.stdout.close()
| 2.453125 | 2 |
src/constants.py | devJWSong/transformer-nmt-pytorch | 5 | 12768224 | import torch
# Path or parameters for data
DATA_DIR = 'data'
# Directory holding the trained sentencepiece models / vocabularies.
SP_DIR = f'{DATA_DIR}/sp'
SRC_DIR = 'src'
TRG_DIR = 'trg'
SRC_RAW_DATA_NAME = 'raw_data.src'
TRG_RAW_DATA_NAME = 'raw_data.trg'
TRAIN_NAME = 'train.txt'
VALID_NAME = 'valid.txt'
TEST_NAME = 'test.txt'
# Parameters for sentencepiece tokenizer
# Special token ids: padding, start-of-seq, end-of-seq, unknown.
pad_id = 0
sos_id = 1
eos_id = 2
unk_id = 3
src_model_prefix = 'src_sp'
trg_model_prefix = 'trg_sp'
sp_vocab_size = 16000
character_coverage = 1.0
model_type = 'unigram'
# Parameters for Transformer & training
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
learning_rate = 1e-4
batch_size = 80
seq_len = 200
num_heads = 8
num_layers = 6
d_model = 512
d_ff = 2048
# Per-head dimension; assumes d_model is divisible by num_heads.
d_k = d_model // num_heads
drop_out_rate = 0.1
num_epochs = 10
beam_size = 8
ckpt_dir = 'saved_model'
| 2.140625 | 2 |
function.py | gvangool/wapi | 1 | 12768225 | <reponame>gvangool/wapi
# -*- coding: utf-8 -*-
# Copyright (c) 2008 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Classes encapsulating Wapi functions into more abstracted containers"""
import re
from wapi.exceptions import ApiMissingParam
# Matches function names of the form "<namespace>__<action>"; group(1)
# captures the namespace prefix.  Names without "__" do not match.
NAMESPACE_RE = re.compile('(.*)__.*?')
class ApiFunction(object):
    """Encapsulates a Wapi function"""
    def __init__(self, func):
        """
        :param func: the wrapped view function.  The wapi decorators
            attach ``_required_parameters_`` / ``_optional_parameters_``
            lists and the various flag attributes read below.
        """
        self.func = func
        # NOTE: ``func_name`` is the Python 2 spelling of ``__name__``.
        self.name = func.func_name
        self.required_parameters = getattr(func, '_required_parameters_', [])
        self.optional_parameters = getattr(func, '_optional_parameters_', [])
        self.doc = func.__doc__
    def __call__(self, request, dct):
        """Resolve declared parameters into ``dct`` and call the view.

        Missing required parameters propagate ``ApiMissingParam``;
        missing optional ones fall back to their declared default.
        """
        for parameter in self.required_parameters:
            parameter.get(request, dct)
        for parameter in self.optional_parameters:
            try:
                parameter.get(request, dct)
            except ApiMissingParam:
                parameter.set_default(request, dct)
        return self.func(request, dct)
    @property
    def requires_login(self):
        """Whether the function requires a logged-in user"""
        return hasattr(self.func, 'requires_login') and self.func.requires_login
    @property
    def endpoint(self):
        """Returns the function endpoint used by the RestBinding"""
        return self.name.replace('__', '/')
    @property
    def is_read(self):
        """Whether the function can be called as a read function"""
        return not getattr(self.func, '_write_only_', False)
    @property
    def is_write(self):
        """Whether the function can be called as a write function"""
        return not getattr(self.func, '_read_only_', False)
    @property
    def documented(self):
        """Whether the function should be documented"""
        return not getattr(self.func, '_undocumented_', False)
    def namespace(self):
        """Return this function's namespace prefix, or u'' if it has none"""
        match = NAMESPACE_RE.match(self.name)
        if match:
            return match.group(1)
        return u''
class ApiNamespace(object):
    """Container grouping multiple functions into the same namespace"""
    def __init__(self, name, short_name, functions):
        """
        :param name: human-readable namespace name
        :param short_name: namespace prefix matched against each
            function's ``namespace()``
        :param functions: iterable of :class:`ApiFunction` objects; only
            those belonging to ``short_name`` are kept, sorted by name
        """
        self.name = name
        self.short_name = short_name
        self.functions = [f for f in functions if f.namespace() == short_name]
        # ``key=`` works on both Python 2 and 3; the previous
        # ``cmp=lambda x, y: cmp(x.name, y.name)`` relied on the ``cmp``
        # keyword and builtin, both removed in Python 3, and produced the
        # same name-based ordering.
        self.functions.sort(key=lambda f: f.name)
    def __iter__(self):
        """Iterate over the namespace's functions in name order"""
        return iter(self.functions)
| 1.90625 | 2 |
Project - Biometric Face Recognition Attendance/signup.py | Shakir-Ali/Attendance-System-Using-Face-Recognition | 1 | 12768226 | import numpy as np
import cv2
import pandas as pd
import face_recognition as fc
import time
import random as rd
import smtplib
import xlrd
fcc=0
# Webcam capture handle (device 0) and Haar-cascade face detector.
v=cv2.VideoCapture(0)
# NOTE(review): hard-coded absolute Windows path; prefer
# cv2.data.haarcascades + "haarcascade_frontalface_alt2.xml".
fd=cv2.CascadeClassifier(r"C:\Users\HP\AppData\Local\Programs\Python\Python36\Lib\site-packages\cv2\data\haarcascade_frontalface_alt2.xml")
def cap():
    """Grab one webcam frame; return face encodings if exactly one face
    is detected, otherwise print a message and implicitly return None.

    Uses the module-level globals ``v`` (VideoCapture) and ``fd``
    (Haar cascade classifier).
    """
    ret,i=v.read()
    # Haar cascades operate on grayscale images.
    j=cv2.cvtColor(i,cv2.COLOR_BGR2GRAY)
    f=fd.detectMultiScale(j)
    if len(f)==1:
        for(x,y,w,h) in f:
            # Crop the detected face region and compute its encoding(s).
            image=i[y:y+h,x:x+w].copy()
            fl=fc.face_locations(image)
            fcl=fc.face_encodings(image,fl)
            cv2.imshow('image',image)
            k= cv2.waitKey(5)
            return fcl
            # NOTE(review): unreachable -- placed after ``return``.
            break
    else:
        print("Face not Detected")
def genotp():
    """Return a pseudo-random integer OTP in the range 0..9999.

    Note: values below 1000 yield an OTP shorter than four digits.
    """
    return int(rd.random() * 10000)
def enterdata():
    """Interactively collect a student's details and a face encoding.

    Prompts on stdin for name / roll number / mobile / email, then loops
    over webcam captures (``cap``) until a non-empty face encoding list
    is obtained.

    :return: tuple ``(name, roll, number, email, face_encodings)``
    """
    name=input("Enter Name: ")
    roll=input("Enter Roll No.: ")
    number=int(input("Enter MObile Number: "))
    email=input("Enter E-Mail: ")
    print("Hold Still The Camera will initialize to detect your face in few seconds")
    print("Name:",name,"\nRoll",roll,"\nNumber",number,"\nEmail",email)
    time.sleep(2)
    q=0
    while(q!=1):
        try:
            fcc=cap()
            # cap() returns None when no single face was found;
            # len(None) raises TypeError, the bare except below swallows
            # it, and the loop retries until a face is captured.
            if len(fcc) != 0:
                print("Successfully Entered Data OTP is sent to your email")
                q=1
                return name,roll,number,email,fcc
        except:
            pass
def sendmail(email, otp):
    """Send the registration OTP to ``email`` via Gmail SMTP.

    NOTE(review): the login credentials below are placeholders and must
    be replaced with a real account before this can work.
    """
    connection = smtplib.SMTP('smtp.gmail.com', 587)
    connection.starttls()
    connection.login("Enter your mail", "Enter your password")
    body = ("Subject: OTP is " + str(otp)
            + " \nWelcome to our Institute!\nTo Complete Registration Please Enter the following OTP:"
            + str(otp) + " \nThank you for enrolling with us.")
    connection.sendmail("<NAME>", email, body)
# Load the registration and attendance workbooks.
Data = pd.read_excel("Data.xlsx")
df = pd.DataFrame(Data)
Data1 = pd.read_excel("Attendance.xlsx")
df1 = pd.DataFrame(Data1)
# Collect the new student's details plus a face encoding, release the
# webcam, then email a one-time password for verification.
name,roll,number,email,fcc = enterdata()
v.release()
otp=genotp()
sendmail(email,otp)
q=0
dataf=pd.DataFrame({"Name":[name],
                    "Roll":[roll],
                    "Number":[number],
                    "Email":[email],
                    "Encoding":list(fcc)})
dataf1=pd.DataFrame({'Name':[name],
                     'Email':[email]})
# Keep prompting until the user echoes the OTP back correctly, then
# append the new row to both workbooks.
while(q!=1):
    eotp=int(input("Enter OTP"))
    if eotp==otp:
        q=1
        df=df.append(dataf,ignore_index=True,sort=False)
        df.to_excel("Data.xlsx",index=False)
        df1=df1.append(dataf1,ignore_index=True,sort=False)
        df1.to_excel("Attendance.xlsx",index=False)
        print("Success")
    else :
        print("Re-Enter OTP")
| 3.25 | 3 |
main/forms.py | drx/archfinch | 1 | 12768227 | <reponame>drx/archfinch
from django.core.urlresolvers import reverse
from django.template.defaultfilters import slugify
from django.utils.http import int_to_base36
from django.contrib.formtools.wizard import FormWizard
from django import forms
from django.conf import settings
from archfinch.main import tasks
from archfinch.main.models import Item, ItemProfile, Category
from archfinch.links.models import Link
from archfinch.links.scraper import scrape, generate_thumbnail
from archfinch.utils.spam import AntiSpamModelForm
from django.shortcuts import redirect
class AddItemForm1(forms.ModelForm):
    """First wizard step for a generic item: name plus category."""
    # Restrict the category choices to categories that are not hidden.
    category = forms.ModelChoiceField(queryset=Category.objects.filter(hide=False))
    class Meta:
        model = Item
        fields = ('name', 'category')
class AddLinkForm1(forms.ModelForm):
    """First wizard step for a link: title and URL."""
    name = forms.CharField(max_length=1000, label='Title', widget=forms.TextInput(attrs={'size': '40'}))
    url = forms.CharField(max_length=1000, label='URL', widget=forms.TextInput(attrs={'size': '40'}))
    class Meta:
        model = Link
        fields = ('name', 'url')
class AddItemForm2(forms.Form):
    """Empty confirmation step, shown only when potential duplicates exist."""
    pass
class AddItemWizard(FormWizard):
    """Two-step wizard for adding an Item or a Link.

    Step 0 collects the item details; step 1 (``AddItemForm2``) is kept
    only when potential duplicates are found, as a confirmation screen.
    ``self.model`` is set per-request from the URL kwargs in
    ``parse_params``.
    """
    def done(self, request, form_list):
        """Create the item, credit the submitter, redirect to the item."""
        item = form_list[0]
        try:
            # Link submissions: if the URL already exists, just go there.
            url = item.cleaned_data['url']
            existing_items = Item.objects.filter(link__url=url)
            if existing_items:
                return redirect(existing_items[0].get_absolute_url())
        except KeyError:
            # Not a link form -- no 'url' field; fall through and create.
            pass
        item = item.save(commit=False)
        item.submitter = request.user
        item.get_meta_data()
        item.save()
        request.user.add_points(10)
        if self.model.__name__ == 'Link':
            # Submitting a link counts as an implicit positive opinion.
            tasks.opinion_set.delay(request.user, item, 4)
        return redirect(item.get_absolute_url())
    def get_template(self, step):
        return 'main/additem.html'
    def process_step(self, request, form, step):
        """After step 0, look for potential duplicates by name/category."""
        if step != 0:
            return
        if form.is_valid():
            if self.model.__name__ == 'Link':
                potential_conflicts = Item.objects.none()
            else:
                potential_conflicts = Item.search.query('"'+form.cleaned_data['name']+'"').filter(category_id=form.cleaned_data['category'].id)
            if potential_conflicts.count() > 0:
                potential_conflicts = potential_conflicts[0:100]
                # Expose conflicts (and other locals) to the template.
                self.extra_context = locals()
            else:
                # NOTE(review): mutates self.form_list, which FormWizard
                # instances may share across requests -- verify this is
                # safe under the deployed wizard lifecycle.
                self.form_list.remove(AddItemForm2)
    def parse_params(self, request, *args, **kwargs):
        # Called per request; 'model' (Item or Link) comes from the URLconf.
        self.model = kwargs['model']
        self.extra_context['model'] = self.model.__name__
| 1.960938 | 2 |
judgments/middleware.py | nationalarchives/ds-caselaw-public-ui | 0 | 12768228 | import json
from urllib.parse import unquote
from django.http import HttpRequest
from django.template.response import TemplateResponse
class CookieConsentMiddleware:
    """Expose cookie-consent state to template contexts.

    Reads the ``cookies_policy`` and ``dontShowCookieNotice`` cookies and
    sets ``showGTM`` / ``dontShowCookieNotice`` in the template context so
    templates can decide whether to load tag-manager scripts and whether
    to render the cookie notice.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def __call__(self, request):
        # No per-request work; everything happens at template-render time.
        return self.get_response(request)

    def process_template_response(
        self, request: HttpRequest, response: TemplateResponse
    ) -> TemplateResponse:
        context = response.context_data
        context["showGTM"] = False

        raw_policy = request.COOKIES.get("cookies_policy", None)
        if raw_policy:
            # Cookie holds a URL-encoded JSON object, e.g. {"usage": true}.
            policy = json.loads(unquote(raw_policy))
            context["showGTM"] = policy["usage"] or False

        if request.COOKIES.get("dontShowCookieNotice", None) == "true":
            context["dontShowCookieNotice"] = True

        return response
| 2.21875 | 2 |
src/decent.py | cloudzfy/pychallenge | 3 | 12768229 | # Email received from Leopold Mozart:
#
# From: "<NAME>" <<EMAIL>>
# Date: Thu, 1 Sep 2016 01:39:31 -0700
# Message-ID: <<EMAIL>>
# Subject: Re: my broken zip Re: sorry
# MIME-Version: 1.0
# Content-Type: text/plain; charset=UTF-8
# Content-Transfer-Encoding: 7bit
# Content-Disposition: inline
# Precedence: bulk
# X-Autoreply: yes
# Auto-Submitted: auto-replied
#
# Never mind that.
#
# Have you found my broken zip?
#
# md5: bbb8b499a0eef99b52c7f13f4e78c24b
#
# Can you believe what one mistake can lead to?
import md5
import zipfile
import StringIO
import Image
# Python 2 script: repair a zip archive corrupted by a single-byte
# change, verified against the md5 digest from the challenge email.
file = open('mybroken.zip')
src = file.read()
file.close()
# Brute force: try every byte value at every position until the digest
# matches.  NOTE(review): ``break`` only exits the inner loop, so the
# outer scan continues after the fix is found (slow but harmless --
# later candidates only match when unchanged).
for i in range(len(src)):
    for j in range(256):
        changed = src[:i] + chr(j) + src[i+1:]
        if md5.md5(changed).hexdigest() == 'bbb8b499a0eef99b52c7f13f4e78c24b':
            src = changed
            break
# Open the repaired archive in memory and display the image inside it.
z = zipfile.ZipFile(StringIO.StringIO(src))
Image.open(StringIO.StringIO(z.read('mybroken.gif'))).show()
| 2.65625 | 3 |
QUANTAXIS_Monitor_GUI/TasksByThreading/QA_Gui_DateFetch_Task.py | simplezhang57/QUANTAXIS | 2 | 12768230 |
import time
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from PyQt5 import QtCore
from QUANTAXIS.QASU.save_tdx import (QA_SU_save_stock_day,
QA_SU_save_stock_week,
QA_SU_save_stock_month,
QA_SU_save_stock_year,
QA_SU_save_stock_xdxr,
QA_SU_save_stock_min,
QA_SU_save_index_day,
QA_SU_save_index_min,
QA_SU_save_etf_day,
QA_SU_save_etf_min,
QA_SU_save_stock_list,
QA_SU_save_stock_block,
QA_SU_save_stock_info,
QA_SU_save_stock_transaction,
QA_SU_save_option_day)
from QUANTAXIS.QAUtil import DATABASE
'''
https://martinfitzpatrick.name/article/multithreading-pyqt-applications-with-qthreadpool/
QThread
'''
class QA_GUI_Date_Fetch_Task(QThread):
    """Base class for data-fetch worker threads in the monitor GUI.

    Subclasses run one QA_SU_save_* job in ``run`` and report back to
    the GUI thread through the Qt signals declared below.  Widgets are
    injected via the set*UIWidget methods rather than ``__init__``.
    """
    # TODO fix here: __init__ is not executed -- QThread is a very
    # special object. (translated from the original Chinese note)
    #
    #def __int__(self, qParentWidget):
    # default initializer
    #    super(QA_GUI_Date_Fetch_Task, self).__init__()
    #    self.qParentWidget = qParentWidget;
    # abstract method -- this is where the worker thread does its work
    def run(self):
        pass
    # Signals for pushing log lines / progress / begin / done events
    # from the worker thread to the GUI thread.
    trigger_new_log = pyqtSignal(str)
    trigger_new_progress = pyqtSignal(int)
    trigger_start_task_begin = pyqtSignal(str)
    trigger_start_task_done = pyqtSignal(str)
    #abstract method ?
    def connectSignalSlot(self):
        # Wire each signal to the subclass's handler (runs on GUI thread).
        self.trigger_new_log.connect(self.updateLogTriggerHandler)
        self.trigger_new_progress.connect(self.updateProgressTriggerHandler)
        self.trigger_start_task_begin.connect(self.startTaskTriggerHandler)
        self.trigger_start_task_done.connect(self.doneTaskTriggerHandler)
    def setLoggingUIWidget(self, logDisplay):
        # Table widget receiving log rows.
        self.logDisplay = logDisplay
    def setProgressUIWidget(self, qProgressBar):
        self.qProgressBar = qProgressBar
    def setCheckboxUIWidget(self, qCheckBox):
        self.qCheckBox = qCheckBox
    #abstract method
    def changeRunningTaskColor0(self, qColor=None):
        # Recolor the task's checkbox label; None resets it to black.
        palette = self.qCheckBox.palette()
        if qColor == None:
            palette.setColor(QPalette.Active, QPalette.WindowText, Qt.black)
        else:
            palette.setColor(QPalette.Active, QPalette.WindowText, qColor)
        self.qCheckBox.setPalette(palette)
        pass
    #abstract method
    def updateLogTriggerHandler(self):
        pass
    #abstract method
    def updateProgressTriggerHandler(self):
        pass
    #abstract method
    def startTaskTriggerHandler(self):
        pass
    #abstract method
    def doneTaskTriggerHandler(self):
        pass
class QA_GUI_DateFetch_SU_job01_stock_day(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch daily stock bars via QA_SU_save_stock_day."""
    # TODO fix here: __init__ is not executed -- QThread is special;
    # widgets are injected via the base-class set*UIWidget methods.
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # NOTE(review): resets to black, unlike the other jobs which use
        # green for "done" -- confirm whether this is intentional.
        self.changeRunningTaskColor0(QtCore.Qt.black)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_day")
            self.logDisplay.setRowCount(rowCount+1)
            self.logDisplay.setItem(rowCount,0,newItem1)
            self.logDisplay.setItem(rowCount,1,newItem2)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    # thread is working here
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_day(client=DATABASE, ui_log=self.trigger_new_log, ui_progress= self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
###################################################################################################################\
class QA_GUI_DateFetch_SU_job01_stock_week(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch weekly stock bars via QA_SU_save_stock_week."""
    # TODO fix here: __init__ is not executed -- QThread is special.
    def startTaskTriggerHandler(self, info_str):
        # Yellow marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.yellow)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_week")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_week(client=DATABASE, ui_log= self.trigger_new_log, ui_progress= self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################\
class QA_GUI_DateFetch_SU_job01_stock_month(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch monthly stock bars via QA_SU_save_stock_month."""
    # TODO fix here: __init__ is not executed -- QThread is special.
    def startTaskTriggerHandler(self, info_str):
        # Blue marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.blue)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_month")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_month(client=DATABASE, ui_log= self.trigger_new_log, ui_progress= self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################\
class QA_GUI_DateFetch_SU_job01_stock_year(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch yearly stock bars via QA_SU_save_stock_year."""
    # TODO fix here: __init__ is not executed -- QThread is special.
    def startTaskTriggerHandler(self, info_str):
        # Magenta marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.magenta)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_year")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_year(client=DATABASE, ui_log= self.trigger_new_log, ui_progress= self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################\
class QA_GUI_DateFetch_SU_job02_stock_xdxr(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch ex-dividend/rights (xdxr) data via QA_SU_save_stock_xdxr."""
    # TODO fix here: __init__ is not executed -- QThread is special.
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_xdxr")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_xdxr(client=DATABASE, ui_log= self.trigger_new_log, ui_progress= self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################\
class QA_GUI_DateFetch_SU_job03_stock_min(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch minute-level stock bars via QA_SU_save_stock_min."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_min")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_min(client=DATABASE, ui_log= self.trigger_new_log, ui_progress= self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################
class QA_GUI_DateFetch_SU_job04_index_day(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch daily index bars via QA_SU_save_index_day."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_index_day")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_index_day(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################
class QA_GUI_DateFetch_SU_job05_index_min(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch minute-level index bars via QA_SU_save_index_min."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_index_min")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_index_min(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################
class QA_GUI_DateFetch_SU_job06_etf_day(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch daily ETF bars via QA_SU_save_etf_day."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_etf_day")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_etf_day(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################
class QA_GUI_DateFetch_SU_job07_etf_min(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch minute-level ETF bars via QA_SU_save_etf_min."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_etf_min")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_etf_min(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
###################################################################################################################
class QA_GUI_DateFetch_SU_job08_stock_list(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch the stock list via QA_SU_save_stock_list."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_list")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_list(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
class QA_GUI_DateFetch_SU_job09_stock_block(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch stock block (sector) data via QA_SU_save_stock_block."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)

    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)

    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)

    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            # BUGFIX: this task previously labelled its log rows
            # "QA_SU_save_stock_list" (copy/paste from job08).
            newItem2 = QTableWidgetItem("QA_SU_save_stock_block")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)

    def run(self):
        # Runs on the worker thread; updates are marshalled to the GUI
        # thread via the signals declared on the base class.
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_block(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
class QA_GUI_DateFetch_SU_job10_stock_info(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch stock fundamentals via QA_SU_save_stock_info."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)

    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)

    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)

    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            # BUGFIX: this task previously labelled its log rows
            # "QA_SU_save_stock_list" (copy/paste from job08).
            newItem2 = QTableWidgetItem("QA_SU_save_stock_info")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)

    def run(self):
        # Runs on the worker thread; updates are marshalled to the GUI
        # thread via the signals declared on the base class.
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_info(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
class QA_GUI_DateFetch_SU_job11_stock_transaction(QA_GUI_Date_Fetch_Task):
    """Worker thread: fetch tick transactions via QA_SU_save_stock_transaction."""
    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)
        pass
    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)
        pass
    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)
        pass
    def updateLogTriggerHandler(self, log):
        # Append one (message, task name) row to the GUI log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            newItem2 = QTableWidgetItem("QA_SU_save_stock_transaction")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)
        pass
    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_stock_transaction(client=DATABASE, ui_log=self.trigger_new_log, ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
        pass
class QA_GUI_DateFetch_SU_job12_option_day(QA_GUI_Date_Fetch_Task):
    """Fetch task that stores daily option data via QA_SU_save_option_day."""

    def startTaskTriggerHandler(self, info_str):
        # Red marks the task as running.
        self.changeRunningTaskColor0(QtCore.Qt.red)

    def doneTaskTriggerHandler(self, info_str):
        # Green marks the task as finished.
        self.changeRunningTaskColor0(QtCore.Qt.green)

    def updateProgressTriggerHandler(self, progress):
        self.qProgressBar.setValue(progress)

    def updateLogTriggerHandler(self, log):
        # Append one non-blank log line plus the task name to the log table.
        if log and log.strip():
            rowCount = self.logDisplay.rowCount()
            newItem1 = QTableWidgetItem(log)
            # BUGFIX: the task-name column previously said
            # "QA_SU_save_stock_transaction"; this task runs
            # QA_SU_save_option_day.
            newItem2 = QTableWidgetItem("QA_SU_save_option_day")
            self.logDisplay.setRowCount(rowCount + 1)
            self.logDisplay.setItem(rowCount, 0, newItem1)
            self.logDisplay.setItem(rowCount, 1, newItem2)

    def run(self):
        self.trigger_start_task_begin.emit("begin")
        QA_SU_save_option_day(client=DATABASE, ui_log=self.trigger_new_log,
                              ui_progress=self.trigger_new_progress)
        self.trigger_start_task_done.emit("end")
# pytdx (TDX quote library) prints messages directly; this stream lets us
# redirect that output into the GUI as well.
class EmittingStream(QtCore.QObject):
    # Emitted once per write() call with the written text.
    textWritten = QtCore.pyqtSignal(str)  # signal carrying a str payload

    def write(self, text):
        # Coerce to str so any payload (e.g. from print redirection) is accepted.
        self.textWritten.emit(str(text))
class QA_GUI_Selected_TaskQueue(QThread):
    # NOTE(history): __init__ was reportedly not being executed on this
    # QThread subclass (see the commented-out attempt below), so all state
    # lives at class level instead.
    #def __int__(self, logDisplay):
    # Strange problem: __init__ was not being executed
    # default initialiser
    # super().__init__()
    #sfassda
    #print("run here")
    #exit(0)
    #self.logDisplay = logDisplay
    #sys.stderr.textWritten.connect(self.outputWrittenStderr)
    # The lines below redirected print/system output into the text edit widget.
    #sys.stdout = EmittingStream()
    #sys.stderr = EmittingStream()
    # Slots receiving the str signal (kept for reference, currently disabled).
    '''
    def outputWrittenStdout(self, text):
        cursor = self.logDisplay.textCursor()
        cursor.movePosition(QtGui.QTextCursor.End)
        cursor.insertText(text)
        self.logDisplay.setTextCursor(cursor)
        self.logDisplay.ensureCursorVisible()
    def outputWrittenStderr(self, text):
        cursor = self.logDisplay.textCursor()
        cursor.movePosition(QtGui.QTextCursor.End)
        cursor.insertText(text)
        self.logDisplay.setTextCursor(cursor)
        self.logDisplay.ensureCursorVisible()
    '''
    # Signals fired when the whole queue starts / finishes.
    trigger_all_task_start = pyqtSignal(str)
    trigger_all_task_done = pyqtSignal(str)
    # Task list; each entry is itself a QThread-based fetch task.
    # NOTE(review): this is a class-level mutable list, so it is shared by
    # every instance of this class — presumably intentional given the
    # __init__ issue above, but confirm before creating multiple queues.
    QA_GUI_Task_List = []

    def run(self):
        # Run queued tasks strictly one after another, busy-waiting (1 s
        # poll) until each sub-task thread finishes.
        self.trigger_all_task_start.emit('all_task_start')
        for iSubTask in self.QA_GUI_Task_List:
            iSubTask.start()
            # wait finish iSubTask
            while (iSubTask.isRunning()):
                time.sleep(1)
        self.trigger_all_task_done.emit('all_task_done')

    def putTask(self, subTask):
        # Enqueue a task; it will run on the next run() invocation.
        self.QA_GUI_Task_List.append(subTask)

    def clearTask(self):
        # Empty the (shared) task list.
        self.QA_GUI_Task_List.clear()
richter/ml.py | bpptkg/bpptkg-richter | 3 | 12768231 | <filename>richter/ml.py
"""
Utility module for computing Richter magnitude scales, i.e. ML or local
magnitude on BPPTKG seismic network.
"""
import numpy as np
from . import paz
def filter_stream(stream, **kwargs):
    """Return a merged copy of the traces in *stream* that match the given
    select() criteria (e.g. network/station/component).

    When more than one trace matches, they are merged into a single trace
    with gaps filled by interpolation.

    :param stream: ObsPy waveform stream object.
    :type stream: :class:`obspy.core.stream.Stream`
    """
    selection = stream.copy().select(**kwargs)
    if selection.count() > 1:
        selection.merge(method=1, fill_value='interpolate')
    return selection
def compute_bpptkg_ml(wa_ampl):
    """Return the BPPTKG Richter local magnitude for a Wood-Anderson
    zero-to-peak amplitude.

    Uses ml = log10(A) - log10(A0), where *wa_ampl* (A) is in millimeters
    and the BPPTKG network calibration term log10(A0) is -1.4.

    :param wa_ampl: Wood-Anderson zero-to-peak amplitude in millimeters.
    :type wa_ampl: float
    :return: BPPTKG Richter magnitude scale.
    :rtype: float
    """
    log10_a0 = -1.4
    return np.log10(wa_ampl) - log10_a0
def compute_wa(stream, station, network='VG', component='Z', **kwargs):
    """Return the Wood-Anderson zero-to-peak amplitude in meters for one
    station/component of *stream*, or None if no matching trace exists.

    Matching traces are deconvolved with the station PAZ response and
    re-convolved with the Wood-Anderson instrument response before the
    peak amplitude is taken.

    :param stream: ObsPy waveform stream object.
    :param station: Seismic station name, e.g. MEPAS, MEGRA, etc.
    :param network: Seismic network name, default VG.
    :param component: Seismic station component (E, N, Z), default Z.
    """
    trace_stream = filter_stream(stream, station=station, network=network,
                                 component=component, **kwargs)
    if not trace_stream:
        return None
    trace_stream.simulate(paz_remove=paz.get_paz(station, component),
                          paz_simulate=paz.PAZ['WOOD_ANDERSON'],
                          water_level=0.0)
    return np.max(np.abs(trace_stream[0].data))
def compute_ml(stream, station, network='VG', component='Z', **kwargs):
    """Return the BPPTKG Richter local magnitude for one station/component
    of *stream*, or None if no matching trace exists.

    :param stream: ObsPy waveform stream object.
    :param station: Seismic station name, e.g. MEPAS, MEGRA, etc.
    :param network: Seismic network name, default VG.
    :param component: Seismic station component (E, N, Z), default Z.
    """
    amplitude_m = compute_wa(stream, station, network=network,
                             component=component, **kwargs)
    if not amplitude_m:
        return None
    # compute_bpptkg_ml expects the amplitude in millimeters.
    return compute_bpptkg_ml(amplitude_m * 1000)
def compute_analog_ml(p2p_amplitude):
    """Return the Richter magnitude from an analog peak-to-peak amplitude.

    The peak-to-peak value must come from the DEL (Deles) analog station
    and be given in millimeters. The three correction factors map the DEL
    amplitude scale onto the PUS amplitude scale before the standard
    magnitude formula is applied.

    :param p2p_amplitude: Peak-to-peak amplitude in mm.
    :type p2p_amplitude: float
    """
    # Combined DEL -> PUS correction: k1 * k2 * k3.
    correction = (2800 / (0.13 * 27000)) * (20.0 / 50.0) * (3981.0 / 7943.0)
    zero_to_peak = p2p_amplitude / 2.0
    return compute_bpptkg_ml(correction * zero_to_peak)
def compute_app(stream, station, network='VG', component='Z', **kwargs):
    """Return the peak-to-peak amplitude of one station/component of
    *stream*, or None if no matching trace exists.

    :param stream: ObsPy waveform stream object.
    :param station: Seismic station name, e.g. MEPAS, MEGRA, etc.
    :param network: Seismic network name, default VG.
    :param component: Seismic station component (E, N, Z), default Z.
    """
    trace_stream = filter_stream(stream, station=station, network=network,
                                 component=component, **kwargs)
    if not trace_stream:
        return None
    data = trace_stream[0].data
    # Peak-to-peak: distance from the minimum to the maximum sample.
    return np.abs(np.min(data)) + np.abs(np.max(data))
def compute_seismic_energy(m):
    """Return the seismic energy, in units of 1e12 ergs, for Richter local
    magnitude *m* using the Gutenberg-Richter relation:

    .. math::

        log E = 11.8 + 1.5M

    :param m: Richter local magnitude.
    :type m: float
    """
    energy_ergs = 10 ** (11.8 + 1.5 * m)
    return energy_ergs / 10 ** 12
def compute_seismic_energy_from_stream(stream, station, network='VG',
                                       component='Z', **kwargs):
    """Return the Gutenberg-Richter seismic energy (1e12 ergs) computed from
    the local magnitude of one station/component of *stream*, or None if no
    matching trace exists.

    :param stream: ObsPy waveform stream object.
    :param station: Seismic station name, e.g. MEPAS, MEGRA, etc.
    :param network: Seismic network name, default VG.
    :param component: Seismic station component (E, N, Z), default Z.
    """
    magnitude = compute_ml(stream, station, network=network,
                           component=component, **kwargs)
    if magnitude is None:
        return None
    return compute_seismic_energy(magnitude)
| 2.921875 | 3 |
tests-deprecating/milvus_benchmark/milvus_benchmark/env/local.py | CyberFlameGO/milvus | 10,504 | 12768232 | <reponame>CyberFlameGO/milvus<filename>tests-deprecating/milvus_benchmark/milvus_benchmark/env/local.py
import logging
from milvus_benchmark.env.base import BaseEnv
logger = logging.getLogger("milvus_benchmark.env.local")
class LocalEnv(BaseEnv):
    """Environment wrapper for a locally deployed Milvus instance."""

    env_mode = "local"

    def __init__(self, deploy_mode=None):
        super().__init__(deploy_mode)

    def start_up(self, hostname, port):
        """Record the target hostname; return False if that fails."""
        try:
            self.set_hostname(hostname)
        except Exception as exc:
            logger.error(str(exc))
            return False
        return True
| 1.921875 | 2 |
server/headless_client.py | LeonardoPohl/DPS-2-Nitocris | 0 | 12768233 | import base64
import json
import pickle
from sys import argv
from time import sleep
import websocket
map_function = None
socket: websocket.WebSocketApp = None
def set_map_function(code: str):
    """Decode and unpickle the map function sent by the server.

    WARNING: ``pickle.loads`` on data received over the network can execute
    arbitrary code; only connect this worker to a trusted server.
    """
    global map_function
    payload = base64.b64decode(code)
    map_function = pickle.loads(payload)
def execute_map(data):
    """Run the current map function on one decoded item and send the result
    back over the websocket as a base64-encoded pickle."""
    item = pickle.loads(base64.b64decode(data))
    outcome = map_function(item)
    encoded = base64.b64encode(pickle.dumps(outcome)).decode("utf-8")
    socket.send(json.dumps({"type": "result", "value": encoded}))
def handle_message(ws, message):
    """Dispatch an incoming JSON message to the matching handler by type."""
    parsed = json.loads(message)
    handlers = {
        "function": set_map_function,
        "data": execute_map,
    }
    handlers[parsed["type"]](parsed["value"])
def wait_and_start_websocket(*_):
    """Reconnect after a short pause; used as the websocket on_close callback."""
    sleep(1)
    start_websocket()
def on_open(ws):
    """Tell the server this worker is ready as soon as the socket opens."""
    ready_message = json.dumps({"type": "ready", "value": None})
    ws.send(ready_message)
def start_websocket():
    """Create the WebSocketApp, register the callbacks, and block on its
    event loop until the connection closes."""
    global socket
    socket = websocket.WebSocketApp(
        websocket_url,
        on_open=on_open,
        on_message=handle_message,
        # on_close triggers a delayed reconnect, so the worker keeps retrying.
        on_close=wait_and_start_websocket,
    )
    socket.run_forever()
if __name__ == "__main__":
_, websocket_url = argv
start_websocket()
| 2.796875 | 3 |
setup.py | WqyJh/winpexpect | 1 | 12768234 | #
# This file is part of WinPexpect. WinPexpect is free software that is made
# available under the MIT license. Consult the file "LICENSE" that is
# distributed together with this file for the exact licensing terms.
#
# WinPexpect is copyright (c) 2008-2010 by the WinPexpect authors. See the
# file "AUTHORS" for a complete overview.
import sys
from setuptools import setup
if sys.version_info[0] == 3:
    # Teach 2to3 how to translate the removed types.StringTypes alias when
    # the sources are converted during a Python 3 install (use_2to3 below).
    from lib2to3.fixes import fix_types
    fix_types._TYPE_MAPPING['StringTypes'] = '(str,)'

setup(
    name = 'winpexpect',
    version = '1.6',
    description = 'A version of pexpect that works under Windows.',
    author = '<NAME>, <NAME>',
    author_email = '<EMAIL>, <EMAIL>',
    url = 'https://bitbucket.org/weyou/winpexpect',
    license = 'MIT',
    classifiers = ['Development Status :: 4 - Beta',
                   'Intended Audience :: Developers',
                   'License :: OSI Approved :: MIT License',
                   'Programming Language :: Python',
                   'Operating System :: Microsoft :: Windows'],
    # Modules live under lib/ rather than a package directory.
    package_dir = {'': 'lib'},
    py_modules = ['pexpect', 'winpexpect', 'expectstub'],
    test_suite = 'nose.collector',
    install_requires = ['pywin32 >= 214'],
    zip_safe = False,
    use_2to3 = True
)
| 1.203125 | 1 |
clusteringattacks.py | LamiaGharbi/Intelligent-IDS- | 0 | 12768235 | import warnings
import numpy as np
import pandas as pd
from matplotlib import patches
from sklearn.cluster import DBSCAN
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
import matplotlib.pyplot as plt
import concurrent.futures
import time
from pylab import bone, pcolor, colorbar, plot, show, rcParams, savefig
from hyperopt import fmin, hp, tpe, Trials, STATUS_OK
warnings.filterwarnings("ignore")
# --- Load the KDD/NSL-KDD train and test CSVs -----------------------------
# First read is only used to report the raw shapes below.
dataset_train=pd.read_csv('kdd_train.csv')
dataset_test=pd.read_csv('kdd_test.csv')
# The 41 KDD features plus the attack label, in file column order.
col_names = ["duration","protocol_type","service","flag","src_bytes",
    "dst_bytes","land","wrong_fragment","urgent","hot","num_failed_logins",
    "logged_in","num_compromised","root_shell","su_attempted","num_root",
    "num_file_creations","num_shells","num_access_files","num_outbound_cmds",
    "is_host_login","is_guest_login","count","srv_count","serror_rate",
    "srv_serror_rate","rerror_rate","srv_rerror_rate","same_srv_rate",
    "diff_srv_rate","srv_diff_host_rate","dst_host_count","dst_host_srv_count",
    "dst_host_same_srv_rate","dst_host_diff_srv_rate","dst_host_same_src_port_rate",
    "dst_host_srv_diff_host_rate","dst_host_serror_rate","dst_host_srv_serror_rate",
    "dst_host_rerror_rate","dst_host_srv_rerror_rate","label"]
print("Shape of Training Dataset:", dataset_train.shape)
print("Shape of Testing Dataset:", dataset_test.shape)
# Re-read with explicit column names attached to the dataset.
# NOTE(review): header=None treats the first file row as data; if the CSVs
# actually contain a header row it ends up as a data row — confirm the files.
dataset_train = pd.read_csv("kdd_train.csv", header=None, names = col_names)
dataset_test = pd.read_csv("kdd_test.csv", header=None, names = col_names)
# Label distribution of training and testing sets.
print('Label distribution Training set:')
print(dataset_train['label'].value_counts())
print()
print('Label distribution Test set:')
print(dataset_test['label'].value_counts())
# # explore categorical features
print('Training set:')
for col_name in dataset_train.columns:
if dataset_train[col_name].dtypes == 'object' :
unique_cat = len(dataset_train[col_name].unique())
print("Feature '{col_name}' has {unique_cat} categories".format(col_name=col_name, unique_cat=unique_cat))
# #see how distributed the feature service is, it is evenly distributed and therefore we need to make dummies for all.
# print()
print('Distribution of categories in service:')
print(dataset_train['service'].value_counts().sort_values(ascending=False).head())
# # Test set
print('Test set:')
for col_name in dataset_test.columns:
if dataset_test[col_name].dtypes == 'object' :
unique_cat = len(dataset_test[col_name].unique())
print("Feature '{col_name}' has {unique_cat} categories".format(col_name=col_name, unique_cat=unique_cat))
#categorical_columns=['protocol_type', 'service', 'flag']
# # insert code to get a list of categorical columns into a variable, categorical_columns
categorical_columns=['protocol_type', 'service', 'flag']
# # Get the categorical values into a 2D numpy array
dataset_train_categorical_values = dataset_train[categorical_columns]
dataset_test_categorical_values = dataset_test[categorical_columns]
dataset_train_categorical_values.head()
# # protocol type
unique_protocol=sorted(dataset_train.protocol_type.unique())
string1 = 'Protocol_type_'
unique_protocol2=[string1 + x for x in unique_protocol]
# # service
unique_service=sorted(dataset_train.service.unique())
string2 = 'service_'
unique_service2=[string2 + x for x in unique_service]
# # flag
unique_flag=sorted(dataset_train.flag.unique())
string3 = 'flag_'
unique_flag2=[string3 + x for x in unique_flag]
# # put together
dumcols=unique_protocol2 + unique_service2 + unique_flag2
print(dumcols)
# #do same for test set
unique_service_test=sorted(dataset_test.service.unique())
unique_service2_test=[string2 + x for x in unique_service_test]
testdumcols=unique_protocol2 + unique_service2_test + unique_flag2
# #Transform categorical features into numbers using LabelEncoder()
dataset_train_categorical_values_enc=dataset_train_categorical_values.apply(LabelEncoder().fit_transform)
print(dataset_train_categorical_values_enc.head())
# # test set
dataset_test_categorical_values_enc=dataset_test_categorical_values.apply(LabelEncoder().fit_transform)
# #One-Hot-Encoding¶
enc = OneHotEncoder()
dataset_train_categorical_values_encenc = enc.fit_transform(dataset_train_categorical_values_enc)
dataset_train_cat_data = pd.DataFrame(dataset_train_categorical_values_encenc.toarray(),columns=dumcols)
# # test set
dataset_test_categorical_values_encenc = enc.fit_transform(dataset_test_categorical_values_enc)
dataset_test_cat_data = pd.DataFrame(dataset_test_categorical_values_encenc.toarray(),columns=testdumcols)
dataset_train_cat_data.head()
trainservice=dataset_train['service'].tolist()
testservice= dataset_test['service'].tolist()
difference=list(set(trainservice) - set(testservice))
string = 'service_'
difference=[string + x for x in difference]
print(difference)
for col in difference:
dataset_test_cat_data[col] = 0
print(dataset_test_cat_data.shape)
# --- Join, relabel, and export the final datasets -------------------------
# Join the encoded categorical frame with the numeric frame and drop the
# original (now redundant) categorical columns.
newdf=dataset_train.join(dataset_train_cat_data)
newdf.drop('flag', axis=1, inplace=True)
newdf.drop('protocol_type', axis=1, inplace=True)
newdf.drop('service', axis=1, inplace=True)
# test data
newdf_test=dataset_test.join(dataset_test_cat_data)
newdf_test.drop('flag', axis=1, inplace=True)
newdf_test.drop('protocol_type', axis=1, inplace=True)
newdf_test.drop('service', axis=1, inplace=True)
print(newdf.shape)
print(newdf_test.shape)
# Take the label columns.
labeldf=newdf['label']
labeldf_test=newdf_test['label']
# Map attack names to 4 classes: 1=DoS, 2=Probe, 3=R2L, 4=U2R
# (names not in the mapping keep their original value, e.g. 'normal').
newlabeldf=labeldf.replace({ 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,
                           'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2
                           ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,
                           'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})
newlabeldf_test=labeldf_test.replace({ 'neptune' : 1 ,'back': 1, 'land': 1, 'pod': 1, 'smurf': 1, 'teardrop': 1,'mailbomb': 1, 'apache2': 1, 'processtable': 1, 'udpstorm': 1, 'worm': 1,
                           'ipsweep' : 2,'nmap' : 2,'portsweep' : 2,'satan' : 2,'mscan' : 2,'saint' : 2
                           ,'ftp_write': 3,'guess_passwd': 3,'imap': 3,'multihop': 3,'phf': 3,'spy': 3,'warezclient': 3,'warezmaster': 3,'sendmail': 3,'named': 3,'snmpgetattack': 3,'snmpguess': 3,'xlock': 3,'xsnoop': 3,'httptunnel': 3,
                           'buffer_overflow': 4,'loadmodule': 4,'perl': 4,'rootkit': 4,'ps': 4,'sqlattack': 4,'xterm': 4})
# Put the mapped label column back.
newdf['label'] = newlabeldf
newdf_test['label'] = newlabeldf_test
y_train= newdf['label']
y_test= newdf_test['label']
import csv
# NOTE(review): all files below are opened in append mode ('a'), so
# re-running this script duplicates every row — confirm this is intended.
with open('newdataset/labeltrain.csv', 'a', newline='') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',')
    #for row in rows:
    spamwriter.writerows(map(lambda x: [x], y_train))
with open('newdataset/labeltest.csv', 'a', newline='') as csvfile:
    spamwriter = csv.writer(csvfile, delimiter=',')
    #for row in rows:
    spamwriter.writerows(map(lambda x: [x], y_test))
X_train=newdf.drop('label',axis='columns')
X_train = X_train[1:] #take the data less the header row
for i in range(len(X_train)):
    with open('newdataset/train.csv', 'a', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',')
        #for row in rows:
        spamwriter.writerow(X_train.iloc[i])
# NOTE(review): new_header is taken AFTER the rows were already written and
# AFTER the first row was sliced off; the intent of these reassignments is
# unclear — verify against the downstream consumer of X_train/X_test.
new_header = X_train.iloc[0]
X_train.columns = new_header
X_test=newdf_test.drop('label',axis='columns')
X_test = X_test[1:] #take the data less the header row
for i in range(len(X_test)):
    with open('newdataset/test.csv', 'a', newline='') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter=',')
        #for row in rows:
        spamwriter.writerow(X_test.iloc[i])
# NOTE(review): X_test.columns is assigned twice here, first from the train
# header and then from its own first data row — likely a copy/paste slip.
X_test.columns = new_header
new_header = X_test.iloc[0]
X_test.columns = new_header
| 1.804688 | 2 |
sources/kuzzle/firmware/firmware.py | etrousset/kuzzle-iot-board | 1 | 12768236 | #!/usr/bin/python3
import signal
import RPi.GPIO as GPIO
import logging
import coloredlogs
import sys
sys.path.append("..")
import argparse
import ruamel.yaml as YAML
import time
import threading
import asyncio
from neopixeldevice import NeopixelDevice, LED_PIN, LightMode, ws as ws_
from utils import *
from pn532 import Pn532
from kuzzle.kuzzle import KuzzleIOT
import namedtupled
yaml = YAML.YAML()
# Directory holding the firmware/hardware YAML configuration files.
CONFIG_PATH = '../config'
log = logging.getLogger('MAIN')
# Device base UID, derived from the Raspberry Pi serial at startup.
UID = None
# Registry of KuzzleIOT device instances, keyed by role name.
devices = {}
# PN532 NFC reader handle, created in init_hw_components().
pn532 = None
# Neopixel LED strip handle, created in init_hw_components().
neo = None
# Initial LED-strip state: a fixed 8-color ramp.
# @formatter: off
default_state = {
    "mode": LightMode.COLOR_RAMP.value,
    "ramp": [
        (255, 0, 0),
        (127, 127, 0),
        (0, 255, 0),
        (0, 127, 127),
        (0, 0, 255),
        (127, 0, 127),
        (255, 127, 0),
        (255, 255, 255),
    ]
}
# @formatter:on
# Use Broadcom (BCM) pin numbering for all GPIO calls.
GPIO.setmode(GPIO.BCM)
# Current state of the four push buttons, published to Kuzzle on change.
buttons = {
    "button_0": "RELEASED",
    "button_1": "RELEASED",
    "button_2": "RELEASED",
    "button_3": "RELEASED",
}
def init_hw_components(fw_config, hw_config):
    """Create all hardware device wrappers and their Kuzzle IoT endpoints,
    connect them, then register the board itself on Kuzzle.

    Populates the module-level devices/neo/pn532/UID globals.
    """
    global devices
    global pn532
    # NOTE(review): 'pi' is declared global but never assigned here nor
    # defined at module level — presumably a leftover from a pigpio-based
    # version; confirm before removing.
    global pi
    global UID
    global neo
    kuzzle_cfg = fw_config.kuzzle
    # Tuple of pending connect() coroutines, gathered below.
    dev_conn = ()  # devices to connect
    UID = rpi_get_serial()
    log.info('Getting device base UID: %s', UID)
    log.info('Connecting to Kuzzle on {}:{}'.format(kuzzle_cfg.host, kuzzle_cfg.port))
    log.debug("Neopixel: led_count = {}".format(hw_config.rgb_light.led_count))
    # RGB LED strip and its Kuzzle endpoint.
    neo = NeopixelDevice(hw_config.rgb_light.led_count, LED_PIN, strip_type=ws_.WS2811_STRIP_GRB)
    devices["kuzzle_neo"] = KuzzleIOT(
        'rgb_light_{}'.format(UID),
        'neopixel-linear',
        host=kuzzle_cfg.host,
        port=kuzzle_cfg.port,
        owner=fw_config.device.owner,
        additional_info={'led_count': hw_config.rgb_light.led_count}
    )
    dev_conn += (devices["kuzzle_neo"].connect(neo.on_kuzzle_connected),)
    # NFC/RFID reader endpoint.
    devices["kuzzle_rfid"] = KuzzleIOT(
        "NFC_" + UID,
        "RFID_reader",
        host=kuzzle_cfg.host,
        port=kuzzle_cfg.port,
        owner=fw_config.device.owner
    )
    dev_conn += (devices["kuzzle_rfid"].connect(None),)
    # Optional motion sensor endpoint.
    if hw_config.motion_sensor.enabled:
        devices["kuzzle_motion"] = KuzzleIOT(
            "motion_" + UID,
            "motion-sensor",
            host=kuzzle_cfg.host,
            port=kuzzle_cfg.port,
            owner=fw_config.device.owner
        )
        dev_conn += (devices["kuzzle_motion"].connect(None),)
    # Optional push-buttons endpoint.
    if hw_config.buttons.enabled:
        devices["kuzzle_buttons"] = KuzzleIOT(
            "buttons_{}".format(UID),
            "button",
            host=kuzzle_cfg.host,
            port=kuzzle_cfg.port,
            owner=fw_config.device.owner
        )
        dev_conn += (devices["kuzzle_buttons"].connect(None),)
    # Ambient light sensor endpoint (always enabled).
    devices["kuzzle_light"] = KuzzleIOT(
        "light_lvl_{}".format(UID),
        "light_sensor",
        host=kuzzle_cfg.host,
        port=kuzzle_cfg.port,
        owner=fw_config.device.owner
    )
    dev_conn += (devices["kuzzle_light"].connect(None),)
    # Connect every device endpoint concurrently.
    asyncio.get_event_loop().run_until_complete(
        asyncio.gather(*dev_conn)
    )
    # Register the board itself, listing its attached device UIDs.
    attached_devices = []
    for d in devices:
        attached_devices.append(devices[d].device_uid)
    board = KuzzleIOT(
        UID,
        hw_config.type,
        host=kuzzle_cfg.host,
        port=kuzzle_cfg.port,
        owner=fw_config.device.owner,
        additional_info={
            "devices": attached_devices,
            "hw_version": hw_config.hw_version,
            "sw_version": fw_config.firmware.version
        }
    )
    asyncio.get_event_loop().run_until_complete(
        asyncio.gather(
            board.connect(None),
        )
    )
    log.debug('All KuzzleIoT instances are connected...')
    # Apply and publish the initial LED-strip state.
    neo.state = default_state
    neo.publish_state()
    # Start the NFC reader on the Pi's primary serial port.
    pn532 = Pn532('/dev/serial0', devices["kuzzle_rfid"].publish_state)
def logs_init():
    """Install colorized stdout logging for the main logger."""
    log_format = '[%(thread)X] - %(asctime)s - %(name)s - %(levelname)s - %(message)s'
    coloredlogs.install(
        logger=log,
        fmt=log_format,
        level=logging.DEBUG,
        stream=sys.stdout,
    )
class GpioHandler:
    """Dispatches RPi.GPIO edge events to the matching Kuzzle device.

    Buttons and the motion sensor share one callback; the GPIO number
    decides which device state gets published.
    """

    def __init__(self, hw_config):
        self.hw_config = hw_config

    def on_gpio_changed(self, gpio, level):
        """Publish the new state of the device wired to *gpio*."""
        if gpio in self.hw_config.buttons.gpios:
            # Buttons use pull-ups, so a low level means pressed.
            buttons[
                'button_{}'.format(self.hw_config.buttons.gpios.index(gpio))] = 'PRESSED' if not level else 'RELEASED'
            log.debug('Buttons state: %s', buttons)
            devices["kuzzle_buttons"].publish_state(buttons)
        elif gpio == self.hw_config.motion_sensor.gpio:
            log.debug('Motion: %s', 'True' if level else 'False')
            devices["kuzzle_motion"].publish_state({'motion': True if level else False})
        else:
            log.warning('Unexpected GPIO: %d', gpio)

    def on_gpio_changed_up(self, channel):
        # 30 ms settle delay so the GPIO level is stable before reading it.
        time.sleep(0.03)
        self.on_gpio_changed(channel, GPIO.input(channel))

    def motion_sensor_install(self):
        """Configure the motion-sensor GPIO and register its edge callback."""
        # BUGFIX: this identical GPIO.setup() call was previously issued twice.
        GPIO.setup(self.hw_config.motion_sensor.gpio, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        GPIO.add_event_detect(self.hw_config.motion_sensor.gpio, GPIO.BOTH, callback=self.on_gpio_changed_up)

    def buttons_install(self):
        """Configure all button GPIOs and register debounced edge callbacks."""
        GPIO.setup(self.hw_config.buttons.gpios, GPIO.IN, pull_up_down=GPIO.PUD_UP)
        for gpio in self.hw_config.buttons.gpios:
            GPIO.add_event_detect(gpio, GPIO.BOTH, callback=self.on_gpio_changed_up, bouncetime=50)
def cleanup(hw_config):
    """Turn off the status LEDs and the LED strip, then release the GPIOs."""
    global neo
    if hw_config.connection_led.enabled:
        GPIO.output(hw_config.connection_led.gpio, 0)
    if hw_config.power_led.enabled:
        GPIO.output(hw_config.power_led.gpio, 0)
    neo.state = {'on': False}
    GPIO.cleanup()
def start_sensing_light(hw_config):
    """Poll the TEPT5700 light sensor once per second and publish the level.

    Runs until interrupted; KeyboardInterrupt stops the loop quietly.
    """
    log.info("Starting light level sensing thread: reading in MCP channel {}".format(hw_config.light_sensor.mcp_channel))
    import tept5700

    sensor = tept5700.Tept5700(5.2, 10000, mcp_channel=hw_config.light_sensor.mcp_channel)
    try:
        while True:
            _voltage, lux = sensor.read_lux()
            devices["kuzzle_light"].publish_state({"level": lux})  # "{:.3f}".format(lux)})
            time.sleep(1)
    except KeyboardInterrupt:
        pass
class SignalHandler:
    """Handles SIGTERM: blinks the connection LED, cleans up, and exits."""

    def __init__(self, hw_config):
        self.hw_config = hw_config

    def on_sigterm(self, sig_num, stack_frame):
        """SIGTERM handler: signal shutdown visually, then stop the service."""
        log.debug("I'm dying!!!")
        led = self.hw_config.connection_led.gpio
        # Blink off/on twice in 0.5 s steps, finishing with the LED off.
        for level in (0, 1, 0, 1):
            GPIO.output(led, level)
            time.sleep(0.5)
        GPIO.output(led, 0)
        log.info("service stopped")
        cleanup(self.hw_config)
        exit(0)
def startup():
    """Firmware entry point: configure logging and GPIOs, connect to Kuzzle
    (with retries), start the sensor threads, then run the asyncio loop."""
    logs_init()
    fw_config, hw_config = load_configs(CONFIG_PATH)
    # Wrap the config dicts so fields are reachable as attributes.
    fw_config = namedtupled.map(fw_config)
    hw_config = namedtupled.map(hw_config)
    kuzzle_config = fw_config.kuzzle
    # Graceful shutdown on SIGTERM (blinks the LED and cleans up GPIOs).
    sh = SignalHandler(hw_config)
    signal.signal(signal.SIGTERM, sh.on_sigterm)
    gpio_handler = GpioHandler(hw_config)
    if hw_config.power_led.enabled:
        GPIO.setup(hw_config.power_led.gpio, GPIO.OUT)
        GPIO.output(hw_config.power_led.gpio, 1)
    if hw_config.connection_led.gpio:
        GPIO.setup(hw_config.connection_led.gpio, GPIO.OUT)
        GPIO.output(hw_config.connection_led.gpio, 0)
    # Try to reach the Kuzzle server up to 50 times, 5 s apart.
    retry = 50
    while retry:
        khost = kuzzle_config.host
        kport = kuzzle_config.port
        res = KuzzleIOT.server_info(khost, kport)
        if res:
            # Connected: stop retrying and bring up all hardware components.
            retry = 0
            log.debug('Connected to Kuzzle on http://{}:{}, version = {}'.format(
                khost,
                kport,
                res["serverInfo"]["kuzzle"]["version"])
            )
            init_hw_components(fw_config, hw_config)
            # Connection LED on once everything is registered.
            GPIO.output(hw_config.connection_led.gpio, 1)
            if hw_config.motion_sensor.enabled:
                gpio_handler.motion_sensor_install()
            if hw_config.buttons.enabled:
                gpio_handler.buttons_install()
            # NFC polling and light sensing run in daemon threads so they
            # die with the main process.
            pn532_thread = threading.Thread(target=pn532.start_polling, name="pn532_polling")
            pn532_thread.daemon = True
            pn532_thread.start()
            light_sensor_thread = threading.Thread(target=start_sensing_light, args=(hw_config,),
                                                   name="light_sensor")
            light_sensor_thread.daemon = True
            light_sensor_thread.start()
        else:
            log.warning("Unable to connect to Kuzzle...")
            retry -= 1
            if retry:
                log.info('Trying to reconnect in 5s, %d retries remaining', retry)
            else:
                log.critical('Impossible to connect to Kuzzle service...quitting')
                exit(-1)
            time.sleep(5)
    try:
        log.info("Entering event loop...")
        asyncio.get_event_loop().run_forever()
        log.info("Configuration changed, restarting firmware...")
    except KeyboardInterrupt as e:
        pass
    finally:
        # Always release LEDs/GPIOs, even on crash or Ctrl-C.
        cleanup(hw_config)
if __name__ == '__main__':
    # Run the firmware only when executed as a script.
    startup()
| 2.09375 | 2 |
twitoff/__init__.py | ekoly/twitoff-app | 0 | 12768237 | #!/usr/bin/env python3
"""
App base.
- APP: flask app object
- DB: sqlalchemy database
- UTIL: utility methods
"""
import logging

from twitoff.app import make_app

APP, DB, REDIS = make_app()
# WSGI servers conventionally look for a module-level "application" object.
application = APP
LOG = logging.getLogger("twitoff")

# These imports run after APP exists — presumably because Routes and the
# service layer import APP/DB from this package (circular import); verify
# before reordering.
from twitoff import Routes
from twitoff.service.util_service import UtilService

UTIL = UtilService()
logging.basicConfig(level=logging.DEBUG)
| 2.078125 | 2 |
Python_Files/murach/book_apps/ch10/create_account.py | Interloper2448/BCGPortfolio | 0 | 12768238 | <reponame>Interloper2448/BCGPortfolio<filename>Python_Files/murach/book_apps/ch10/create_account.py
def main():
    """Drive the account-creation dialog and greet the new user."""
    name = get_full_name()
    print()
    password = get_password()
    print()
    greeting = "Hi " + get_first_name(name) + ", thanks for creating an account."
    print(greeting)
def get_full_name():
    """Prompt until the user enters a name containing a space."""
    while True:
        entry = input("Enter full name: ").strip()
        if " " not in entry:
            print("You must enter your full name.")
        else:
            return entry
def get_first_name(full_name):
    """Return the first word (first name) of *full_name*.

    BUGFIX: the previous find()-based slice returned full_name[:-1] when
    the input had no space (find() returns -1), silently dropping the last
    character. Splitting on the first space is identical for all spaced
    inputs and returns the whole string otherwise.
    """
    return full_name.split(" ", 1)[0]
def _is_valid_password(password):
    """Return True if *password* is >= 8 chars with a digit and an uppercase letter."""
    has_digit = any(char.isdigit() for char in password)
    has_upper = any(char.isupper() for char in password)
    return has_digit and has_upper and len(password) >= 8


def get_password():
    """Prompt until the user enters a password matching the policy.

    Policy: at least 8 characters, at least one digit, and at least one
    uppercase letter.
    """
    while True:
        password = input("Enter password: ").strip()
        if _is_valid_password(password):
            return password
        print("Password must be 8 characters or more \n" +
              "with at least one digit and one uppercase letter.")
if __name__ == "__main__":
main()
| 4.09375 | 4 |
authors/apps/articles/migrations/0025_merge_20190508_1042.py | andela/ah-the-jedi-backend | 1 | 12768239 | # Generated by Django 2.1 on 2019-05-08 10:42
from django.db import migrations
class Migration(migrations.Migration):
    # Auto-generated merge migration: reconciles two parallel migration
    # branches of the 'articles' app. It intentionally has no operations.

    dependencies = [
        ('articles', '0024_auto_20190508_0741'),
        ('articles', '0020_merge_20190508_0813'),
    ]

    operations = [
    ]
| 1.351563 | 1 |
lib/evaluate_model.py | 98miranil/DeepLearning_Solver | 0 | 12768240 | <reponame>98miranil/DeepLearning_Solver<gh_stars>0
#EVALUATION OF THE MODEL
def evaluate_model(model, X_test, y_test):
    """Evaluate *model* on the test split and print its metric (the second
    value returned by evaluate(); loss is discarded)."""
    _loss, score = model.evaluate(X_test, y_test, verbose=0)
    print(score)
def predict_model(model, X):
    """Print the raw predictions of *model* for input *X*."""
    predictions = model.predict(X)
    print(predictions)
def predict_class_model(model, X):
    """Print the predicted class labels of *model* for input *X*."""
    classes = model.predict_classes(X)
    print(classes)
Mag2021-main-code/Proactive caching/PENN/run_PENN.py | wjj19950101/Magrank_v1 | 0 | 12768241 | # #################################################################
# Python codes PENN for caching
# Codes have been tested successfully on Python 3.6.0 with TensorFlow 1.14.0.
# #################################################################
import scipy.io as sio
import numpy as np
import runner
import math
import sys  # NOTE(review): unused in this script -- verify before removing
# ---- experiment configuration ----
K = 10 # number of files
num_H = 1000 # number of training samples, 10000 for K=10 and 20, 15000 for K=30
num_val = math.ceil(0.1*num_H) # number of validation samples
training_epochs = 3000 # number of training epochs
N_mont = 10 # number of Montercalo simulations
LR = 0.01 # initial learning rate
batch_size = min(num_H, 1000) # batch size
# load data
# Each .mat file holds train/test inputs (X_*), target policies (pol_*)
# and file popularities (pf_test) for K files.
Xtrain = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['X_train']
Ytrain = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['pol_tr']
X = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['X_test']
Y = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['pol_te']
pf_test = sio.loadmat('../Data/Sup_WFpol_Nf'+str(K)+'.mat')['pf_test']
# Sample counts and history depth come from the loaded arrays;
# assumes X* has shape (K, d_past, num_samples) -- TODO confirm.
num_tr = Xtrain.shape[2]
num_te = X.shape[2]
d_past= Xtrain.shape[1]
layernum = [d_past*K, 10*K,K] # layer size
# Flatten each sample to a single (d_past*K,) input vector.
Xtrain = np.reshape(Xtrain,(d_past*K,num_tr))
X = np.reshape(X,(d_past*K,num_te))
# training
Ratio,Time = runner.run(Xtrain, Ytrain,X,Y,pf_test,num_H,num_val,N_mont, training_epochs=training_epochs, LR=LR,
                        batch_size=batch_size, K=K, layernum=layernum)
# performance
# Report the second-worst ratio across Monte Carlo runs (robust to one outlier)
# and the mean wall-clock training time.
Sort_Ratio = np.sort(Ratio)
print('The second worst ratio is: %f ' % Sort_Ratio[1] )
print('Average time for each training is: %f s' % (np.mean(Time)) )
| 2.0625 | 2 |
eyecite/__init__.py | pombredanne/eyecite | 0 | 12768242 | <gh_stars>0
# Public API of the eyecite package, re-exported from its submodules.
from .annotate import annotate
from .find_citations import get_citations
from .resolve import resolve_citations
from .utils import clean_text, dump_citations

# Names exported by ``from eyecite import *``.
__all__ = [
    "annotate",
    "get_citations",
    "clean_text",
    "resolve_citations",
    "dump_citations",
]
| 1.296875 | 1 |
app/migrations/0002_auto_20200803_1017.py | raptor419/SIH2020_AN314_AMRTrack | 0 | 12768243 | # Generated by Django 2.1.1 on 2020-08-03 04:47
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds two non-null CharField columns ('district', 'state') to PathTest.
    # default=None with preserve_default=False means Django only needed a
    # one-off value for existing rows at migration time; None would fail on
    # a non-null column if rows existed -- NOTE(review): verify the table
    # was empty when this migration ran.

    dependencies = [
        ('app', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='pathtest',
            name='district',
            field=models.CharField(default=None, max_length=50),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='pathtest',
            name='state',
            field=models.CharField(default=None, max_length=50),
            preserve_default=False,
        ),
    ]
| 1.710938 | 2 |
seapy/other.py | FRidh/seapy | 8 | 12768244 | <filename>seapy/other.py
"""
Other equations.
================
This module contains all kind of additional equations than can be useful.
"""
import numpy as np
def total_loss_factor(frequency, reverberation_time):
    """
    Total loss factor of a subsystem from its reverberation time.

    :param frequency: Frequency :math:`f`.
    :param reverberation_time: Reverberation time :math:`T`.
    :returns: Total loss factor.

    .. math:: \\eta = \\frac{2.2}{f T}

    See Craik, equation 1.19, page 9.
    """
    denominator = frequency * reverberation_time
    return 2.2 / denominator
def total_loss_factor_masonry(frequency):
    """
    Rule-of-thumb total loss factor for masonry-type structures.

    :param frequency: Frequency :math:`f`.
    :returns: Total loss factor.

    .. math:: \\eta_2 \\approx \\frac{1}{\\sqrt{f}} + 0.015

    See Craik, equation 1.21, page 9.
    """
    frequency_term = 1.0 / np.sqrt(frequency)
    return frequency_term + 0.015
| 2.625 | 3 |
vb2py/PythonCard/samples/minimalTree/minimalTree.py | ceprio/xl_vb2py | 0 | 12768245 | <reponame>ceprio/xl_vb2py
#!/usr/bin/python
"""
__version__ = "$Revision: 1.6 $"
__date__ = "$Date: 2004/05/05 16:53:27 $"
"""
from PythonCard import model
# events
# itemActivated, itemExpanding, itemExpanded,
# selectionChanging, selectionChanged
class Minimal(model.Background):
    """Demo background that lazily grows an (unbounded) binary tree:
    node "n" gets children "2n" and "2n+1" the first time it expands."""

    def on_initialize(self, event):
        # Create root node "1"; marking it as having children makes the
        # widget show an expansion handle before any children exist.
        tree = self.components.tree
        root = tree.addRoot("1")
        tree.setItemHasChildren(root, 1)
        tree.selectItem(root)

    def on_tree_itemExpanding(self, event):
        tree = self.components.tree
        item=event.item
        # This event can happen twice in the self.Expand call
        if tree.isExpanded(item):
            return
        # Node labels are the integers themselves, stored as strings.
        obj = int(tree.getItemText(item))
        if tree.getChildrenCount(item, 0) == 0:
            # First expansion: materialise the two children 2n and 2n+1,
            # each also flagged as expandable.
            lst = [obj * 2, (obj *2) + 1]
            for o in lst:
                new_item = tree.appendItem(item, str(o))
                tree.setItemHasChildren(new_item, 1)
        event.skip()
# Launch the sample application when run directly as a script.
if __name__ == '__main__':
    app = model.Application(Minimal)
    app.MainLoop()
| 2.421875 | 2 |
shot_detector/filters/compound/mean_cascade.py | w495/shot_detector | 18 | 12768246 | # -*- coding: utf8 -*-
"""
The main idea of this module, that you can combine
any number of any filters without any knowledge about their
implementation. You have only one requirement — user functions
should return a filter (or something that can be cast to a filter).
"""
from __future__ import absolute_import, division, print_function
from builtins import range
from shot_detector.filters import (
DelayFilter,
MeanSWFilter,
)
# Default sliding-window size (not referenced by the filters below).
WINDOW_SIZE = 25

delay = DelayFilter()

# ``delay(0)`` is the zero-delay (identity) filter.
original = delay(0)

# Sliding-window mean filter; the window size is supplied per call
# via the ``s=`` keyword (see the filter builders below).
mean = MeanSWFilter(
    # window_size=50,
    # strict_windows=True,
    # mean_name='EWMA',
    cs=False,
)
def multi_mean(start=5, stop=50, step=None, pivot=None, **kwargs):
    """
    Average of the family of mean-difference filters produced by
    :func:`min_size_filter_generator` over window sizes in
    ``range(start, stop, step)``.

    :param start: first window size of the family
    :param stop: exclusive upper bound on window sizes
    :param step: stride between window sizes (defaults to 1)
    :param pivot: reference window size (defaults to ``start``)
    :param kwargs: forwarded to every underlying ``mean`` filter
    :return: the averaged filter
    """
    if step is None:
        step = 1
    filters = min_size_filter_generator(start, stop, step, pivot, **kwargs)
    # BUG FIX: the previous ``sum(res) / (stop - start) / step`` divided by
    # (stop - start) * step, over-dividing by step**2 whenever step > 1.
    # Average over the actual number of generated filters instead; for the
    # default step == 1 the result is unchanged.
    count = len(range(start, stop, step))
    return sum(filters) / count
def min_size_filter_generator(start,
                              stop,
                              step=None,
                              pivot=None,
                              **kwargs):
    """
    Yield difference filters ``mean(s=size + 1) - mean(s=pivot)`` for each
    window size in ``range(start, stop, step)``.

    :param start: first window size
    :param stop: exclusive upper bound on window sizes
    :param step: stride between window sizes (defaults to 1)
    :param pivot: window size of the reference mean (defaults to ``start``)
    :param kwargs: forwarded to every ``mean`` filter
    :return: generator of filter differences
    """
    step = 1 if step is None else step
    pivot = start if pivot is None else pivot
    for window_size in range(start, stop, step):
        reference = mean(s=pivot, **kwargs)
        current = mean(s=window_size + 1, **kwargs)
        yield current - reference
| 2.625 | 3 |
posts/tests/test_models.py | nick-rebrik/Yatube | 0 | 12768247 | from django.contrib.auth import get_user_model
from django.test import TestCase
from posts.models import Post, Group
User = get_user_model()
class PostModelTest(TestCase):
    """Unit tests for the Post model's __str__, verbose names and help texts."""

    @classmethod
    def setUpClass(cls):
        # Create one author and one post shared by all tests in this class.
        super().setUpClass()
        user = User.objects.create_user(username='TestUser')
        cls.post = Post.objects.create(
            text='Text in post more then 15 simbols',
            author=user
        )

    def test_str_method(self):
        # str(post) must equal the first 15 characters of its text.
        post = PostModelTest.post
        first_15_simbols = post.text[:15]
        self.assertEqual(first_15_simbols, str(post))

    def test_verbose_name(self):
        # Field verbose names must match the (Russian) labels declared
        # on the model.
        post = PostModelTest.post
        field_verboses = {
            'text': 'Текст',
            'author': 'Автор',
            'group': 'Группа',
        }
        for field, verbose_name in field_verboses.items():
            with self.subTest(field=field):
                self.assertEqual(
                    post._meta.get_field(field).verbose_name, verbose_name)

    def test_help_text(self):
        # Field help texts must match the model's declared help_text values.
        post = PostModelTest.post
        field_help_text = {
            'group': 'Выберете группу сообщества для публикации.',
            'text': 'Введите текст публикации.'
        }
        for field, help_text in field_help_text.items():
            with self.subTest(field=field):
                self.assertEqual(
                    post._meta.get_field(field).help_text, help_text)
class GroupModelTest(TestCase):
    """Unit tests for the Group model's __str__ and verbose name."""

    @classmethod
    def setUpClass(cls):
        # One shared Group fixture for all tests in this class.
        super().setUpClass()
        cls.group = Group.objects.create(
            title='Test case',
            slug='test'
        )

    def test_str_method(self):
        # str(group) must equal the group's title.
        group = GroupModelTest.group
        str_method_name = group.title
        self.assertEqual(str_method_name, str(group))

    def test_verbose_name(self):
        # The 'title' field's verbose name must match the model declaration.
        group = GroupModelTest.group
        verbose_name = 'Группа'
        self.assertEqual(
            group._meta.get_field('title').verbose_name, verbose_name)
| 2.421875 | 2 |
NetworkHandler/Encoder.py | GitiHubi/deepContinualAuditing | 0 | 12768248 | <reponame>GitiHubi/deepContinualAuditing<filename>NetworkHandler/Encoder.py
# import pytorch libraries
from torch import nn
# define encoder class
class Encoder(nn.Module):
    """Fully-connected encoder: a stack of Linear layers with LeakyReLU
    activations and a configurable activation on the bottleneck layer."""

    # define class constructor
    def __init__(self, hidden_size, bottleneck):
        # hidden_size: list of layer widths [input_dim, ..., bottleneck_dim]
        # bottleneck: last-layer activation, one of 'linear', 'lrelu', 'tanh'

        # call super class constructor
        super(Encoder, self).__init__()

        # init encoder architecture
        self.linearLayers = self.init_layers(hidden_size)
        self.reluLayer = nn.LeakyReLU(negative_slope=0.4, inplace=True)

        # NOTE(review): any other value of ``bottleneck`` leaves
        # self.bottleneck undefined and forward() would raise AttributeError.
        if bottleneck == 'linear':
            self.bottleneck = nn.Identity()
        elif bottleneck == 'lrelu':
            self.bottleneck = nn.LeakyReLU(negative_slope=0.4, inplace=True)
        elif bottleneck == 'tanh':
            self.bottleneck = nn.Tanh()

    # build one Linear layer per consecutive pair of widths
    def init_layers(self, layer_dimensions):
        # init layers
        layers = []
        # iterate over layer dimensions
        for i in range(0, len(layer_dimensions)-1):
            # create linear layer
            linearLayer = self.LinearLayer(layer_dimensions[i], layer_dimensions[i + 1])
            # collect linear layer
            layers.append(linearLayer)
            # register layer so its parameters are tracked by the module
            self.add_module('linear_' + str(i), linearLayer)
        # return layers
        return layers

    # create a Linear layer with Xavier-uniform weights and zero bias
    def LinearLayer(self, input_size, hidden_size):
        # init linear layer
        linear = nn.Linear(input_size, hidden_size, bias=True)
        # init linear layer parameters
        nn.init.xavier_uniform_(linear.weight)
        nn.init.constant_(linear.bias, 0.0)
        # return linear layer
        return linear

    # define forward pass
    def forward(self, x):
        # iterate over distinct layers
        for i in range(0, len(self.linearLayers)):
            # case: non-bottleneck layer -> LeakyReLU activation
            if i < len(self.linearLayers)-1:
                # run forward pass through layer
                x = self.reluLayer(self.linearLayers[i](x))
            # case: bottleneck (last) layer -> configured activation
            else:
                # run forward pass through layer
                x = self.bottleneck(self.linearLayers[i](x))
        # return result
        return x
| 2.578125 | 3 |
Logistic Regression/LR.py | codeofdave/ML | 0 | 12768249 | <reponame>codeofdave/ML
import matplotlib.pyplot as plt
import numpy as np
import xlrd
def readdata():
    """Load the two feature columns and the label column from data.xlsx.

    Skips the header row of 'Sheet1' and returns (x1, x2, label) lists.
    """
    with xlrd.open_workbook('data.xlsx') as book:
        sheet = book.sheet_by_name('Sheet1')
        # Drop the header cell of each of the first three columns.
        feature1, feature2, labels = (
            sheet.col_values(col)[1:] for col in range(3)
        )
        return feature1, feature2, labels
x1,x2,label = readdata()
# Batch gradient descent for logistic regression on 2 features + bias.
# X:(14,3) W:(3,1) Y,LABEL:(14,1),d(COST)/d(W):(3,1)
# Z = X*W
# Y = 1 / (e^(-Z) + 1)
# COST = (LABEL-1)*log(1-Y) - LABEL*log(Y)
# d(COST) / d(W) = X.T * (Y - LABEL)
X = np.array([[1]*len(x1),x1,x2]).T
W = np.random.rand(3,1)  # random init, so results differ between runs
LABEL = np.array([label]).T
tmp= []  # snapshots of (W, epoch) every 100 epochs, for the animation below
for epoch in range(10000):
    Z = np.dot(X,W)
    Y = 1 / (np.exp(-1*Z)+1)
    # gradient step, learning rate 0.001
    W -= 0.001*np.dot(X.T,(Y-LABEL))
    if (epoch+1) % 100 == 0:
        tmp.append([W.copy(),epoch])
print(W)
# Animate the decision boundary over the saved snapshots:
#w0 + w1*x1 + w2*x2 = 0 >>> x2 = -w1/w2 * x1 - w0/w2
for Wi in tmp:
    plt.clf()
    w0 = Wi[0][0,0]
    w1 = Wi[0][1,0]
    w2 = Wi[0][2,0]
    print(w0,w1,w2)
    x1 = np.arange(0,7,1)
    x2 = -1*x1*w1/w2 - w0/w2
    plt.plot(x1,x2,c='r',label='decision boundary')
    # scatter the two classes; np.mat boolean masking selects rows by label
    plt.scatter(np.mat(X)[:,1][np.mat(LABEL)==0].A,np.mat(X)[:,2][np.mat(LABEL)==0].A,s=50,label='label 0')
    plt.scatter(np.mat(X)[:,1][np.mat(LABEL)==1].A,np.mat(X)[:,2][np.mat(LABEL)==1].A,s=50,label='label 1',marker='^')
    plt.xlabel('x1',size=20)
    plt.ylabel('x2',size=20)
    plt.legend()
    plt.grid()
    plt.title('iter:%s' % str(Wi[1]))
    plt.pause(0.01)
plt.show()
| 2.9375 | 3 |
slhc/suites/module_ellone.py | kotmasha/kodlab-uma-encapsulated | 0 | 12768250 | <reponame>kotmasha/kodlab-uma-encapsulated
### module for basic operations with ellone metric
from scipy.spatial.distance import cityblock as ellone
from scipy.spatial.distance import pdist
def get_metric(params=None):
    """
    Build an l1 (cityblock) metric pair, optionally with coordinate weights.

    :param params: optional weight vector; when given, every point is
        scaled element-wise by ``params`` before the distance is taken.
    :returns: ``(met_func, matr_func)`` where ``met_func(p, q)`` is the
        pairwise l1 distance and ``matr_func(points_list)`` is the
        condensed l1 distance matrix of a list of points.
    """
    # BUG FIX: the previous one-liner parsed as
    #   lambda pt1, pt2: (ellone(pt1, pt2) if params is None else <lambda>)
    # so with weights supplied, calling met_func returned a lambda instead
    # of a distance (matr_func had the same defect).
    if params is None:
        met_func = lambda pt1, pt2: ellone(pt1, pt2)
        matr_func = lambda points_list: pdist(points_list, metric='cityblock')
    else:
        met_func = lambda pt1, pt2: ellone(params * pt1, params * pt2)
        matr_func = lambda points_list: pdist(
            [params * pt for pt in points_list], metric='cityblock')
    return met_func, matr_func
def midpoint(pt1, pt2):
    """Return the point halfway between *pt1* and *pt2*."""
    return 0.5 * (pt1 + pt2)
| 2.6875 | 3 |