max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
rl_groundup/functions/linear_policy.py | TristanBester/rl_groundup | 1 | 12768951 | <filename>rl_groundup/functions/linear_policy.py
# Created by <NAME>.
import numpy as np
class LinearPolicy(object):
    """Linear approximator for a policy's state-action value function.

    The approximate value of a state-action pair is the dot product of the
    pair's feature vector with the parameter (weight) vector.
    """

    def __init__(self, n_weights, action_vec_dim, n_actions):
        # One weight per state feature plus one per action-encoding dimension.
        self.weights = np.zeros(n_weights + action_vec_dim)
        self.n_actions = n_actions

    def evaluate(self, feature_vector):
        """Return the approximate value for a single feature vector."""
        return np.dot(self.weights, feature_vector)

    def greedy_action(self, feature_vectors):
        """Return the index of the feature vector with the highest value."""
        scores = [self.evaluate(fv) for fv in feature_vectors]
        return np.argmax(scores)
| 2.78125 | 3 |
test/tests/test_environments/python_src/main.py | jithindevasia/fission | 6,891 | 12768952 | <gh_stars>1000+
def main():
    """Entry point probed by the environment test; returns a fixed marker."""
    marker = 'THIS_IS_MAIN_MAIN'
    return marker
def func():
    """Secondary entry point probed by the environment test; fixed marker."""
    marker = 'THIS_IS_MAIN_FUNC'
    return marker
| 1.320313 | 1 |
test_scripts/ns_instance/get_one.py | lremember/VFC | 1 | 12768953 | import requests
# Fetch a single NS (network service) instance by id from the NSLCM REST API
# and show the HTTP status plus the decoded JSON body.
resp = requests.get(
    'http://10.12.5.131:30280/api/nslcm/v1/ns/caaed655-8eee-47eb-8649-7fbef4e8fb29',
    timeout=30,  # avoid hanging forever if the test endpoint is unreachable
)
# Python 3: print is a function (the original used the Python 2 statement form).
print(resp.status_code, resp.json())
| 2.203125 | 2 |
mailman2/models.py | edinburghhacklab/hackdb | 0 | 12768954 | # SPDX-FileCopyrightText: 2022 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: MIT
import re
from allauth.account.models import EmailAddress
from django.contrib.auth import get_user_model
from django.contrib.auth.models import Group
from django.core.exceptions import ValidationError
from django.db import models
def find_user_from_address(address):
    """Return the user owning the *verified* email ``address``, or None."""
    try:
        match = EmailAddress.objects.get(email=address, verified=True)
    except EmailAddress.DoesNotExist:
        return None
    return match.user
class MailingList(models.Model):
    """A Mailman 2 mailing list mirrored in the local database."""

    # Subscription policy values, least to most restrictive.
    NONE = 0
    CONFIRM = 1
    REQUIRE_APPROVAL = 2
    CONFIRM_AND_APPROVE = 3
    SUBSCRIBE_POLICY_CHOICES = [
        (NONE, "None"),
        (CONFIRM, "Confirm"),
        (REQUIRE_APPROVAL, "Require approval"),
        (CONFIRM_AND_APPROVE, "Confirm and approve"),
    ]

    name = models.CharField(max_length=64, unique=True)
    description = models.CharField(max_length=255, blank=True)
    info = models.TextField(blank=True)
    advertised = models.BooleanField()
    subscribe_policy = models.SmallIntegerField(choices=SUBSCRIBE_POLICY_CHOICES)
    archive_private = models.BooleanField()
    # Newline-separated list of literal addresses or ^-prefixed regexes that
    # may subscribe without approval (see check_subscribe_auto_approval).
    subscribe_auto_approval = models.TextField(blank=True)
    auto_unsubscribe = models.BooleanField(
        default=False,
        help_text="Should non-group members be automatically unsubscribed?",
    )

    def __str__(self):
        return self.name

    def check_subscribe_auto_approval(self, address):
        """Return True if *address* matches a line of subscribe_auto_approval.

        Lines beginning with "^" are treated as regular expressions matched
        against the start of the address; any other line is compared to the
        address case-insensitively as a literal.
        """
        for pattern in self.subscribe_auto_approval.split("\n"):
            if pattern.startswith("^"):
                if re.match(pattern, address):
                    return True
            elif pattern.lower() == address.lower():
                return True
        return False

    def user_can_see(self, user):
        """Return True when the list is advertised or *user* could subscribe."""
        if self.advertised:
            return True
        if self.user_can_subscribe(user):
            return True
        return False

    def user_can_subscribe(self, user):
        """Return True when *user* may subscribe.

        Open policies (NONE/CONFIRM) admit everyone; otherwise the user needs
        a GroupPolicy via one of their groups.
        """
        if self.subscribe_policy in [self.NONE, self.CONFIRM]:
            return True
        for group in user.groups.all():
            if self.group_policies.filter(group=group).exists():
                return True
        # if self.check_subscribe_auto_approval(user.email):
        #     return True
        return False

    def user_recommend(self, user):
        """Return True when a group policy recommends (or stronger) this list.

        NOTE(review): implicitly returns None (falsy) when nothing matches.
        """
        for group in user.groups.all():
            if self.group_policies.filter(
                group=group, policy__gte=GroupPolicy.RECOMMEND
            ).exists():
                return True

    def user_prompt(self, user):
        """Return the prompt text of the first PROMPT policy for *user*'s groups.

        Implicitly returns None when no PROMPT policy applies.
        """
        for group in user.groups.all():
            try:
                return self.group_policies.get(
                    group=group, policy=GroupPolicy.PROMPT
                ).prompt
            except GroupPolicy.DoesNotExist:
                pass

    def user_subscribe_policy(self, user):
        """Return the strongest GroupPolicy that applies to *user*, or None."""
        for policy in self.group_policies.order_by("-policy"):
            if user.groups.contains(policy.group):
                return policy

    def address_can_remain(self, address):
        """Return True when *address* may stay subscribed.

        Always True unless auto_unsubscribe is enabled; then the address must
        be auto-approved or belong to a user who could (re-)subscribe.
        """
        if not self.auto_unsubscribe:
            return True
        if self.check_subscribe_auto_approval(address):
            return True
        user = find_user_from_address(address)
        if user:
            if self.user_can_subscribe(user):
                return True
        return False

    class Meta:
        permissions = [("audit_list", "Can audit the subscribers of a mailing list")]
class GroupPolicy(models.Model):
    """Per-group subscription policy for one mailing list."""

    # Policy strengths, weakest to strongest (order matters: MailingList
    # compares with policy__gte and order_by("-policy")).
    ALLOW = 0
    RECOMMEND = 1
    PROMPT = 2
    FORCE = 3
    POLICY_CHOICES = [
        (ALLOW, "Allow"),
        (RECOMMEND, "Recommend"),
        (PROMPT, "Prompt"),
        (FORCE, "Force"),
    ]

    mailing_list = models.ForeignKey(
        MailingList, on_delete=models.CASCADE, related_name="group_policies"
    )
    group = models.ForeignKey(
        Group, on_delete=models.CASCADE, related_name="mailinglist_policies"
    )
    policy = models.SmallIntegerField(choices=POLICY_CHOICES, default=ALLOW)
    # Message shown to the user; required when policy == PROMPT (see clean()).
    prompt = models.TextField(blank=True)

    def __str__(self):
        return f"{self.mailing_list}:{self.group}:{self.get_policy_display()}"

    def clean(self):
        """Validate that a PROMPT policy carries a prompt message."""
        if self.policy == self.PROMPT:
            if not self.prompt:
                raise ValidationError("Must supply a message for a prompt policy.")

    class Meta:
        verbose_name_plural = "Group policies"
        unique_together = ("mailing_list", "group")
class ChangeOfAddress(models.Model):
    """Audit record of a user's email address change."""

    created = models.DateTimeField(null=False, blank=False, auto_now_add=True)
    # PROTECT: keep the audit trail even if someone tries to delete the user.
    user = models.ForeignKey(get_user_model(), on_delete=models.PROTECT)
    old_email = models.EmailField()
    new_email = models.EmailField()

    class Meta:
        verbose_name_plural = "Changes of address"
class MailmanUser(models.Model):
    """Per-user Mailman preferences attached to the auth user."""

    user = models.OneToOneField(get_user_model(), on_delete=models.CASCADE)
    # Whether the user sees the advanced subscription options in the UI.
    advanced_mode = models.BooleanField(default=False)
| 2.203125 | 2 |
src/python/datatype/BooleanType.py | hiteshsahu/Python4ML | 0 | 12768955 | <filename>src/python/datatype/BooleanType.py
"""
Boolean Data Type(bool):
- Only be True or False
- None is false
- All numbers are True, except 0.
- All strings are True, except Empty strings. ie ""
- All Collections(list, tuple, set, and dictionary) are True, except Empty ones.ie. (), [], {},
- Objects from a class with a __len__ function that returns 0 is always false
"""
# Empty, 0 or null value are False Values
print(bool(None)) # Null
print(bool(False)) # false
print(bool(0)) # 0 Number
print(bool("")) # empty String
print(bool(())) # empty Tuple
print(bool([])) # empty List
print(bool({})) # empty Dictionary
# Class with__len__ returning 0 is False
class TestClass():
    """Demonstration class: __len__ reports 0, so instances are always falsy."""

    def __len__(self):
        return 0
testObj = TestClass()
# bool() reflects __len__: the instance reports length 0, so it is falsy.
if bool(testObj):
    print("Object is True ", bool(testObj),"\n")
else:
    print("Object is False ", bool(testObj), "\n")

# Non-empty / non-zero values are truthy:
print(bool(True))  # True
print(bool(2.75))  # non-zero number
print(bool("Hitesh"))  # non-empty string
print(bool(('Hitesh', 'Sahu')))  # non-empty tuple
print(bool(['Hitesh', 'Sahu']))  # non-empty list
print(bool({'firstName':'Hitesh',
            'lastName': 'Sahu'}))  # non-empty dictionary
| 4.375 | 4 |
bin/python/divvy/ledger/commands/Print.py | coinjet/rippled | 0 | 12768956 | from __future__ import absolute_import, division, print_function, unicode_literals
from divvy.ledger.Args import ARGS
from divvy.ledger import SearchLedgers
import json
# This command only reads ledgers; it performs no mutations on the server.
SAFE = True

HELP = """print
Print the ledgers to stdout. The default command."""

def run_print(server):
    """Search for the requested ledgers and display them on stdout.

    Passes the builtin print as the output callback to ARGS.display.
    """
    ARGS.display(print, server, SearchLedgers.search(server))
| 2.140625 | 2 |
can_tools/scrapers/official/AL/al_vaccine.py | kxdan/can-scrapers | 0 | 12768957 | <filename>can_tools/scrapers/official/AL/al_vaccine.py
import pandas as pd
import us
from can_tools.scrapers import CMU, variables
from can_tools.scrapers.official.base import ArcGIS
pd.options.mode.chained_assignment = None # Avoid unnessacary SettingWithCopy warning
class ALCountyVaccine(ArcGIS):
    """Scrape county-level COVID-19 vaccination counts for Alabama.

    Data comes from the Alabama Department of Public Health's public
    ArcGIS vaccination dashboard.
    """

    ARCGIS_ID = "4RQmZZ0yaZkGR1zy"
    has_location = True
    location_type = "county"
    state_fips = int(us.states.lookup("Alabama").fips)
    source = "https://alpublichealth.maps.arcgis.com/apps/opsdashboard/index.html#/e4a232feb1344ce0afd9ac162f3ac4ba"
    source_name = "Alabama Department of Public Health"

    # Dashboard field name -> standard CMU variable definition.
    variables = {
        "PERSONVAX": variables.INITIATING_VACCINATIONS_ALL,
        "PERSONCVAX": variables.FULLY_VACCINATED_ALL,
        "NADMIN": variables.TOTAL_DOSES_ADMINISTERED_ALL,
    }

    def fetch(self):
        """Download the raw JSON pages from the ArcGIS feature service."""
        service = "Vax_Dashboard_Public_3_VIEW"
        return self.get_all_jsons(service, 1, "7")

    def normalize(self, data):
        """Reshape the raw service JSON into the standard long-format frame."""
        data = self.arcgis_jsons_to_df(data)
        data = self._rename_or_add_date_and_location(
            data, location_column="CNTYFIPS", timezone="US/Central"
        )
        data = self._reshape_variables(data, self.variables)
        # Drop placeholder FIPS codes (0 and 99999 — presumably the dashboard's
        # "unknown county" buckets; verify against the source service).
        locations_to_drop = [0, 99999]
        data = data.query("location != @locations_to_drop")
        return data
| 2.515625 | 3 |
printBlockAnsi.py | oshaboy/PrintUnicode | 0 | 12768958 | #!/usr/bin/python3
from PIL import Image
import math, sys
figure=" "
regular=" "
em = " "
en = " "
scale=[
"\x1b[30m█",
"\x1b[3{}m░",
"\x1b[9{}m░",
"\x1b[3{}m▒",
"\x1b[9{}m▒",
"\x1b[3{}m▓",
"\x1b[9{}m▓",
"\x1b[3{}m█",
"\x1b[9{}m█"
]
def hue_raw(fullpixel):
    """Return per-channel flags: True where R, G or B meets the threshold."""
    return [channel >= threshhold_global for channel in fullpixel[:3]]


def hue(fullpixel):
    """Pack the channel flags into a 3-bit ANSI hue number (R=1, G=2, B=4)."""
    flags = hue_raw(fullpixel)
    return sum(bit for bit, on in zip((1, 2, 4), flags) if on)


def weighted_average(fullpixel):
    """Average the channels that pass the threshold; 0 when none do."""
    flags = hue_raw(fullpixel)
    selected = [value for value, on in zip(fullpixel[:3], flags) if on]
    if not selected:
        # No channel passed: the original sums nothing and divides by 3 -> 0.
        return 0
    return sum(selected) // len(selected)


# Channel cut-off shared by the helpers above (adjustable via the -t flag).
threshhold_global = 127
def image_2_block_ansi(filename, output="", dither=True, double_flag = False):
    """Render an image as ANSI-coloured block-character art.

    filename: path of the input image (opened and converted to RGB).
    output: optional output path; when empty the art is printed to stdout,
        otherwise it is written to the file (UTF-8, CRLF line endings).
    dither: apply Floyd-Steinberg error diffusion to the thresholded channels.
    double_flag: emit every glyph twice to compensate for tall terminal cells.
    """
    pixelWidth = 2 if double_flag else 1
    image = Image.open(filename).convert("RGB")
    imgdump = image.load()
    if output != "":
        output_file = open(output, "wb")
    for y in range(image.height):
        string = "\x1b[40m"  # black background for the whole row
        for x in range(image.width):
            # Clamp channels defensively: dithering below can push stored
            # values above 255.  (BUG FIX: the original did
            # ``pixel[i] = 256`` which raises TypeError on a tuple.)
            pixel = tuple(min(channel, 255) for channel in imgdump[x, y])
            hue_arr = hue_raw(pixel)
            color = hue(pixel)
            brightness = weighted_average(pixel) // 29  # 0..8 -> scale index
            string += scale[brightness].format(color) * pixelWidth
            if dither:
                # Floyd-Steinberg neighbours: right, down-right, down,
                # down-left.  BUG FIX: the original built these with
                # ``[].extend(...)`` which returns None, so every neighbour
                # stayed None and no error was ever diffused.
                pixels_i_care_about = [None, None, None, None]
                if x != image.width - 1:
                    pixels_i_care_about[0] = list(imgdump[x + 1, y])
                if y != image.height - 1 and x != image.width - 1:
                    pixels_i_care_about[1] = list(imgdump[x + 1, y + 1])
                if y != image.height - 1:
                    pixels_i_care_about[2] = list(imgdump[x, y + 1])
                if x != 0 and y != image.height - 1:
                    pixels_i_care_about[3] = list(imgdump[x - 1, y + 1])
                for i in range(3):
                    # Quantisation error only for channels above the threshold.
                    error = pixel[i] % 29 if hue_arr[i] else 0
                    if pixels_i_care_about[0] is not None:
                        pixels_i_care_about[0][i] += math.floor((7 / 16) * error)
                    if pixels_i_care_about[1] is not None:
                        pixels_i_care_about[1][i] += math.floor((1 / 16) * error)
                    if pixels_i_care_about[2] is not None:
                        pixels_i_care_about[2][i] += math.floor((5 / 16) * error)
                    if pixels_i_care_about[3] is not None:
                        pixels_i_care_about[3][i] += math.floor((3 / 16) * error)
                if pixels_i_care_about[0] is not None:
                    imgdump[x + 1, y] = tuple(pixels_i_care_about[0])
                if pixels_i_care_about[1] is not None:
                    imgdump[x + 1, y + 1] = tuple(pixels_i_care_about[1])
                if pixels_i_care_about[2] is not None:
                    imgdump[x, y + 1] = tuple(pixels_i_care_about[2])
                if pixels_i_care_about[3] is not None:
                    imgdump[x - 1, y + 1] = tuple(pixels_i_care_about[3])
        if output == "":
            print(string)
        else:
            output_file.write(string.encode('utf_8'))
            output_file.write(bytes([13, 10]))
    if output != "":
        output_file.close()
help_text="""
Usage: printBlockAnsi.py image [-h] [-d] [-s] [-i] [-w] [-t #n]
-h: Print this
-d: Disable Dithering
-w: print every character twice
-t: use threshhold for hue selection
"""
if __name__ == "__main__":
argc=len(sys.argv)
if (argc==1):
print ("NO")
sys.exit(1)
in_file=sys.argv[1]
dither_flag=True
pixel_width_flag=False
while (argc>2 and sys.argv[2][0]=="-"):
if sys.argv[2]=="-h":
print(help_text)
sys.exit(0)
elif sys.argv[2]=="-d":
dither_flag=False
elif sys.argv[2]=="-w":
pixel_width_flag=True
elif sys.argv[2]=="-t":
if argc>3 and sys.argv[3].isnumeric() and int(sys.argv[3])>=0:
threshhold_global=int(sys.argv[3])
for i in range(3,len(sys.argv)-1):
sys.argv[i]=sys.argv[i+1]
argc=argc-1
for i in range(2,len(sys.argv)-1):
sys.argv[i]=sys.argv[i+1]
argc=argc-1
if (argc>2):
out_file=sys.argv[2]
else:
out_file=""
image_2_block_ansi(in_file, output="", dither=dither_flag, double_flag = pixel_width_flag)
| 2.90625 | 3 |
setup.py | codeRulc/pytest-progress | 0 | 12768959 | import os
import io
from setuptools import setup
def get_version(filename):
    """Read the package version from *filename*, resolved next to this file.

    Returns the file's contents with surrounding whitespace stripped so a
    trailing newline in version.txt does not leak into the version string.
    Raises RuntimeError when the file is empty.
    """
    here = os.path.dirname(os.path.abspath(__file__))
    # Context manager guarantees the handle is closed even if read() fails
    # (the original left the file open on exception).
    with open(os.path.join(here, filename)) as f:
        version_match = f.read().strip()
    if version_match:
        return version_match
    raise RuntimeError("Unable to find version string.")
# Package metadata for the pytest-progress plugin.  The version is kept in
# version.txt; the plugin module is registered via the pytest11 entry point.
setup(name='pytest-progress',
      version=get_version('version.txt'),
      description='pytest plugin for instant test progress status',
      # README may contain non-UTF-8 bytes; ignore them rather than fail.
      long_description=io.open('README.rst', encoding='utf-8', errors='ignore').read(),
      author='santosh',
      author_email=u'<EMAIL>',
      url=u'https://github.com/ssrikanta/pytest-progress',
      license = 'MIT',
      license_file = 'LICENSE',
      py_modules=['pytest_progress'],
      # pytest discovers the plugin through this entry point group.
      entry_points={'pytest11': ['progress = pytest_progress']},
      install_requires=['pytest>=2.7'],
      keywords='py.test pytest report',
      classifiers=[
          'Development Status :: 5 - Production/Stable',
          'Framework :: Pytest',
          'Intended Audience :: Developers',
          'License :: OSI Approved :: MIT License',
          'Operating System :: POSIX',
          'Operating System :: Microsoft :: Windows',
          'Operating System :: MacOS :: MacOS X',
          'Topic :: Software Development :: Testing',
          'Topic :: Software Development :: Quality Assurance',
          'Topic :: Software Development :: Libraries',
          'Topic :: Utilities',
          'Programming Language :: Python :: 2.7',
          'Programming Language :: Python :: 3.6',
          'Programming Language :: Python :: 3.7',
          'Programming Language :: Python :: 3.8',
          'Programming Language :: Python :: 3.9',
      ]
      )
| 1.820313 | 2 |
PlanheatMappingModule/PlanHeatDMM/dialogs/protection_level_map_dialog.py | Planheat/Planheat-Tool | 2 | 12768960 | <reponame>Planheat/Planheat-Tool
# -*- coding: utf-8 -*-
"""
Matching user fields dialog
:author: <NAME>
:version: 0.1
:date: 19 Oct. 2017
"""
__docformat__ = "restructuredtext"
import os
import sys
from PyQt5 import uic
from PyQt5 import QtCore,QtWidgets, QtGui
from PyQt5.Qt import QTableWidgetItem, QEvent
from dialogs.message_box import showErrordialog, showQuestiondialog
from dialogs.multi_map_dialog import MultiMapDialog
from model.protection_level_map import ProtectionLevelMap
# Base pixel size applied to the dialog's font (see setSystemDependantFontSize).
FONT_PIXEL_SIZE = 12

# Load the Qt Designer .ui file that defines this dialog's widgets.
FORM_CLASS, _ = uic.loadUiType(os.path.join(
    os.path.dirname(__file__), 'protection_level_map_dialog.ui'))
class ProtectionLevelMapDialog(QtWidgets.QDialog, FORM_CLASS):
    """Protection level mapping dialog.

    Maps the municipality's cadastral protection-degree values to the
    plugin's own protection levels via a two-column table with one combo
    box per row.

    Toolbar actions:
    - Refresh all records: discard unsaved changes and return to the last
      saved state.
    - Delete record: reset the selected record (equivalent to selecting a
      None value).
    - Delete all records: reset every record (equivalent to a None value
      for all records).
    """

    def __init__(self, planHeatDMM):
        """Build the dialog, wire up signals and populate the table."""
        super(ProtectionLevelMapDialog, self).__init__(None)
        # Set up the user interface from Designer.
        # After setupUI you can access any designer object by doing
        # self.<objectname>, and you can use autoconnect slots - see
        # http://qt-project.org/doc/qt-4.8/designer-using-a-ui-file.html
        # #widgets-and-dialogs-with-auto-connect
        self.setupUi(self)
        try:
            self.setWindowIcon(QtGui.QIcon(planHeatDMM.plugin_dir + os.path.sep + 'resources/logo.ico'))
            #self.setWindowFlags(self.windowFlags() & QtCore.Qt.WindowMinimizeButtonHint)
            self.setWindowModality(QtCore.Qt.ApplicationModal)
            #self.setFixedSize(600,350)
            self.planHeatDMM = planHeatDMM
            # Row indexes currently selected for a multi-row mapping change.
            self.multiChange = []
            self.refreshToolButton.setIcon(planHeatDMM.resources.icon_refresh_icon)
            self.deleteToolButton.setIcon(planHeatDMM.resources.icon_del_icon)
            self.deleteAllToolButton.setIcon(planHeatDMM.resources.icon_trash_icon)
            self.multiMapToolButton.setIcon(planHeatDMM.resources.icon_multi_map_icon)
            self.refreshToolButton.clicked.connect(self.refreshRecordsTable)
            self.deleteToolButton.clicked.connect(self.deleteRecordTable)
            self.deleteAllToolButton.clicked.connect(self.deleteAllRecordsTable)
            self.multiMapToolButton.clicked.connect(self.multiMapDialog)
            self.protectionLevelMapTable.cellClicked.connect(self.clickCell)
            self.protectionLevelMapTable.pressed.connect(self.pressedCell)
            self.protectionLevelMapTable.verticalHeader().sectionClicked.connect(self.clickRow)
            self.protectionLevelMapTable.horizontalHeader().sectionClicked.connect(self.clickAllRows)
            # Event filter keeps the selection when the table loses focus.
            self.protectionLevelMapTable.installEventFilter(self)
            self.setSystemDependantFontSize()
            self.setHeaders()
            self.addRecordsTable()
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - Constructor Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - Constructor Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            raise

    def setHeaders(self):
        """Configure the two-column, row-selecting, read-only table widget."""
        self.protectionLevelMapTable.setColumnCount(2)
        horHeaders = ["Cadaster Protection Level ", "PlanHeat Protection Level "]
        self.protectionLevelMapTable.setHorizontalHeaderLabels(horHeaders)
        self.protectionLevelMapTable.setRowCount(0)
        self.protectionLevelMapTable.resizeColumnsToContents()
        self.protectionLevelMapTable.resizeRowsToContents()
        self.protectionLevelMapTable.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)
        self.protectionLevelMapTable.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)

    def addRecordsTable(self):
        """Add one row per cadastral protection value, with a level combo box.

        Pre-selects the combo value from any previously saved mapping in
        planHeatDMM.data.protectionLevelMap.
        """
        try:
            for user_protection in self.planHeatDMM.data.inputSHPFieldProtectionDegreeValues:
                # Two-column popup (level number + description) shown through
                # a QTreeView attached to the combo box.
                model = QtGui.QStandardItemModel()
                for data in self.planHeatDMM.data.protectionLevels:
                    protection_level = QtGui.QStandardItem(str(data.protectionLevel))
                    description = QtGui.QStandardItem(data.description)
                    model.appendRow([protection_level, description])
                view = QtWidgets.QTreeView()
                view.setMouseTracking(False)
                view.header().hide()
                view.setRootIsDecorated(False)
                self.comboProtectionLevel = QtWidgets.QComboBox()
                self.comboProtectionLevel.setMouseTracking(False)
                self.comboProtectionLevel.setView(view)
                self.comboProtectionLevel.setModel(model)
                #self.comboProtectionLevel = QtWidgets.QComboBox()
                #for data in self.planHeatDMM.data.protectionLevels:
                #    self.comboProtectionLevel.addItem(str(data.protectionLevel))
                rowPosition = self.protectionLevelMapTable.rowCount()
                self.protectionLevelMapTable.insertRow(rowPosition)
                if self.planHeatDMM.data.protectionLevelMap:
                    # Restore a previously saved, evaluated mapping for this value.
                    l = [x for x in self.planHeatDMM.data.protectionLevelMap if x.user_definition_protection == user_protection and x.DMM_protection_level not in ("","Not evaluate")]
                    if l:
                        self.comboProtectionLevel.setCurrentText(str(l[0].DMM_protection_level))
                self.protectionLevelMapTable.setItem(rowPosition,0, QTableWidgetItem(user_protection))
                self.protectionLevelMapTable.setCellWidget(rowPosition,1,self.comboProtectionLevel)
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - addRecordsTable Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - addRecordsTable Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def deleteRecordTable(self):
        """Reset the selected rows' combo boxes to level "0"."""
        try:
            """
            index = self.protectionLevelMapTable.selectionModel().selectedRows()
            for i in sorted(index,reverse=True):
                self.protectionLevelMapTable.removeRow(i.row())
            """
            modelIndex = self.protectionLevelMapTable.selectionModel().selectedRows()
            self.multiChange = [index.row() for index in modelIndex]
            for rowPosition in self.multiChange:
                combo = self.protectionLevelMapTable.cellWidget(rowPosition,1)
                combo.setCurrentText("0")
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - deleteRecordTable Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - deleteRecordTable Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def deleteAllRecordsTable(self):
        """Reset every row's combo box to level "0", after confirmation."""
        try:
            records = self.protectionLevelMapTable.rowCount()
            if records > 0 :
                message = "{} rows reset, are you sure?".format(records)
                if showQuestiondialog(self,"Reset All Records",message) == QtWidgets.QMessageBox.Yes:
                    for rowPosition in range(self.protectionLevelMapTable.rowCount()):
                        combo = self.protectionLevelMapTable.cellWidget(rowPosition,1)
                        combo.setCurrentText("0")
                    #while (self.protectionLevelMapTable.rowCount() > 0):
                    #    self.protectionLevelMapTable.removeRow(0)
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - deleteAllRecordsTable Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - deleteAllRecordsTable Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def refreshRecordsTable(self):
        """Discard unsaved changes by rebuilding the table from saved data."""
        try:
            message = "Discard, not saved changes, are you sure?"
            if showQuestiondialog(self,"Discard Changes",message) == QtWidgets.QMessageBox.Yes:
                #self.deleteAllRecordsTable(showMessageDialog=False)
                while (self.protectionLevelMapTable.rowCount() > 0):
                    self.protectionLevelMapTable.removeRow(0)
                self.addRecordsTable()
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - refreshRecordsTable Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - refreshRecordsTable Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def accept(self, *args, **kwargs):
        """Persist the table contents into planHeatDMM.data.protectionLevelMap."""
        try:
            self.planHeatDMM.data.protectionLevelMap = []
            for x in range(0,self.protectionLevelMapTable.rowCount(),1):
                data = ProtectionLevelMap()
                for y in range(0,self.protectionLevelMapTable.columnCount(),1):
                    if y == 0:
                        # Column 0: the cadastral value (plain table item).
                        item = self.protectionLevelMapTable.item(x,y)
                        data.user_definition_protection = "" if item is None else item.text()
                    elif y == 1:
                        # Column 1: the chosen level (combo box cell widget).
                        cell = self.protectionLevelMapTable.cellWidget(x,y)
                        data.DMM_protection_level = 0 if cell is None else int(cell.currentText())
                self.planHeatDMM.data.protectionLevelMap.append(data)
            return QtWidgets.QDialog.accept(self, *args, **kwargs)
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - accept Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - accept Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def clickCell(self,rowNum,columnNum):
        """Cell click handler: refresh the multi-map button enabled state."""
        try:
            self.selectedRows()
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - clickCell Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - clickCell Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def pressedCell(self):
        """Cell press handler: refresh the multi-map button enabled state."""
        try:
            self.selectedRows()
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - pressedCell Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - pressedCell Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def clickRow(self,rowNum):
        """Row-header click handler: refresh the multi-map button state."""
        try:
            self.selectedRows()
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - clickRow Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - clickRow Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def clickAllRows(self):
        """Column-header click handler: select every row in the table."""
        try:
            self.protectionLevelMapTable.selectAll()
            self.selectedRows()
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - clickAllRows Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - clickAllRows Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def selectedRows(self):
        """Enable the multi-map button only while at least one row is selected."""
        try:
            modelIndex = self.protectionLevelMapTable.selectionModel().selectedRows()
            if modelIndex:
                self.multiMapToolButton.setEnabled(True)
            else:
                self.multiMapToolButton.setEnabled(False)
                self.multiChange = []
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - selectedRows Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - selectedRows Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    @QtCore.pyqtSlot()
    def multiMapDialog(self):
        """Open the multi-map dialog and apply the chosen level to all selected rows."""
        try:
            modelIndex = self.protectionLevelMapTable.selectionModel().selectedRows();
            self.multiChange = [index.row() for index in modelIndex]
            levels = [str(level.protectionLevel) for level in self.planHeatDMM.data.protectionLevels]
            dialog = MultiMapDialog(self.planHeatDMM,levels)
            dialog.show()
            dialogResult = dialog.exec_()
            if dialogResult == QtWidgets.QDialog.Accepted:
                for rowPosition in self.multiChange:
                    combo = self.protectionLevelMapTable.cellWidget(rowPosition,1)
                    combo.setCurrentText(self.planHeatDMM.data.listSelectedValue)
            # Clear the temporary value chosen in the multi-map dialog.
            self.planHeatDMM.data.listSelectedValue = None
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - multiMapDialog Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - multiMapDialog Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def eventFilter(self, widget, event):
        """ Manage lost focus event on QTablewidget, avoiding deselect rows """
        try:
            # Swallow FocusOut so the table keeps its row selection.
            if event.type() == QEvent.FocusOut:return True
            return QtWidgets.QDialog.eventFilter(self, widget, event)
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - eventFilter Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - eventFilter Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def resizeEvent(self, event):
        """Keep the table stretched to the dialog width on resize."""
        try:
            #print(event.size().width())
            self.protectionLevelMapTable.setGeometry(55,20,event.size().width()-70,240)
            #self.buildUseMapTable.resizeColumnsToContents()
            return QtWidgets.QDialog.resizeEvent(self,event)
        except:
            self.planHeatDMM.resources.log.write_log("ERROR","ProtectionLevelMapDialog - resizeEvent Unexpected error:" + str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))
            showErrordialog(self.planHeatDMM.dlg,"ProtectionLevelMapDialog - resizeEvent Unexpected error:",str(sys.exc_info()[0]) + " " + str(sys.exc_info()[1]))

    def setSystemDependantFontSize(self):
        """Force a fixed pixel font size on the dialog and all its labels."""
        font = self.font()
        font.setPixelSize(FONT_PIXEL_SIZE)
        self.setFont(font)
        self.dialoglabels = self.findChildren(QtWidgets.QLabel)
        for label in self.dialoglabels:
            label.setFont(font)
projects/tutorials/pointnav_habitat_rgb_ddppo.py | ehsanik/allenact | 0 | 12768961 | <gh_stars>0
import os
from typing import Dict, Any, List, Optional, Sequence
import gym
import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import LambdaLR
from torchvision import models
from core.algorithms.onpolicy_sync.losses import PPO
from core.algorithms.onpolicy_sync.losses.ppo import PPOConfig
from core.base_abstractions.experiment_config import ExperimentConfig, MachineParams
from core.base_abstractions.preprocessor import (
ResNetPreprocessor,
SensorPreprocessorGraph,
)
from core.base_abstractions.sensor import SensorSuite
from core.base_abstractions.task import TaskSampler
from plugins.habitat_plugin.habitat_constants import (
HABITAT_DATASETS_DIR,
HABITAT_CONFIGS_DIR,
)
from plugins.habitat_plugin.habitat_sensors import (
RGBSensorHabitat,
TargetCoordinatesSensorHabitat,
)
from plugins.habitat_plugin.habitat_task_samplers import PointNavTaskSampler
from plugins.habitat_plugin.habitat_utils import (
construct_env_configs,
get_habitat_config,
)
from plugins.robothor_plugin.robothor_tasks import PointNavTask
from projects.pointnav_baselines.models.point_nav_models import (
ResnetTensorPointNavActorCritic,
)
from utils.experiment_utils import (
Builder,
PipelineStage,
TrainingPipeline,
LinearDecay,
evenly_distribute_count_into_bins,
)
class PointNavHabitatRGBPPOTutorialExperimentConfig(ExperimentConfig):
"""A Point Navigation experiment configuration in Habitat."""
# Task Parameters
MAX_STEPS = 500
REWARD_CONFIG = {
"step_penalty": -0.01,
"goal_success_reward": 10.0,
"failed_stop_reward": 0.0,
"shaping_weight": 1.0,
}
DISTANCE_TO_GOAL = 0.2
# Simulator Parameters
CAMERA_WIDTH = 640
CAMERA_HEIGHT = 480
SCREEN_SIZE = 224
# Training Engine Parameters
ADVANCE_SCENE_ROLLOUT_PERIOD: Optional[int] = None
NUM_PROCESSES = max(5 * torch.cuda.device_count() - 1, 4)
TRAINING_GPUS = list(range(torch.cuda.device_count()))
VALIDATION_GPUS = [torch.cuda.device_count() - 1]
TESTING_GPUS = [torch.cuda.device_count() - 1]
task_data_dir_template = os.path.join(
HABITAT_DATASETS_DIR, "pointnav/gibson/v1/{}/{}.json.gz"
)
TRAIN_SCENES = task_data_dir_template.format(*(["train"] * 2))
VALID_SCENES = task_data_dir_template.format(*(["val"] * 2))
TEST_SCENES = task_data_dir_template.format(*(["test"] * 2))
CONFIG = get_habitat_config(
os.path.join(HABITAT_CONFIGS_DIR, "tasks/pointnav_gibson.yaml")
)
CONFIG.defrost()
CONFIG.NUM_PROCESSES = NUM_PROCESSES
CONFIG.SIMULATOR_GPU_IDS = TRAINING_GPUS
CONFIG.DATASET.SCENES_DIR = "habitat/habitat-api/data/scene_datasets/"
CONFIG.DATASET.POINTNAVV1.CONTENT_SCENES = ["*"]
CONFIG.DATASET.DATA_PATH = TRAIN_SCENES
CONFIG.SIMULATOR.AGENT_0.SENSORS = ["RGB_SENSOR"]
CONFIG.SIMULATOR.RGB_SENSOR.WIDTH = CAMERA_WIDTH
CONFIG.SIMULATOR.RGB_SENSOR.HEIGHT = CAMERA_HEIGHT
CONFIG.SIMULATOR.TURN_ANGLE = 30
CONFIG.SIMULATOR.FORWARD_STEP_SIZE = 0.25
CONFIG.ENVIRONMENT.MAX_EPISODE_STEPS = MAX_STEPS
CONFIG.TASK.TYPE = "Nav-v0"
CONFIG.TASK.SUCCESS_DISTANCE = DISTANCE_TO_GOAL
CONFIG.TASK.SENSORS = ["POINTGOAL_WITH_GPS_COMPASS_SENSOR"]
CONFIG.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.GOAL_FORMAT = "POLAR"
CONFIG.TASK.POINTGOAL_WITH_GPS_COMPASS_SENSOR.DIMENSIONALITY = 2
CONFIG.TASK.GOAL_SENSOR_UUID = "pointgoal_with_gps_compass"
CONFIG.TASK.MEASUREMENTS = ["DISTANCE_TO_GOAL", "SUCCESS", "SPL"]
CONFIG.TASK.SPL.TYPE = "SPL"
CONFIG.TASK.SPL.SUCCESS_DISTANCE = DISTANCE_TO_GOAL
CONFIG.TASK.SUCCESS.SUCCESS_DISTANCE = DISTANCE_TO_GOAL
CONFIG.MODE = "train"
SENSORS = [
RGBSensorHabitat(
height=SCREEN_SIZE, width=SCREEN_SIZE, use_resnet_normalization=True,
),
TargetCoordinatesSensorHabitat(coordinate_dims=2),
]
PREPROCESSORS = [
Builder(
ResNetPreprocessor,
{
"input_height": SCREEN_SIZE,
"input_width": SCREEN_SIZE,
"output_width": 7,
"output_height": 7,
"output_dims": 512,
"pool": False,
"torchvision_resnet_model": models.resnet18,
"input_uuids": ["rgb_lowres"],
"output_uuid": "rgb_resnet",
},
),
]
OBSERVATIONS = [
"rgb_resnet",
"target_coordinates_ind",
]
TRAIN_CONFIGS = construct_env_configs(CONFIG)
@classmethod
def tag(cls):
return "PointNavHabitatRGBPPO"
@classmethod
def training_pipeline(cls, **kwargs):
ppo_steps = int(250000000)
lr = 3e-4
num_mini_batch = 1
update_repeats = 3
num_steps = 30
save_interval = 5000000
log_interval = 10000
gamma = 0.99
use_gae = True
gae_lambda = 0.95
max_grad_norm = 0.5
return TrainingPipeline(
save_interval=save_interval,
metric_accumulate_interval=log_interval,
optimizer_builder=Builder(optim.Adam, dict(lr=lr)),
num_mini_batch=num_mini_batch,
update_repeats=update_repeats,
max_grad_norm=max_grad_norm,
num_steps=num_steps,
named_losses={"ppo_loss": PPO(**PPOConfig)},
gamma=gamma,
use_gae=use_gae,
gae_lambda=gae_lambda,
advance_scene_rollout_period=cls.ADVANCE_SCENE_ROLLOUT_PERIOD,
pipeline_stages=[
PipelineStage(loss_names=["ppo_loss"], max_stage_steps=ppo_steps)
],
lr_scheduler_builder=Builder(
LambdaLR, {"lr_lambda": LinearDecay(steps=ppo_steps)}
),
)
def machine_params(self, mode="train", **kwargs):
    """Return process counts, device ids and the sensor-preprocessor graph for `mode`."""
    if mode == "train":
        workers_per_device = 1
        # replicate the training GPU ids once per worker; empty on CPU-only hosts
        gpu_ids = (
            []
            if not torch.cuda.is_available()
            else self.TRAINING_GPUS * workers_per_device
        )
        nprocesses = (
            1
            if not torch.cuda.is_available()
            else evenly_distribute_count_into_bins(self.NUM_PROCESSES, len(gpu_ids))
        )
    elif mode == "valid":
        nprocesses = 1
        gpu_ids = [] if not torch.cuda.is_available() else self.VALIDATION_GPUS
    elif mode == "test":
        nprocesses = 1
        gpu_ids = [] if not torch.cuda.is_available() else self.TESTING_GPUS
    else:
        raise NotImplementedError("mode must be 'train', 'valid', or 'test'.")
    # Only build the preprocessor graph when at least one process will run
    # in this mode (nprocesses can be an int or a per-device sequence).
    sensor_preprocessor_graph = (
        SensorPreprocessorGraph(
            source_observation_spaces=SensorSuite(self.SENSORS).observation_spaces,
            preprocessors=self.PREPROCESSORS,
        )
        if mode == "train"
        or (
            (isinstance(nprocesses, int) and nprocesses > 0)
            or (isinstance(nprocesses, Sequence) and sum(nprocesses) > 0)
        )
        else None
    )
    return MachineParams(
        nprocesses=nprocesses,
        devices=gpu_ids,
        sensor_preprocessor_graph=sensor_preprocessor_graph,
    )
# Define Model
@classmethod
def create_model(cls, **kwargs) -> nn.Module:
    """Instantiate the ResNet-feature point-nav actor-critic network."""
    action_space = gym.spaces.Discrete(len(PointNavTask.class_action_names()))
    observation_space = kwargs["sensor_preprocessor_graph"].observation_spaces
    return ResnetTensorPointNavActorCritic(
        action_space=action_space,
        observation_space=observation_space,
        goal_sensor_uuid="target_coordinates_ind",
        rgb_resnet_preprocessor_uuid="rgb_resnet",
        hidden_size=512,
        goal_dims=32,
    )
# Define Task Sampler
@classmethod
def make_sampler_fn(cls, **kwargs) -> TaskSampler:
    """Forward all kwargs to the point-nav task sampler constructor."""
    sampler = PointNavTaskSampler(**kwargs)
    return sampler
def train_task_sampler_args(
    self,
    process_ind: int,
    total_processes: int,
    devices: Optional[List[int]] = None,
    seeds: Optional[List[int]] = None,
    deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
    """Build the task-sampler kwargs for training worker `process_ind`."""
    env_config = self.TRAIN_CONFIGS[process_ind]
    action_space = gym.spaces.Discrete(len(PointNavTask.class_action_names()))
    return {
        "env_config": env_config,
        "max_steps": self.MAX_STEPS,
        "sensors": self.SENSORS,
        "action_space": action_space,
        "distance_to_goal": self.DISTANCE_TO_GOAL,  # type:ignore
    }
def valid_task_sampler_args(
    self,
    process_ind: int,
    total_processes: int,
    devices: Optional[List[int]] = None,
    seeds: Optional[List[int]] = None,
    deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
    """Build the task-sampler kwargs for validation (validation scene split)."""
    env_config = self.CONFIG.clone()
    env_config.defrost()
    env_config.DATASET.DATA_PATH = self.VALID_SCENES
    env_config.MODE = "validate"
    env_config.freeze()
    action_space = gym.spaces.Discrete(len(PointNavTask.class_action_names()))
    return {
        "env_config": env_config,
        "max_steps": self.MAX_STEPS,
        "sensors": self.SENSORS,
        "action_space": action_space,
        "distance_to_goal": self.DISTANCE_TO_GOAL,  # type:ignore
    }
def test_task_sampler_args(
    self,
    process_ind: int,
    total_processes: int,
    devices: Optional[List[int]] = None,
    seeds: Optional[List[int]] = None,
    deterministic_cudnn: bool = False,
) -> Dict[str, Any]:
    """Testing is deliberately unsupported in this tutorial configuration."""
    raise NotImplementedError("Testing not implemented for this tutorial.")
| 1.851563 | 2 |
Array/387FirstUniqueCharacterString.py | john-the-dev/leetcode | 0 | 12768962 | <gh_stars>0
# 387. First Unique Character in a String
'''
Given a string, find the first non-repeating character in it and return its index. If it doesn't exist, return -1.
Examples:
s = "leetcode"
return 0.
s = "loveleetcode"
return 2.
Note: You may assume the string contains only lowercase English letters.
'''
from common import *
from collections import defaultdict
class Solution:
    """
    First non-repeating character via two passes: count every character,
    then return the index of the first character whose count is exactly 1.
    O(n) runtime, O(1) storage (bounded lowercase alphabet).
    """
    def firstUniqChar(self, s: str) -> int:
        counts = {}
        for ch in s:
            counts[ch] = counts.get(ch, 0) + 1
        for index, ch in enumerate(s):
            if counts[ch] == 1:
                return index
        return -1
# Tests. (module-level smoke tests, run on import)
assert(Solution().firstUniqChar("leetcode") == 0)
assert(Solution().firstUniqChar("loveleetcode") == 2)
assert(Solution().firstUniqChar("etet") == -1)
| 3.71875 | 4 |
try1/libs/__init__.py | kiranscaria/structure_knowledge_distillation | 0 | 12768963 | <gh_stars>0
from .bn import ABN
from .misc import GlobalAvgPool2d
from .residual import IdentityResidualBlock
from .dense import DenseModule
| 1.109375 | 1 |
src/dbxdeploy/deploy/WorkingDirFactory.py | DataSentics/dbx-deploy | 0 | 12768964 | import os
from pathlib import Path
class WorkingDirFactory:
    """Factory that yields the process's current working directory."""

    def create(self):
        """Return the current working directory as a pathlib.Path."""
        current_dir = os.getcwd()
        return Path(current_dir)
| 2.21875 | 2 |
ui_automation_core/unit_test/logging_test.py | Harshavardhanchowdary/python-ui-testing-automation | 0 | 12768965 | import os
from unittest import TestCase
from unittest.mock import patch
from ui_automation_core.utilities.log_utils import LogUtils
class LoggingTest(TestCase):
    """Unit tests for LogUtils (log-directory creation helper)."""

    def test_LogUtils_is_instantiate(self):
        """Instantiating LogUtils should report creation via its __str__."""
        log_dir = '../logs/'
        log_util = LogUtils(log_dir)
        self.assertEqual('An instance of LogUtils is created and `logs` folder is created.', str(log_util))

    @patch('builtins.print')
    def test_create_log_directory(self, mock_print):
        """
        Tests whether `Logs` directory gets created if it does not exist or skips execution if the directory exists.
        :param mock_print:
        :return:
        """
        # NOTE: existence must be sampled BEFORE LogUtils() runs, since the
        # constructor itself may create the directory (see test above).
        is_log_folder_exists = os.path.isdir('../logs/')
        log_dir = '../logs/'
        LogUtils(log_dir).create_log_directory()
        if is_log_folder_exists:
            mock_print.assert_called_with('Directory ../logs/ already exists')
        else:
            mock_print.assert_called_with('Directory ../logs/ Created')
| 2.765625 | 3 |
train.py | vincentyexiang/AI_Make_Music | 1 | 12768966 | # -*- coding: UTF-8 -*-
"""
训练神经网络,将参数(Weight)存入 HDF5 文件
"""
import numpy as np
import tensorflow as tf
from utils import *
from network import *
"""
==== 一些术语的概念 ====
# Batch size : 批次(样本)数目。一次迭代(Forword 运算(用于得到损失函数)以及 BackPropagation 运算(用于更新神经网络参数))所用的样本数目。Batch size 越大,所需的内存就越大
# Iteration : 迭代。每一次迭代更新一次权重(网络参数),每一次权重更新需要 Batch size 个数据进行 Forward 运算,再进行 BP 运算
# Epoch : 纪元/时代。所有的训练样本完成一次迭代
# 假如 : 训练集有 1000 个样本,Batch_size=10
# 那么 : 训练完整个样本集需要: 100 次 Iteration,1 个 Epoch
# 但一般我们都不止训练一个 Epoch
"""
# Train the neural network.
def train():
    """Train the model on note sequences and checkpoint its weights to HDF5 files."""
    notes = get_notes()
    # Number of distinct pitches (set() removes duplicates).
    num_pitch = len(set(notes))
    network_input, network_output = prepare_sequences(notes, num_pitch)
    model = network_model(network_input, num_pitch)
    filepath = "weights-{epoch:02d}-{loss:.4f}.hdf5"
    # Save the model parameters (weights) to a checkpoint file at the end of
    # each epoch, so nothing is lost mid-training and we can stop whenever
    # the loss is satisfactory.
    checkpoint = tf.keras.callbacks.ModelCheckpoint(
        filepath,  # path of the saved checkpoint file
        monitor='loss',  # quantity to monitor: the training loss
        verbose=0,
        save_best_only=True,  # keep only checkpoints that improve the monitored value
        mode='min'  # "best" means minimal loss
    )
    callbacks_list = [checkpoint]
    # Train the model with fit().
    model.fit(network_input, network_output, epochs=100, batch_size=64, callbacks=callbacks_list)
def prepare_sequences(notes, num_pitch):
    """
    Prepare the (input, output) training sequences for the neural network.
    """
    sequence_length = 100  # length of each input sequence
    # Sorted list of all distinct pitch names.
    pitch_names = sorted(set(item for item in notes))
    # Map each pitch name to an integer id.
    pitch_to_int = dict((pitch, num) for num, pitch in enumerate(pitch_names))
    # Build the network's input and output sequences: each window of 100
    # notes predicts the note that immediately follows it.
    network_input = []
    network_output = []
    for i in range(0, len(notes) - sequence_length, 1):
        sequence_in = notes[i: i + sequence_length]
        sequence_out = notes[i + sequence_length]
        network_input.append([pitch_to_int[char] for char in sequence_in])
        network_output.append(pitch_to_int[sequence_out])
    n_patterns = len(network_input)
    # Reshape the input into the form the network model accepts.
    network_input = np.reshape(network_input, (n_patterns, sequence_length, 1))
    # Normalize the input; normalization helps the optimizer find the loss
    # minimum faster and more reliably.
    network_input = network_input / float(num_pitch)
    # One-hot encode the expected output as a {0, 1} boolean matrix to match
    # the categorical_crossentropy loss.
    network_output = tf.keras.utils.to_categorical(network_output)
    return network_input, network_output
# Entry point: start training when run as a script.
if __name__ == '__main__':
    train()
| 2.625 | 3 |
tests/integration/test_project_view.py | alexcfaber/katka-core | 1 | 12768967 | from uuid import UUID
import pytest
from katka import models
@pytest.mark.django_db
class TestProjectViewSet:
    """Integration tests for the /projects/ endpoints: listing, team filtering,
    retrieval, create/update/patch, soft-delete, and visibility rules for
    deactivated projects/teams.

    Fixtures (client, logged_in_user, my_team, my_project, ...) are provided
    by the project's conftest, outside this file.
    """

    def test_list(self, client, logged_in_user, my_team, my_project):
        response = client.get("/projects/")
        assert response.status_code == 200
        parsed = response.json()
        assert len(parsed) == 1
        assert parsed[0]["name"] == "Project D"
        assert parsed[0]["slug"] == "PRJD"
        parsed_team = parsed[0]["team"]
        assert UUID(parsed_team) == my_team.public_identifier

    def test_filtered_list(self, client, logged_in_user, my_team, my_project, my_other_team, my_other_project):
        response = client.get("/projects/?team=" + str(my_other_team.public_identifier))
        assert response.status_code == 200
        parsed = response.json()
        assert len(parsed) == 1
        assert parsed[0]["name"] == "Project 2"
        assert parsed[0]["slug"] == "PRJ2"
        parsed_team = parsed[0]["team"]
        assert UUID(parsed_team) == my_other_team.public_identifier

    def test_filtered_list_non_existing_team(
        self, client, logged_in_user, my_team, my_project, my_other_team, my_other_project
    ):
        # NOTE(review): this queries /applications/ although the class tests
        # /projects/ -- confirm whether that endpoint is intentional here.
        response = client.get("/applications/?project=12345678-1234-5678-1234-567812345678")
        assert response.status_code == 200
        parsed = response.json()
        assert len(parsed) == 0

    def test_list_excludes_inactive(self, client, logged_in_user, my_team, deactivated_project):
        response = client.get("/projects/")
        assert response.status_code == 200
        parsed = response.json()
        assert len(parsed) == 0

    def test_get(self, client, logged_in_user, my_team, my_project):
        response = client.get(f"/projects/{my_project.public_identifier}/")
        assert response.status_code == 200
        parsed = response.json()
        assert parsed["name"] == "Project D"
        assert parsed["slug"] == "PRJD"
        assert UUID(parsed["team"]) == my_team.public_identifier

    def test_get_excludes_inactive(self, client, logged_in_user, my_team, deactivated_project):
        response = client.get(f"/projects/{deactivated_project.public_identifier}/")
        assert response.status_code == 404

    def test_delete(self, client, logged_in_user, my_team, my_project):
        # Delete is a soft-delete: the row remains, flagged deleted=True.
        response = client.delete(f"/projects/{my_project.public_identifier}/")
        assert response.status_code == 204
        p = models.Project.objects.get(pk=my_project.public_identifier)
        assert p.deleted is True

    def test_update(self, client, logged_in_user, my_team, my_project):
        url = f"/projects/{my_project.public_identifier}/"
        data = {"name": "Project X", "slug": "PRJX", "team": my_team.public_identifier}
        response = client.put(url, data, content_type="application/json")
        assert response.status_code == 200
        p = models.Project.objects.get(pk=my_project.public_identifier)
        assert p.name == "Project X"

    def test_update_deactivated_team(self, client, logged_in_user, deactivated_team, my_project):
        url = f"/projects/{my_project.public_identifier}/"
        data = {"name": "Project X", "slug": "PRJX", "team": deactivated_team.public_identifier}
        response = client.put(url, data, content_type="application/json")
        assert response.status_code == 403

    def test_update_nonexistent_team(self, client, logged_in_user, my_project):
        url = f"/projects/{my_project.public_identifier}/"
        data = {"name": "Project X", "slug": "PRJX", "team": "00000000-0000-0000-0000-000000000000"}
        response = client.put(url, data, content_type="application/json")
        assert response.status_code == 403

    def test_partial_update(self, client, logged_in_user, my_team, my_project):
        url = f"/projects/{my_project.public_identifier}/"
        data = {"name": "Project X"}
        response = client.patch(url, data, content_type="application/json")
        assert response.status_code == 200
        p = models.Project.objects.get(pk=my_project.public_identifier)
        assert p.name == "Project X"

    def test_create(self, client, logged_in_user, my_team, my_project):
        before = models.Project.objects.count()
        url = f"/projects/"
        data = {"name": "Project X", "slug": "PRJX", "team": my_team.public_identifier}
        response = client.post(url, data=data, content_type="application/json")
        assert response.status_code == 201
        p = models.Project.objects.get(name="Project X")
        assert p.name == "Project X"
        assert models.Project.objects.count() == before + 1
| 2.4375 | 2 |
opac/apps.py | rimphyd/Django-OPAC | 1 | 12768968 | <reponame>rimphyd/Django-OPAC
from django.apps import AppConfig
class OpacConfig(AppConfig):
    """Django application configuration for the `opac` app."""
    name = 'opac'
| 1.257813 | 1 |
src/polkadot/globber.py | brian-l/polkadot | 3 | 12768969 | <reponame>brian-l/polkadot
import fnmatch
import os
def glob(path, base):
    """
    glob.glob ignores hidden files. this one allows them, and matches against the relative path provided in the config.

    :param str path: string or wildcard that represents one or more files
    :param str base: base directory to run the relative path search in
    :returns: generator of filenames in the current directory that matched
    """
    for root, dirs, files in os.walk(base):
        # Path of `root` relative to `base`.
        # BUGFIX: the previous implementation used
        # os.path.join(root, fname).replace(base, '.'), which replaces EVERY
        # occurrence of `base` in the walked path and corrupts the result
        # whenever the base string appears again deeper in the tree.
        rel_root = os.path.relpath(root, base)
        for fname in files:
            full = os.path.normpath(os.path.join(rel_root, fname))
            if fnmatch.fnmatch(full, path):
                yield full
| 3.296875 | 3 |
pytgbot/webhook.py | annihilatorrrr/pytgbot | 52 | 12768970 | # -*- coding: utf-8 -*-
from luckydonaldUtils.logger import logging
from pytgbot.bot import Bot
from pytgbot.exceptions import TgApiServerException, TgApiParseException
__author__ = 'luckydonald'
logger = logging.getLogger(__name__)
class Webhook(Bot):
    """
    Subclass of Bot, will be returned of a sucessful webhook setting.

    Differs with the normal Bot class, as the sending function stores the result to send,
    so you can actually get that and return the data on your incomming message.

    NOTE(review): `stored_request` is never written inside this class --
    confirm where it is supposed to be filled before relying on it.
    """
    stored_request = None

    def _prepare_request(self, command, query):
        """
        Build the request url and the json-encodable parameter dict.

        :param command: The Url command parameter
        :type command: str

        :param query: will get json encoded.
        :type query: dict

        :return: DictObject with `url` and `params` keys.
        """
        from luckydonaldUtils.encoding import to_native as n
        from pytgbot.api_types.sendable import Sendable
        from pytgbot.api_types import as_array
        from DictObject import DictObject
        import json

        params = {}
        for key in query.keys():
            element = query[key]
            if element is not None:
                if isinstance(element, Sendable):
                    # serialize Sendable api types into their json representation
                    params[key] = json.dumps(as_array(element))
                else:
                    params[key] = element
        url = self._base_url.format(api_key=n(self.api_key), command=n(command))
        return DictObject(url=url, params=params)
    # end def

    def _do_request(self, url, params=None, files=None, use_long_polling=None, request_timeout=None):
        """
        Perform the actual HTTP POST and parse the json answer.

        :param url: The complete url to send to
        :type url: str

        :keyword params: Parameter for that connection

        :keyword files: Optional files parameters

        :keyword use_long_polling: if it should use long polling.
                                (see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
        :type use_long_polling: bool

        :keyword request_timeout: When the request should time out.
        :type request_timeout: int

        :return: json data received, with the raw response attached under "response"
        :rtype: DictObject.DictObject
        """
        import requests

        r = requests.post(url, params=params, files=files, stream=use_long_polling,
                          verify=True, timeout=request_timeout)
        # No self signed certificates. Telegram should be trustworthy anyway...
        from DictObject import DictObject
        try:
            logger.debug("Response: {}".format(r.json()))
            json_data = DictObject.objectify(r.json())
        except Exception:
            logger.exception("Parsing answer failed.\nRequest: {r!s}\nContent: {r.content}".format(r=r))
            raise
        # end try
        json_data["response"] = r  # TODO: does this failes on json lists? Does TG does that?
        return json_data
    # end def

    def _process_response(self, json_data):
        """
        Turn the parsed json answer into a result value (or raise on errors).

        :param json_data: answer from `_do_request`, carrying the raw
                          `requests` response under the "response" key.
        :raises TgApiServerException: when the server answered with ok != True
        :raises TgApiParseException: when "result" is missing from the answer
        """
        # TG should always return an dict, with at least a status or something.
        if self.return_python_objects:
            if json_data.ok != True:
                raise TgApiServerException(
                    error_code=json_data.error_code if "error_code" in json_data else None,
                    response=json_data.response if "response" in json_data else None,
                    description=json_data.description if "description" in json_data else None,
                    # BUGFIX: was `r.request` -- `r` does not exist in this
                    # scope (NameError). The raw response is attached to
                    # json_data["response"] by _do_request().
                    request=json_data.response.request if "response" in json_data else None
                )
            # end if not ok
            if "result" not in json_data:
                raise TgApiParseException('Key "result" is missing.')
            # end if no result
            return json_data.result
        # end if return_python_objects
        return json_data
    # end def

    def do(self, command, files=None, use_long_polling=False, request_timeout=None, **query):
        """
        Send a request to the api.

        If the bot is set to return the json objects, it will look like this:

        ```json
        {
            "ok": bool,
            "result": {...},
            # optionally present:
            "description": "human-readable description of the result",
            "error_code": int
        }
        ```

        :param command: The Url command parameter
        :type command: str

        :keyword request_timeout: When the request should time out.
        :type request_timeout: int

        :keyword files: if it needs to send files.

        :keyword use_long_polling: if it should use long polling.
                                (see http://docs.python-requests.org/en/latest/api/#requests.Response.iter_content)
        :type use_long_polling: bool

        :param query: will get json encoded.

        :return: The json response from the server, or, if `self.return_python_objects` is `True`, a parsed return type.
        :rtype: DictObject.DictObject | pytgbot.api_types.receivable.Receivable
        """
        params = self._prepare_request(command, query)
        # BUGFIX: `_do_request` accepts `use_long_polling`/`request_timeout`,
        # not `stream`/`timeout` -- the previous call raised TypeError on
        # every invocation.
        r = self._do_request(
            params.url, params=params.params, files=files,
            use_long_polling=use_long_polling, request_timeout=request_timeout
        )
        return self._process_response(r)
    # end def do
| 2.578125 | 3 |
lib.py | tekton/redis_browser | 0 | 12768971 | import base64
from simple_redis_conn import r
import json
# from simple_redis_conn import w
# connect to redis
# scan for all the things to create an in-memory DB of things
# go through all the keys and get their types
# for a key, check its type- don't have it? get it!
# # based on the type get the stuff...
# # if we can send the type, even better!
# /keys/:key(?type=<type>) && other information
# get hash
# ...try to convert all the things inside to json objects if possible
def recursive_redis_scan(scan_val, rtn_dict=None):
    """
    Walk the redis SCAN cursor recursively, collecting every key.

    :param scan_val: a (cursor, keys) tuple as returned by r.scan()
    :param rtn_dict: accumulator mapping urlsafe-base64 key id -> raw key;
                     a fresh dict is created when omitted.
    :return: dict of {urlsafe_b64encode(key): key}

    BUGFIX: the default used to be the mutable literal `rtn_dict={}`, which
    is shared across calls -- every later call kept accumulating into the
    same dict. A None sentinel avoids that.
    """
    if rtn_dict is None:
        rtn_dict = {}
    print(scan_val[0])  # debug: current SCAN cursor
    if len(scan_val) > 1:
        for k in scan_val[1]:
            rtn_dict[base64.urlsafe_b64encode(k)] = k
    # cursor > 0 means the server-side scan is not finished yet
    if scan_val[0] > 0:
        rtn_dict = recursive_redis_scan(r.scan(scan_val[0]), rtn_dict)
    return rtn_dict
def get_redis_keys():
    """Return a JSON object mapping urlsafe-base64 key ids to raw key names."""
    first_scan = r.scan()  # seed the recursion with the first SCAN page
    all_keys = recursive_redis_scan(first_scan)
    return json.dumps(all_keys)
def decode_key(key):
    """
    Decode a urlsafe-base64 key id back into the raw redis key.

    BUGFIX: keys are produced with base64.urlsafe_b64encode (see
    recursive_redis_scan), so they must be decoded with urlsafe_b64decode;
    plain b64decode breaks on the '-'/'_' characters of that alphabet.
    """
    key = base64.urlsafe_b64decode(key)
    return key
def get_redis_key_data(k, start=None, end=None):
    """
    Return a JSON document describing the value stored under the key whose
    urlsafe-base64 id is `k`. (`start`/`end` are accepted for API
    compatibility but currently unused.)

    BUGFIXES:
    * decode with urlsafe_b64decode -- keys are encoded with
      urlsafe_b64encode elsewhere in this module;
    * the "list" branch returned only r.llen() (an int) and the
      "string"/"set" branches returned bare values, so the later
      `rtn_dict["redis_key"] = key` assignment raised TypeError; every
      type is now wrapped in a dict, and lists return their elements.
    """
    key = base64.urlsafe_b64decode(k)
    key_type = r.type(key)
    if key_type == "hash":
        rtn_dict = r.hgetall(key)
    elif key_type == "list":
        rtn_dict = {"list_vals": r.lrange(key, 0, -1)}
    elif key_type == "string":
        rtn_dict = {"string_val": r.get(key)}
    elif key_type == "set":
        rtn_dict = {"set_vals": list(r.smembers(key))}
    else:
        rtn_dict = {}
    rtn_dict["redis_key"] = key
    return json.dumps(rtn_dict)
| 2.796875 | 3 |
hammer/clienttype.py | usd-yamazaki/chainhammer | 0 | 12768972 | <gh_stars>0
#!/usr/bin/env python3
"""
@summary: Which client type do we have?
quorum-raft/ibft OR energyweb OR parity OR geth OR ...
@version: v43 (16/December/2018)
@since: 29/May/2018
@organization:
@author: https://github.com/drandreaskrueger
@see: https://github.com/drandreaskrueger/chainhammer for updates
"""
################
## Dependencies:
from pprint import pprint
import hammer
try:
from web3 import Web3, HTTPProvider # pip3 install web3
except:
print ("Dependencies unavailable. Start virtualenv first!")
exit()
# extend sys.path for imports:
if __name__ == '__main__' and __package__ is None:
from os import sys, path
sys.path.append(path.dirname(path.dirname(path.abspath(__file__))))
from hammer.config import RPCaddress
class Error(Exception):
    """Base class for errors raised by this module."""
    pass
class MethodNotExistentError(Error):
    """Raised when the node's RPC interface answers an error for the called method."""
    pass
def curl_post(method, txParameters=None, RPCaddress=RPCaddress, ifPrint=False):
    """
    call Ethereum RPC functions that are still missing from web3.py

    see
    https://github.com/jpmorganchase/quorum/issues/369#issuecomment-392240389

    :param method: JSON-RPC method name, e.g. "net_version"
    :param txParameters: optional dict sent as the single entry of "params"
    :param RPCaddress: node RPC endpoint (defaults to hammer.config.RPCaddress)
    :param ifPrint: if True, print the raw json response
    :raises MethodNotExistentError: when the answer contains an "error" field
    :return: the "result" field of the JSON-RPC answer
    """
    payload= {"jsonrpc" : "2.0",
              "method" : method,
              "id" : 1}
    if txParameters:
        payload["params"] = [txParameters]
    headers = {'Content-type' : 'application/json'}
    # NOTE(review): relies on `hammer.clienttools` being reachable via the
    # bare `import hammer` above -- confirm hammer/__init__ exposes clienttools.
    response = hammer.clienttools.postRpc(RPCaddress, json=payload, headers=headers)
    response_json = response.json()
    if ifPrint:
        print('raw json response: {}'.format(response_json))
    if "error" in response_json:
        raise MethodNotExistentError()
    else:
        return response_json['result']
def clientTypeWarnings(nodeName, nodeType, nodeVersion, consensus, networkId, chainName, chainId):
    """Print warnings for client/consensus combinations with known timestamp quirks."""
    warnings = []
    if nodeName == "TestRPC":
        warnings.append("WARN: TestRPC has odd timestamp units, check 'tps.timestampToSeconds()' for updates")
    if consensus == "raft":
        warnings.append("WARN: raft consensus did report timestamps in nanoseconds. Is that still the case?")
    for warning in warnings:
        print(warning)
def clientType(w3):
    """
    figure out which client (quorum, parity, geth, energyweb, etc.),
    which client type (fork of geth, or fork of parity),
    which consensus algorithm (e.g. RAFT, IBFT, aura, clique),
    and networkId, and chainId, and chainName.

    Sorry, very ugly, and probably faulty too, and for sure will break some day.
    The fractions of the Ethereum world seem to have unsolved standardisation issues.
    See github issues
    * https://github.com/jpmorganchase/quorum/issues/505
    * https://github.com/jpmorganchase/quorum/issues/507
    * https://github.com/paritytech/parity-ethereum/issues/9432

    :return: tuple (nodeName, nodeType, nodeVersion, consensus,
             networkId, chainName, chainId); unknowns stay "???" / -1.
    """
    # defaults used when a probe fails or the info is unavailable
    consensus = "???"
    chainName = "???"
    networkId = -1
    chainId = -1
    try:
        answer = curl_post(method="net_version")
        networkId = int(answer)
    except MethodNotExistentError:
        pass
    # How to detect raft consensus?
    # Unfortunately this fails with /quorum-example/7nodes
    # because they forgot to open the RPC api "raft"
    # see issues
    #
    #
    try:
        answer = curl_post(method="raft_role") # , ifPrint=True)
        if answer:
            consensus = "raft"
    except MethodNotExistentError:
        pass
    # IBFT consensus?
    # There is a specific answer, just in an unusual place; see issue
    # https://github.com/jpmorganchase/quorum/issues/505
    try:
        answer = curl_post(method="admin_nodeInfo")
        if 'istanbul' in answer.get('protocols', {}).keys():
            consensus = "istanbul"
    # NOTE(review): bare except silently swallows everything here,
    # including KeyboardInterrupt -- consider narrowing to Exception.
    except:
        pass
    # Geth / Parity / Energy Web:
    nodeString = w3.version.node
    nodeName = nodeString.split("/")[0]
    known = ("Geth", "Parity", "Parity-Ethereum", "OpenEthereum", "Energy Web", "TestRPC")
    if nodeName not in known:
        print ("Interesting, '%s', a new node type? '%s'" % (nodeName, nodeString))
    if nodeName == "Parity-Ethereum":
        nodeName = "Parity"
    nodeVersion = nodeString.split("/")[1]
    if nodeName == "Parity":
        # see issue https://github.com/paritytech/parity-ethereum/issues/10215
        nodeVersion = nodeString.split("/")[2]
    # Quorum pretends to be Geth - so how to distinguish vanillaGeth from QuorumGeth?
    # - see https://github.com/jpmorganchase /quorum/issues/507
    # NOTE: nodeType is fixed BEFORE the Quorum override below -- order matters.
    nodeType = nodeName
    if consensus in ('raft', 'istanbul'):
        # TODO: Because raft RPC is not open in example (see above), this can
        # still not distinguish between vanilla geth, and quorum RAFT.
        nodeName = "Quorum"
    if nodeName == "Energy Web":
        nodeType = "Parity"
        consensus = "PoA" # Dangerous assumption. TODO: ... after they took care of the open issues, this gets easier.
    if nodeName == "OpenEthereum":
        nodeType = "Parity"
    if nodeType=="Parity":
        try:
            chainName = curl_post(method="parity_chain") # foundation, tobalaba
            if chainName=="foundation":
                consensus = "PoW" # dangerous assumption, because some day that might actually change. For now fine.
        except MethodNotExistentError:
            pass
        try:
            answer = curl_post(method="parity_chainId")
            try:
                chainId = int(answer, 16)
            except TypeError:
                chainId = -1
        except MethodNotExistentError:
            pass
    if nodeName=="Geth":
        # TODO: This can still not distinguish between vanilla geth, and quorum RAFT.
        try:
            answer = curl_post(method="admin_nodeInfo")
            answer_config = answer['protocols']['eth'].get('config', None)
            if answer_config:
                if "clique" in answer_config:
                    consensus="clique"
                if "ethash" in answer_config:
                    consensus="ethash"
                chainId = answer_config.get('chainId', None)
                # TODO:
                # Does geth also have a concept of chainName (e.g. for Morden/Ropsten/...)? How to query?
                # chainName = curl_post(method="net_version") #
        except MethodNotExistentError:
            pass
    clientTypeWarnings(nodeName, nodeType, nodeVersion, consensus, networkId, chainName, chainId)
    return nodeName, nodeType, nodeVersion, consensus, networkId, chainName, chainId
def run_clientType(w3):
    """
    Smoke test for clientType(): query the connected node and print what was found.
    """
    info = clientType(w3)
    nodeName, nodeType, nodeVersion, consensus, networkId, chainName, chainId = info
    txt = "nodeName: %s, nodeType: %s, nodeVersion: %s, consensus: %s, network: %s, chainName: %s, chainId: %s"
    print(txt % info)
def justTryingOutDifferentThings(ifPrint=False):
    """
    perhaps these calls can help, or a combination thereof?
    also see
    https://github.com/jpmorganchase/quorum/blob/3d91976f08074c1f7f605beaadf4b37783026d85/internal/web3ext/web3ext.go#L600-L671

    :param ifPrint: forwarded to curl_post to also print raw json answers
    """
    for method in ("web3_clientVersion", "admin_nodeInfo", "net_version", "rpc_modules",
                   "parity_chainId", "parity_chain", "parity_consensusCapability",
                   "parity_nodeKind", "parity_versionInfo", "eth_chainId"):
        print ("\n%s:" % method)
        try:
            pprint ( curl_post(method=method, ifPrint=ifPrint) )
        # best-effort probing: unsupported methods are simply skipped.
        # BUGFIX: narrowed from a bare `except:` so Ctrl-C / SystemExit
        # still interrupt the probe loop.
        except Exception:
            pass
def simple_web3connection(RPCaddress):
    """
    get a web3 object.
    simple, just for this demo here,
    do not use elsewhere, instead use clienttools.start_web3connection
    """
    provider = HTTPProvider(RPCaddress, request_kwargs={'timeout': 120})
    w3 = Web3(provider)
    print ("web3 connection established, blockNumber =", w3.eth.blockNumber, end=", ")
    print ("node version string = ", w3.version.node)
    return w3
if __name__ == '__main__':
    # Manual smoke test: connect to the configured node, identify it,
    # then probe a list of RPC methods.
    w3 = simple_web3connection(RPCaddress=RPCaddress)
    run_clientType(w3)
    print()
    justTryingOutDifferentThings() # ifPrint=True)
| 2.15625 | 2 |
parseepo/validate.py | cverluise/parseEPO | 0 | 12768973 | <gh_stars>0
from parseepo.exception import SingleAttrException, RowException
def single_attr(val, attr, publication_number):
    """Validate that attribute `attr` of `publication_number` has exactly one value."""
    if len(val) == 1:
        return
    raise SingleAttrException(
        f"{publication_number}: Number of values exceed expected number "
        f"of values for Attr {attr}"
    )
def row(row_):
    """Validate that a parsed row has the expected number of fields (7)."""
    if len(row_) != 7:
        raise RowException(
            f"Length of row different from expected length for row {row_}"
        )
| 2.84375 | 3 |
built-in/PyTorch/Official/cv/image_classification/Shufflenetv2_for_PyTorch/prof_demo.py | Ascend/modelzoo | 12 | 12768974 | <reponame>Ascend/modelzoo<gh_stars>10-100
# -*- coding: utf-8 -*-
# Copyright 2020 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""prof_demo.py
"""
import torch
import torch.nn as nn
import torch.optim as optim
import time
import argparse
def build_model():
    """Construct the network to profile (customize / load pretrained weights here)."""
    from models import shufflenet_v2_x1_0
    net = shufflenet_v2_x1_0()
    return net
def get_raw_data():
    """Return a random NCHW batch (2 RGB images of 224x224) as profiling input."""
    batch_shape = (2, 3, 224, 224)
    return torch.randn(*batch_shape)
def criterion(x, y=None):
    """Dummy loss for profiling: the sum of all elements of `x` (`y` is ignored)."""
    loss = x.sum()
    return loss
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='PyTorch Prof')
    parser.add_argument('--device', type=str, default='cpu',
                        help='set which type of device used. Support cpu, cuda:0(device_id), npu:0(device_id).')
    parser.add_argument('--amp', default=False, action='store_true',
                        help='use amp during prof')
    parser.add_argument('--loss-scale', default=64.0, type=float,
                        help='loss scale using in amp, default 64.0, -1 means dynamic')
    parser.add_argument('--opt-level', default='O2', type=str,
                        help='opt-level using in amp, default O2')
    parser.add_argument('--FusedSGD', default=False, action='store_true',
                        help='use FusedSGD during prof')
    args = parser.parse_args()

    # 1. Setup: select the target device and the profiler keyword arguments.
    if args.device.startswith('cuda'):
        torch.cuda.set_device(args.device)
        prof_kwargs = {'use_cuda': True}
    elif args.device.startswith('npu'):
        torch.npu.set_device(args.device)
        prof_kwargs = {'use_npu': True}
    else:
        prof_kwargs = {}

    if args.amp:
        from apex import amp

    # 2. Build the model and optimizer (optionally NPU-fused SGD and/or AMP).
    model = build_model()
    if args.FusedSGD and args.device.startswith('npu'):
        from apex.optimizers import NpuFusedSGD
        optimizer = NpuFusedSGD(model.parameters(), lr=0.01)
    else:
        optimizer = optim.SGD(model.parameters(), lr=0.01)
    model = model.to(args.device)
    if args.amp:
        model, optimizer = amp.initialize(model, optimizer, opt_level=args.opt_level,
                                          loss_scale=None if args.loss_scale == -1 else args.loss_scale)

    # 3. Generate the input batch and move it to the device.
    input_tensor = get_raw_data()
    input_tensor = input_tensor.to(args.device)

    # 4. Run a few warm-up iterations first so the profile is accurate.
    def run():
        # One training step: forward, loss, backward (AMP-scaled if enabled), update.
        output_tensor = model(input_tensor)
        loss = criterion(output_tensor)  # dummy loss: sum of the outputs (see criterion())
        optimizer.zero_grad()
        if args.amp:
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
        else:
            loss.backward()
        optimizer.step()
        return loss

    for i in range(5):
        start_time = time.time()
        loss = run()
        print('iter: %d, loss: %.2f, time: %.2f'%(i, loss, (time.time() - start_time)*1000))

    # 4. profiling: one profiled step, then dump the table and a chrome trace.
    with torch.autograd.profiler.profile(**prof_kwargs) as prof:
        run()
    print(prof.key_averages().table())
    prof.export_chrome_trace("pytorch_prof_%s.prof" % args.device + ('_amp' if args.amp else ''))
| 2.078125 | 2 |
main.py | GabrielMissael/solution | 0 | 12768975 | from solution.recollection.get_tweets import get_tweets
from solution.ml.sentiment import AddSentimentAnalysis
from solution.ml.clustering import cluster
from solution.viz.wordclouds import GenWordcloud
from solution.viz.ngrams import ngram
from solution.viz.visualization import scatter
import pandas as pd
import os.path
def process(pais, prioridad):
    """Cluster the tweets for (country, priority) and add sentiment analysis.

    Results are cached as pickles under data/<pais>/<prioridad>/:
    embs.pkl (clustering, computed only once) and sentiment.pkl.
    """
    file = f'data/{pais}/{prioridad}/embs.pkl'
    if not os.path.exists(file):
        df = cluster(
            file = f'data/{pais}/{prioridad}/tweets.json',
            verbose=True
        )
        df.to_pickle(file)
    df = AddSentimentAnalysis(file)
    df.to_pickle(f'data/{pais}/{prioridad}/sentiment.pkl')
def main():
    """Run the full pipeline per dataset: clustering, sentiment, visualizations."""
    # Uncomment if data not available
    # get_tweets(n = 5000)
    paises = ['mexico']
    prioridades = ['excelencia_operativa']
    # Uncomment to run all the 20 dataset (countries and priorities)
    # paises = ['argentina', 'colombia', 'mexico', 'peru', 'spain']
    # prioridades = ['crecimiento', 'excelencia_operativa','futuro_sostenible', 'salud_financiera']
    for pais in paises:
        for prioridad in prioridades:
            process(pais, prioridad)
            path = f'data/{pais}/{prioridad}/'
            print('HEY')  # NOTE(review): leftover debug print -- consider removing
            df = pd.read_pickle(path + 'embs.pkl')
            GenWordcloud(df, path + 'wordcloud.png')
            ngram(df, 2, path + 'ngram.html')
            scatter(df, path + 'scatter.html')
            # NOTE(review): this dataframe is loaded but never used afterwards
            df = pd.read_pickle(path + 'sentiment.pkl')
df = pd.read_pickle(path + 'sentiment.pkl')
if __name__ == '__main__':
    print('HEY')  # NOTE(review): leftover debug print -- consider removing
    main()
examples/performance/__main__.py | gujun4990/sqlalchemy | 1 | 12768976 | <gh_stars>1-10
"""Allows the examples/performance package to be run as a script."""
from . import Profiler
# Delegate to the shared Profiler CLI entry point.
if __name__ == '__main__':
    Profiler.main()
| 1.171875 | 1 |
installer.py | Marusoftware/Marutools | 0 | 12768977 | #! /usr/bin/python3
# -*- coding: utf-8 -*-
import tkinter
from tkinter import ttk
from tkinter import filedialog
import tkinter.messagebox as tkmsg
import os
import time
import sys
import subprocess
############add APP info here###############
APP_NAME = "marueditor"
APP_NAME_TITLE = "Maruediter"
APP_LUA_JP = [""]  # license text lines, Japanese (NOTE(review): "LUA" presumably means EULA)
APP_LUA_EN = [""]  # license text lines, English
APP_TYPE = "32" # 64 , 32 , select
APP_ICON = "./marueditor.png"
APP_VER = "1.0"
APP_COMPANY = "Marusoftare"
############################################
lang = 0  # selected language: 0 = Japanese, 1 = English (see radio buttons below)
step_v = 0  # wizard step counter, advanced by next_step()
def bye():
    # Window-close handler: advance the step variable so any pending
    # root.wait_variable(step) unblocks, then tear down the UI and exit.
    next_step()
    root.destroy()
    exit()
def next_step():
    # Advance the wizard by one step; writing the traced IntVar `step`
    # unblocks the root.wait_variable(step) call in the main flow below.
    global step_v, step
    step_v = step_v + 1
    step.set(step_v)
# --- Wizard main flow -------------------------------------------------------
# The flow is a linear state machine: widgets are reconfigured in place and
# each page blocks on root.wait_variable(step) until next_step()/bye() fires.
root = tkinter.Tk(className=APP_NAME+" Installer")
step = tkinter.IntVar()
if os.path.exists(APP_ICON):
    root.iconphoto(True, tkinter.PhotoImage(file=APP_ICON))
step.set(0)
# Page 1: language selection (0 = Japanese, 1 = English).
lang_v = tkinter.IntVar()
root.title("Language")
root.protocol("WM_DELETE_WINDOW", bye)
l = ttk.Label(root, text="Please select language when use install.")
l.pack(side="top")
b1 = ttk.Button(root, text='NEXT', command=next_step)
b1.pack(side="bottom")
rb1 = ttk.Radiobutton(text = '日本語', variable = lang_v, value = 0)
rb1.pack()
rb2 = ttk.Radiobutton(text = 'English', variable = lang_v, value = 1)
rb2.pack()
root.wait_variable(step)
root.geometry("500x300")
lang = lang_v.get()
# Page 2: license agreement; the radio buttons are re-purposed as
# accept (value 0) / ignore (value 1).
lua_v = tkinter.IntVar()
l2 = tkinter.Listbox(root)
l2.pack()
if lang == 0:
    root.title(APP_NAME_TITLE + " インストーラ")
    rb1.configure(text="許諾", variable = lua_v)
    rb2.configure(text="拒否", variable = lua_v)
    l.configure(text="下記の利用規約をお読みください。")
    for i in range(len(APP_LUA_JP)):
        l2.insert("end", APP_LUA_JP[i])
else:
    root.title(APP_NAME_TITLE + " Installer")
    rb1.configure(text="accept", variable = lua_v)
    rb2.configure(text="ignore", variable = lua_v)
    l.configure(text="Please read LUA.")
    for i in range(len(APP_LUA_EN)):
        l2.insert("end", APP_LUA_EN[i])
root.wait_variable(step)
if lua_v.get() == 1:
    # License declined: abort the installer.
    exit()
l2.destroy()
# Page 3 (optional): architecture choice when the app ships both builds.
if APP_TYPE == "select":
    type_v = tkinter.IntVar()
    if lang == 0:
        l.configure(text="バージョンを選択してください。")
    else:
        l.configure(text="Please select version.")
    rb1.configure(text="64bit", variable=type_v)
    rb2.configure(text="32bit", variable=type_v)
    root.wait_variable(step)
    if type_v.get() == 0:
        APP_TYPE = "64"
    else:
        APP_TYPE = "32"
else:
    pass
rb1.destroy()
rb2.destroy()
# Page 4: install-directory selection, defaulting to Program Files on
# Windows (matching the chosen architecture) or "/" elsewhere.
e = ttk.Entry(root)
e.pack()
if os.name == 'nt':
    if APP_TYPE == "64":
        def_dir = os.environ["ProgramW6432"]
    else:
        def_dir = os.environ["ProgramFiles(x86)"]
else:
    def_dir = "/"
e.insert("0",def_dir)
def select():
    # Directory-picker callback; falls back to the default when cancelled.
    e.delete(0,tkinter.END)
    e.insert("0",filedialog.askdirectory(initialdir=def_dir))
    if e.get() == "":
        e.insert("end",def_dir)
if lang == 0:
    b2 = ttk.Button(root, text="選択", command=select)
    l.configure(text="インストール先を選択してください。")
    b1.configure(text="インストール")
else:
    b2 = ttk.Button(root, text="Select", command=select)
    l.configure(text="Please select install directory.")
    b1.configure(text="Install")
b2.pack()
root.wait_variable(step)
i_dir = e.get()
if not os.path.exists(i_dir):
    i_dir = def_dir
i_dir = os.path.join(i_dir,APP_NAME)
print(i_dir)
e.destroy()
b2.destroy()
# Page 5: installation progress.
p = ttk.Progressbar(root, orient="h", length=200, mode='determinate', maximum=100, value=0)
p.pack()
if lang == 0:
    l.configure(text="インストール中です。")
else:
    l.configure(text="Installing.")
# NOTE(review): ttk normally expects state="disabled"/"normal"; confirm that
# the abbreviations "disable"/"active" used here actually work on the target Tk.
b1.config(state="disable")
try:
    if not os.path.exists(i_dir):
        os.mkdir(i_dir)
        p.configure(value=10)
        p.update()
except PermissionError:
    try:
        # Bug fix: the mode must be octal. The original passed decimal 777
        # (== 0o1411), which sets nonsensical permission bits.
        os.chmod(os.path.dirname(i_dir), 0o777)
        os.mkdir(i_dir)
        p.configure(value=10)
        p.update()
    except PermissionError:
        if os.name == 'nt':
            if lang == 0:
                tkmsg.showerror("エラー","権限エラー:\n管理者権限で実行してください。")
                exit()
            else:
                tkmsg.showerror("Error","PermissionError:\nPlease run in administer.")
                exit()
        else:
            if lang == 0:
                tkmsg.showerror("エラー","権限エラー:\nroot権限で('sudo'を付けて)再実行してください。")
                exit()
            else:
                tkmsg.showerror("Error","PermissionError:\nPlease run in root.(add 'sudo')")
                exit()
############add install command here#########
root.wait_variable(step)
#############################################
p.configure(value=100)
p.update()
if lang == 0:
    l.configure(text="完了しました。")
    b1.configure(text="終了")
else:
    l.configure(text="Done.")
    b1.configure(text="Exit")
b1.config(state="active")
root.wait_variable(step)
exit()
# NOTE(review): unreachable -- exit() above always terminates first.
root.mainloop()
| 2.578125 | 3 |
menustoolbars/simple_menu.py | gnthibault/PyQt6-Tutorial-Examples | 38 | 12768978 | <gh_stars>10-100
#!/usr/bin/python
"""
ZetCode PyQt6 tutorial
This program creates a menubar. The
menubar has one menu with an exit action.
Author: <NAME>
Website: zetcode.com
"""
import sys
from PyQt6.QtWidgets import QMainWindow, QApplication
from PyQt6.QtGui import QIcon, QAction
class Example(QMainWindow):
    """Main window demonstrating a menubar with a single File > Exit action."""
    def __init__(self):
        super().__init__()
        self.initUI()
    def initUI(self):
        """Build the Exit action, status bar and File menu, then show the window."""
        exitAct = QAction(QIcon('exit.png'), '&Exit', self)
        exitAct.setShortcut('Ctrl+Q')
        exitAct.setStatusTip('Exit application')
        # Quit the whole application when the action is triggered.
        exitAct.triggered.connect(QApplication.instance().quit)
        # Creating the status bar lets setStatusTip text appear on hover.
        self.statusBar()
        menubar = self.menuBar()
        fileMenu = menubar.addMenu('&File')
        fileMenu.addAction(exitAct)
        self.setGeometry(300, 300, 350, 250)
        self.setWindowTitle('Simple menu')
        self.show()
def main():
    # Standard PyQt bootstrap: one QApplication, one window, event loop.
    app = QApplication(sys.argv)
    ex = Example()
    sys.exit(app.exec())
if __name__ == '__main__':
    main()
| 3.453125 | 3 |
two_d_nav/tests/test_astar.py | ZikangXiong/two-d-nav-gym | 0 | 12768979 | <filename>two_d_nav/tests/test_astar.py
import numpy as np
from two_d_nav.elements import VelRobot, Cat, create_maze
from two_d_nav.engine import MazeNavigationEngine
from two_d_nav.search import constants as c
from two_d_nav.search.a_star import Agent
from two_d_nav.search.location import Location
from two_d_nav.search.state import State
def define_walls(board, lines):
    """Mark every wall segment on *board* as impassable (c.INFINITY).

    Each entry of *lines* is (_, (x1, y1), (x2, y2), thickness); segments are
    axis-aligned. Cells use the 1-based pixel coordinates, hence the -1 shifts.
    """
    for wall in lines:
        x1, y1 = wall[1][0], wall[1][1]
        x2, y2 = wall[2][0], wall[2][1]
        thickness = wall[3]
        if y1 == y2:
            # Horizontal segment: thicken downwards in y.
            left = min(x1, x2)
            for offset in range(thickness):
                for dx in range(abs(x1 - x2)):
                    board[y1 + offset - 1][left + dx - 1] = c.INFINITY
        else:
            # Vertical segment: thicken rightwards in x.
            top = min(y1, y2)
            for offset in range(thickness):
                for dy in range(abs(y1 - y2)):
                    board[top + dy - 1][x1 + offset - 1] = c.INFINITY
    return board
def define_obstacles(board, obs_list):
    """Mark each obstacle's square footprint (c.OBSTACLE_SIZE per side) on *board*."""
    side = c.OBSTACLE_SIZE
    for obstacle in obs_list:
        left, top = int(obstacle.init_x), int(obstacle.init_y)
        for col in range(left, left + side):
            for row in range(top, top + side):
                # -1: obstacle coordinates are 1-based relative to the grid.
                board[row - 1][col - 1] = c.INFINITY
    return board
def create_board(lines, obs_list):
    """Build the square occupancy grid for the search.

    Free cells are 0.0; wall and obstacle cells are c.INFINITY.
    Returns (board, rows, cols) where rows == cols == c.BOARD_SIZE.
    """
    side = c.BOARD_SIZE
    board = np.zeros(shape=(side, side))
    board = define_obstacles(define_walls(board, lines), obs_list)
    return board, side, side
def deconstruct_path(path):
    """Translate a list of waypoint Locations into discrete move actions.

    A change of +-c.STEP in .col maps to "up"/"down" and in .row to
    "left"/"right" (the grid convention of the search module). Returns one
    action per consecutive waypoint pair.
    """
    actions = []
    for i in range(len(path) - 1):
        current = path[i]
        # Bug fix: the original compared path[i] with itself (`_next = path[i]`),
        # which skipped the first iteration and dropped the final transition.
        _next = path[i + 1]
        if current.col - c.STEP == _next.col:
            actions.append("up")
        elif current.col + c.STEP == _next.col:
            actions.append("down")
        elif current.row - c.STEP == _next.row:
            actions.append("left")
        elif current.row + c.STEP == _next.row:
            actions.append("right")
    return actions
def run_search(board, board_rows, board_cols):
    # Run A* from the configured start to the configured goal on the grid,
    # then translate the resulting waypoint path into discrete move actions.
    start = Location(c.START_POS_X, c.START_POS_Y)
    goal = Location(c.GOAL_POS_X, c.GOAL_POS_Y)
    # State expects the board flattened row-major plus its dimensions.
    state = State(board.flatten(), start, board_rows, board_cols)
    ag = Agent(state)
    path, pathCost = ag.a_star_search(start, goal)
    actions = deconstruct_path(path)
    return actions
def print_actions(actions):
    """Print the action list as run-length groups, one line per run.

    E.g. ["up", "up", "left"] prints " 2   up" then " 1   left".
    Handles an empty list (prints nothing); the original version crashed on
    empty input and silently dropped a trailing action that differed from
    its predecessor.
    """
    current = None
    counter = 0
    for action in actions:
        if action == current:
            counter += 1
        else:
            if counter:
                print("", counter, " ", current)
            current = action
            counter = 1
    if counter:
        # Flush the final run.
        print("", counter, " ", current)
def test_engine():
    # Integration smoke test: builds the maze, runs A* over it, prints the
    # resulting action plan, then launches the interactive engine.
    # See the environment in the envs folder.
    _robot = VelRobot(100, 700)
    obs1 = Cat(400.0, 600.0)
    obs2 = Cat(700.0, 100.0)
    obs3 = Cat(300.0, 500.0)
    obs4 = Cat(150.0, 200.0)
    obs5 = Cat(350.0, 250.0)
    obs_list = [obs1, obs2, obs3, obs4, obs5]
    _maze = create_maze(1)
    board, board_rows, board_cols = create_board(_maze.lines, obs_list)
    actions = run_search(board, board_rows, board_cols)
    print("\n\nFollowing are the actions to take:\n")
    print_actions(actions)
    print("\n\n")
    # Blocks until the GUI engine window is closed.
    eng = MazeNavigationEngine(_robot, obs_list, _maze)
    eng.run()
if __name__ == '__main__':
    test_engine()
| 2.90625 | 3 |
CalibTracker/SiStripChannelGain/test/Gains_CT_step2.py | PKUfudawei/cmssw | 1 | 12768980 | # Auto generated configuration file
# using:
# Revision: 1.381.2.28
# Source: /local/reps/CMSSW/CMSSW/Configuration/PyReleaseValidation/python/ConfigBuilder.py,v
# with command line options: step4 --data --conditions auto:com10 --scenario pp -s ALCAHARVEST:SiStripGains --filein file:PromptCalibProdSiStripGains.root -n -1 --no_exec
import FWCore.ParameterSet.Config as cms
from FWCore.ParameterSet.VarParsing import VarParsing
# Command-line options for this cmsRun configuration.
options = VarParsing("analysis")
options.register("globalTag", "auto:run3_data_express", VarParsing.multiplicity.singleton, VarParsing.varType.string, "Global tag (express, to check the homogeneity of the calibration range)")
options.register("outputDbFile", "sqlite_file:promptCalibConditions.db", VarParsing.multiplicity.singleton, VarParsing.varType.string, "Connection string of output database")
# Fixed typo: the help string was missing its closing parenthesis.
options.register("fitMethod", "Legacy", VarParsing.multiplicity.singleton, VarParsing.varType.string, "Fit strategy (Legacy, DDRng, DDRngAllConv, or DDRngConvExceptTOBL5L6)")
options.register("DQMOutput", False, VarParsing.multiplicity.singleton, VarParsing.varType.bool, "Produce DQM output")
options.parseArguments()
process = cms.Process('ALCAHARVEST')
# import of standard configurations
process.load('Configuration.StandardSequences.Services_cff')
process.load('SimGeneral.HepPDTESSource.pythiapdt_cfi')
process.load('FWCore.MessageService.MessageLogger_cfi')
process.load('Configuration.EventContent.EventContent_cff')
process.load('Configuration.StandardSequences.AlCaHarvesting_cff')
process.load('Configuration.Geometry.GeometryRecoDB_cff')
process.load('Configuration.StandardSequences.MagneticField_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.maxEvents = cms.untracked.PSet(
    input = cms.untracked.int32(-1)
)
# Input source
process.source = cms.Source("PoolSource",
    secondaryFileNames = cms.untracked.vstring(),
    fileNames = cms.untracked.vstring(options.inputFiles),
    processingMode = cms.untracked.string('RunsAndLumis')
)
process.options = cms.untracked.PSet(
    Rethrow = cms.untracked.vstring('ProductNotFound'),
    fileMode = cms.untracked.string('FULLMERGE')
)
# Production Info
process.configurationMetadata = cms.untracked.PSet(
    version = cms.untracked.string('$Revision: 1.381.2.28 $'),
    annotation = cms.untracked.string('step4 nevts:-1'),
    name = cms.untracked.string('PyReleaseValidation')
)
# Output definition
process.load("Configuration.StandardSequences.DQMSaverAtJobEnd_cff") ## multi-run
## temporary workaround
process.load("FWCore.Services.InitRootHandlers_cfi")
process.InitRootHandlers.ResetRootErrHandler = cms.untracked.bool(False)
# Additional output definition
# Other statements
process.PoolDBOutputService.toPut.append(process.ALCAHARVESTSiStripGains_dbOutput)
process.pclMetadataWriter.recordsToMap.append(process.ALCAHARVESTSiStripGains_metadata)
from Configuration.AlCa.GlobalTag import GlobalTag
process.GlobalTag = GlobalTag(process.GlobalTag, options.globalTag, '')
process.PoolDBOutputService.connect = cms.string(options.outputDbFile)
# Path and EndPath definitions
process.ALCAHARVESTDQMSaveAndMetadataWriter = cms.Path(process.dqmSaver+process.pclMetadataWriter)
process.SiStripGains = cms.Path(process.ALCAHARVESTSiStripGains)
process.dqmSaver.saveAtJobEnd = cms.untracked.bool(options.DQMOutput)
# options.outputFile is a built-in VarParsing "analysis" option; when given,
# also dump the gains tree into a ROOT file via TFileService.
if options.outputFile:
    process.alcaSiStripGainsHarvester.StoreGainsTree = cms.untracked.bool(True)
    process.TFileService = cms.Service("TFileService", fileName = cms.string(options.outputFile))
process.alcaSiStripGainsHarvester.GoodFracForTagProd = cms.untracked.double(.95)
process.alcaSiStripGainsHarvester.NClustersForTagProd = cms.untracked.double(2.e8)
# Map the fitMethod option onto the three harvester fit flags.
if options.fitMethod == "Legacy":
    process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(False)
    process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(False)
    process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(False)
elif options.fitMethod == "DDRng":
    process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(True)
    process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(False)
    process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(False)
elif options.fitMethod == "DDRngAllConv":
    process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(True)
    process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(True)
    process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(True)
elif options.fitMethod == "DDRngConvExceptTOBL5L6":
    process.alcaSiStripGainsHarvester.FitDataDrivenRange = cms.untracked.bool(True)
    process.alcaSiStripGainsHarvester.FitGaussianConvolution = cms.untracked.bool(True)
    process.alcaSiStripGainsHarvester.FitGaussianConvolutionTOBL5L6 = cms.untracked.bool(False)
else:
    raise RuntimeError("Unknown fit method: {0}".format(options.fitMethod))
# Schedule definition
process.schedule = cms.Schedule(process.SiStripGains,
                                process.ALCAHARVESTDQMSaveAndMetadataWriter)
#process.alcaSiStripGainsHarvester.calibrationMode = cms.untracked.string("IsoBunch")
| 1.539063 | 2 |
Dermis2.py | cheapjack/StasisCraft | 0 | 12768981 | <reponame>cheapjack/StasisCraft<gh_stars>0
#!/usr/bin/python
#Install the modules we need
#from pyfirmata import Arduino, util, INPUT
from mcpi import minecraft
from mcpi import minecraftstuff
from time import sleep
import server
import serial
# Use the command /getpos or F3 in Minecraft client to find out where you are then use those
# x, y, z coordinates to build things
# translate mc coords for mcpi ones
# add this to x
mcx = 177  # world->mcpi x offset: add this to a Minecraft x coordinate
# - this from y
mcy = 64  # world->mcpi y offset: subtract this from a Minecraft y coordinate
# - this from z
mcz = 135  # world->mcpi z offset: subtract this from a Minecraft z coordinate
# Connect to the server we use the imported server.py to make it work with CloudMaker
mc = minecraft.Minecraft.create(server.address)
#Post a message to the minecraft chat window
mc.postToChat("Ready to read Dermis Temperature 1!")
# Latched "sensor full" flags, one per listener, so chat warnings fire only
# once per episode. dermisfull3 is declared but never used in this file.
dermisfull = False
dermisfull2 = False
dermisfull3 = False
# Text Bubble 1
# use `/js blocktype("My\nMessage", blocktypenumbercode) to build text and note \n represents a new line
def MemoryCloud1(startx,starty,startz, chartwidth, chartheight, chartdepth, blocktype, blockid):
    """Build a speech-bubble-shaped cuboid of blocks at world coordinates.

    Coordinates are Minecraft world coords; the mcx/mcy/mcz module offsets
    translate them into mcpi server coordinates.
    """
    # Main Bubble
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + chartwidth, (starty-mcy) + chartheight, (startz - mcz) + chartdepth, blocktype, blockid)
    # inset bottom
    mc.setBlocks((startx + mcx) + 1, (starty-mcy) - 1, (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) -1, (startz - mcz) + chartdepth, blocktype, blockid)
    #inset top
    mc.setBlocks((startx + mcx) + 1, (starty-mcy) + (chartheight + 1), (startz-mcz), (startx + mcx) + (chartwidth-1), (starty-mcy) + (chartheight + 1), (startz - mcz) + chartdepth, blocktype, blockid)
# define a barchart function
def DermisTemperatureBlock(startx, starty, startz, dermiswidth, dermisheight, blocktype, blockid):
    """Build an open-topped glass "thermometer" chamber with a lit floor.

    dermiswidth/dermisheight give the inner footprint and wall height;
    blocktype/blockid set the stage and under-floor trim material.
    """
    # Make a stage
    mc.setBlocks((startx + mcx) - 2, (starty-mcy), (startz-mcz) - 2, (startx + mcx) + (dermiswidth + 2), (starty-mcy), (startz - mcz) + (dermiswidth + 2), blocktype, blockid)
    # Make glass walls
    mc.setBlocks((startx + mcx) - 1, (starty-mcy), (startz-mcz) - 1, (startx + mcx) + dermiswidth + 1, (starty-mcy) + dermisheight, (startz - mcz) + 1 + dermiswidth, 20)
    # Hollow inside of walls
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + dermiswidth, (starty-mcy) + dermisheight, (startz - mcz) + dermiswidth, 0)
    #Take off the 'lid'
    mc.setBlocks((startx + mcx), (starty-mcy)+dermisheight, (startz-mcz), (startx + mcx) + dermiswidth, (starty-mcy) + dermisheight, (startz - mcz) + (dermiswidth), 0)
    # Make an underfloor light
    mc.setBlocks((startx + mcx) - 1, (starty-mcy) - 1, (startz-mcz) - 1, (startx + mcx) + dermiswidth + 1, (starty-mcy) - 1, (startz - mcz) + dermiswidth + 1, 89)
    mc.setBlocks((startx + mcx), (starty-mcy) - 1, (startz-mcz), (startx + mcx) + dermiswidth, (starty-mcy) - 1, (startz - mcz) + (dermiswidth), blocktype, blockid)
def HairDown(startx, starty, startz, hairheight, blocktype, blockid):
    """Lay the "hair" flat: draw the horizontal row, erase the vertical one."""
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + hairheight, (starty-mcy), (startz - mcz), blocktype, blockid)
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + hairheight, (startz - mcz), 0)
def HairUp(startx, starty, startz, hairheight, blocktype, blockid):
    """Raise the "hair": erase the horizontal row, draw the vertical one."""
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + hairheight, (starty-mcy), (startz - mcz), 0)
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + hairheight, (startz - mcz), blocktype, blockid)
def VasoDilate(startx, starty, startz, dilation, blocktype, blockid):
    """Widen the blood-vessel strip to *dilation* blocks deep."""
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + 20, (starty-mcy), (startz - mcz) - dilation , blocktype, blockid)
def VasoConstrict(startx, starty, startz, dilation, blocktype, blockid):
    """Narrow the vessel: overwrite the wide strip, then redraw a thin one."""
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx) + 20, (starty-mcy), (startz - mcz) - 4 , 35, 2)
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz)-1, (startx + mcx) + 20, (starty-mcy), (startz - mcz) - dilation , blocktype, blockid)
# Gonna make you sweat
def Sweat(startx, starty, startz, sweatheight, blocktype):
    """Draw a vertical column of *blocktype* (a sweat droplet trail)."""
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + sweatheight, (startz - mcz), blocktype)
def NoSweat(startx, starty, startz, sweatheight, blocktype):
    """Erase the column drawn by Sweat (blocktype is accepted but unused)."""
    mc.setBlocks((startx + mcx), (starty-mcy), (startz-mcz), (startx + mcx), (starty-mcy) + sweatheight, (startz - mcz), 0)
# Hair Response
def DermisListener1(startx, starty, startz, dermisno):
    # Poll one "skin temperature" sensor block and drive the hair response:
    # any non-air block at the probe position means the sensor is full/hot.
    # The dermisfull latch makes the chat warnings fire only on the rising edge.
    global dermisfull
    #Listen for blocks filling up leve
    # NOTE(review): mcpi getBlock normally takes 3 coordinates; the trailing
    # ", 1" argument here looks spurious -- confirm against the mcpi API used.
    blockType = mc.getBlock((startx + mcx), (starty - mcy), (startz - mcz), 1)
    if blockType != 0:
        print "It's really hot in SkinTemp ", dermisno, blockType
        sleep(0.5)
        #print "1st", dermisfull
        #mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!")
        HairDown(394, 89, -1150, 20, 35, 12)
        if not dermisfull:
            # Rising edge: announce once and lay the hair down.
            mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!")
            mc.postToChat("Triggering dermis Hair response")
            mc.postToChat("Hypothalamus you need to reduce temperature!")
            mc.postToChat("Hypothalamus, direct your response team!")
            sleep(0.5)
            HairDown(394, 89, -1150, 20, 35, 12)
            sleep(0.5)
            dermisfull = True
            #print "2nd", dermisfull
    else:
        if dermisfull:
            # Falling edge: sensor cleared, raise the hair again.
            HairUp(394, 89, -1150, 20, 35, 12)
            print "Nothing from " + str(dermisno)
            dermisfull = False
            #print dermisFull
    sleep(5)
# Vascular Dilation
def DermisListener2(startx, starty, startz, dermisno):
    # Poll the second sensor block and drive the vascular response:
    # dilate the vessel while the sensor reads non-air, constrict when it
    # clears. dermisfull2 latches the state so chat fires once per edge.
    global dermisfull2
    #Listen for blocks filling up level
    blockType = mc.getBlock((startx + mcx), (starty - mcy), (startz - mcz))
    if blockType != 0:
        print "It's really hot in SkinTemp " , dermisno, blockType
        sleep(0.5)
        #print "1st", dermisfull2
        #mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!")
        VasoDilate(393, 87, -1161, 4, 35, 14)
        #HairUp(394, 89, -1150, 20, 35, 12)
        if not dermisfull2:
            # Rising edge: announce once and dilate.
            mc.postToChat("WARNING! Dermis Sensor" + str(dermisno) + " is full!")
            mc.postToChat("Hypothalamus you need to reduce temperature!")
            sleep(0.5)
            VasoDilate(393, 87, -1161, 4, 35, 14)
            sleep(0.5)
            dermisfull2 = True
            #print "2nd", dermisfull2
    else:
        if dermisfull2:
            # Falling edge: sensor cleared, constrict the vessel again.
            #HairDown(394, 89, -1150, 20, 35, 12)
            VasoConstrict(393, 87, -1161, 2, 35, 14)
            print "Nothing from " + str(dermisno)
            dermisfull2 = False
            #print dermisFull2
    sleep(5)
# Build First blocks
# The MemoryCloud needs more x and less z to centre it
# Overwrite Memory Text
#MemoryCloud1(288, 105, -1164, 20, 12, 2, 35, 8)
DermisTemperatureBlock(285, 89, -1160, 4, 10, 35, 4)
# Second block
#MemoryCloud1(334, 105, -1164, 20, 12, 2, 35, 6)
DermisTemperatureBlock(332, 89, -1160, 4, 10, 35, 4)
# Draw window into epidermis to observe vaso-dilation
VasoDilate(393, 88, -1161, 4, 20, 0)
# Draw a sweat example
#Sweat(399, 84, -1176, 5, 8)
#Main Loop
# Poll both sensors forever; each listener sleeps 5s, so one cycle ~10s.
while True:
    #Make a DermisListeners() and listen
    #arguments are startx, starty, startz, dermiswidth, dermisheight, dermisno
    # with the same width and thickness as the corresponding DermisTemperatureBlocks
    # SkinTemp 1 Hair Response
    #DermisListener1(285, 89, -1160, 1)
    DermisListener1(287, 99, -1158, 1)
    # Skin Temp2 Vascular system response
    DermisListener2(334, 99, -1158, 2)
    #DermisListener2(332, 99, -1155, 4, 10, 1)
    #DermisListener3(332, 89, -1160, 4, 10, 2)
#while True:
# Remember your chart is (x_coord, x_coord, x_coord, chartwidth, dermisheight, block block id(usually 0))
# TemperatureChart1(394, 68, -326, 2, 40, 35, 5)
#TemperatureChart2(394, 68, -318, 2, 40, 35, 4)
#TemperatureChart3(394, 68, -310, 2, 40, 35, 4)
#TemperatureChart4(394, 68, -302, 2, 40, 35, 4)
#TemperatureChart5(394, 68, -294, 2, 40, 35, 4)
# NOTE(review): unreachable -- the while True loop above never exits.
print "stopped"
| 2.46875 | 2 |
data/datasets/.ipynb_checkpoints/aic-checkpoint.py | lxc86739795/vehicle_reid_by_parsing | 36 | 12768982 | <gh_stars>10-100
# encoding: utf-8
"""
@author: <NAME>
@contact: <EMAIL>
"""
import glob
import re
import os.path as osp
from .bases import ImageDataset
import warnings
class AICity19(ImageDataset):
    """
    AICYTY
    Reference:
    Zheng et al. CityFlow: A City-Scale Benchmark for Multi-Target Multi-Camera Vehicle Tracking and Re-Identification. CVPR 2019.
    URL: https://github.com/zhengthomastang
    Dataset statistics:
    # identities: 666
    # images: 36935 (train) + 1052 (query) + 18290 (gallery)
    # in practice the query and gallery is from veri
    """
    dataset_dir = 'aic19'
    def __init__(self, root='/home/liuxinchen3/notespace/data', verbose=True, **kwargs):
        # Resolve the three split directories under <root>/aic19 and verify
        # they exist before scanning.
        self.dataset_dir = osp.join(root, self.dataset_dir)
        self.train_dir = osp.join(self.dataset_dir, 'image_train_offset')
        self.query_dir = osp.join(self.dataset_dir, 'image_query_eval')
        self.gallery_dir = osp.join(self.dataset_dir, 'image_test_eval')
        required_files = [
            self.dataset_dir,
            self.train_dir,
            self.query_dir,
            self.gallery_dir
        ]
        self.check_before_run(required_files)
        # Only the train split is relabeled to contiguous 0..N-1 ids;
        # query/gallery keep the raw vehicle ids for evaluation.
        train = self._process_dir(self.train_dir, relabel=True)
        query = self._process_dir(self.query_dir, relabel=False)
        gallery = self._process_dir(self.gallery_dir, relabel=False)
        self.train = train
        self.query = query
        self.gallery = gallery
        super(AICity19, self).__init__(train, query, gallery, **kwargs)
    def _process_dir(self, dir_path, relabel=False):
        # Scan *.jpg files named like "<pid>_c<camid 3 digits>..." and return
        # a list of (img_path, pid, camid) tuples.
        img_paths = glob.glob(osp.join(dir_path, '*.jpg'))
        pattern = re.compile(r'([\d]+)_c(\d\d\d)')
        pid_container = set()
        for img_path in img_paths:
            pid, _ = map(int, pattern.search(img_path).groups())
            if pid == -1: continue # junk images are just ignored
            pid_container.add(pid)
        # Map raw vehicle ids to contiguous training labels.
        pid2label = {pid: label for label, pid in enumerate(pid_container)}
        dataset = []
        for img_path in img_paths:
            pid, camid = map(int, pattern.search(img_path).groups())
            if pid == -1: continue # junk images are just ignored
            #print('img_path:', img_path, ' pid : ', pid, ' camid : ', camid)
            #assert 0 <= pid
            #assert 0 <= camid
            camid -= 1 # index starts from 0
            if relabel: pid = pid2label[pid]
            dataset.append((img_path, pid, camid))
        return dataset
| 2.140625 | 2 |
def get_msg_with_punctuations(old, new):
    """Re-apply *old*'s punctuation and capitalisation onto *new*.

    *new* must be the alphabetic-only counterpart of *old* (e.g. a cipher
    applied to get_alphabetic_msg(old)): one letter in *new* per alphabetic
    character of *old*. Non-alphabetic characters are copied from *old*, and
    letters are upper-cased wherever *old* had an upper-case letter.

    Rewritten as a single pass with str.join; the original re-inserted
    characters into an immutable string inside a loop, which is O(n**2).
    """
    letters = iter(new)
    rebuilt = []
    for ch in old:
        if ch.isalpha():
            letter = next(letters)
            rebuilt.append(letter.upper() if ch.isupper() else letter)
        else:
            # Punctuation/whitespace comes straight from the original text.
            rebuilt.append(ch)
    return "".join(rebuilt)
def get_alphabetic_msg(text):
    """Return *text* with every non-alphabetic character removed.

    Rewritten with str.join over a generator; the original built the result
    with repeated string concatenation, which is O(n**2).
    """
    return ''.join(ch for ch in text if ch.isalpha())
tests/test_dimension.py | ckauth/eurostat-api-client | 4 | 12768984 | from eurostatapiclient.models.dimension import Category, BaseItem, ItemList, \
Dimension
import unittest
class TestCategory(unittest.TestCase):
    """Unit tests for the Category model."""

    def test_properties(self):
        """Constructor arguments are exposed unchanged as attributes."""
        category = Category('ID0', 4, 'label with text')
        self.assertEqual(category.id, 'ID0')
        self.assertEqual(category.index, 4)
        self.assertEqual(category.label, 'label with text')
class TestBaseItem(unittest.TestCase):
    """Unit tests for BaseItem and the ItemList container."""

    def test_properties(self):
        """Constructor arguments are exposed unchanged as attributes."""
        item = BaseItem('ID0', 4, 'label with text')
        self.assertEqual(item.id, 'ID0')
        self.assertEqual(item.index, 4)
        self.assertEqual(item.label, 'label with text')

    def test_item_list_assignation(self):
        """Assigning a plain value by index must raise ValueError."""
        items = ItemList()
        with self.assertRaises(ValueError):
            items[0] = 'd'

    def test_item_list_count(self):
        """len() and the .count property track appends in lockstep."""
        items = ItemList()
        self.assertEqual(len(items), 0)
        self.assertEqual(items.count, 0)
        for index in (0, 1):
            items.append(BaseItem('id', index, 'label'))
        self.assertEqual(len(items), 2)
        self.assertEqual(items.count, 2)
class TestDimension(unittest.TestCase):
    """Unit tests for Dimension."""

    def test_add_category(self):
        """add_category grows the categories collection by one."""
        dimension = Dimension('ID0', 4, 'label with text', 2)
        category = Category('ID0', 4, 'label with text')
        self.assertEqual(dimension.categories.count, 0)
        dimension.add_category(category)
        self.assertEqual(dimension.categories.count, 1)

    def test_create_from_json(self):
        """create_from_json parses categories ordered by the index map."""
        json = {
            'label': "time",
            'category': {
                'index': {
                    '2010': '1',
                    '2011': '0'
                },
                'label': {
                    '2010': '2010',
                    '2011': 'test'
                },
            }
        }
        dimension = Dimension.create_from_json('ID0', 4, 5, json)
        self.assertEqual(dimension.categories.count, 2)
        self.assertEqual(dimension.label, 'time')
        self.assertEqual(dimension.size, 5)
        # '2011' has index 0, so it must come first in the category list.
        self.assertEqual(dimension.categories[0].label, 'test')
| 2.515625 | 3 |
ds4se/_nbdev.py | rmclanton/ds4se | 0 | 12768985 | # AUTOGENERATED BY NBDEV! DO NOT EDIT!
__all__ = ["index", "modules", "custom_doc_links", "git_url"]

# Mapping of exported symbol -> defining notebook (empty for this project).
index = {}

# Library modules generated from notebooks.
# NOTE(review): the original list literal was corrupted in transit
# (`modules = [].py", ...`); "exp/info.py" is the only module name that
# survived -- confirm against the project's notebooks before relying on it.
modules = ["exp/info.py"]

doc_url = "https://ncoop57.github.io/ds4se/"

git_url = "https://github.com/ncoop57/ds4se/tree/master/"

def custom_doc_links(name): return None
| 1.296875 | 1 |
scripts/ingest/main.py | shlomo88/Nietzsche | 41 | 12768986 | import os
import json
import requests
from elasticsearch import Elasticsearch, RequestsHttpConnection, helpers, exceptions
# Connection settings come from the environment; HOST is expected to be a
# URL (e.g. "https://host"), split below to drop the scheme.
HOST = os.environ.get('ES_ENDPOINT')
INDEX = os.environ.get('ES_INDEX')
QUOTES_DUMP = os.environ.get('QUOTES_DUMP')
headers = {"Content-Type": "application/json"}
def main():
    # Load the quotes JSON dump and index each quote document into
    # Elasticsearch over HTTPS, one request per quote.
    try:
        es = Elasticsearch(
            hosts=[{
                "host": HOST.split('//')[1],
                "port": 443
            }],
            use_ssl=True,
            verify_certs=True,
            connection_class=RequestsHttpConnection,
        )
        with open(QUOTES_DUMP) as file:
            quotes = json.load(file)
        print(f"Ingesting {len(quotes)} quotes into {INDEX} index.")
        for quote in quotes:
            # Each quote's own 'id' field is reused as the document id, so
            # re-running the script overwrites rather than duplicates.
            response = es.index(index=INDEX,
                                doc_type='_doc',
                                id=quote['id'],
                                body=quote,
                                request_timeout=60)
            print("ElasticSearchService: Index creation response: ", response)
    except Exception as exception:
        # print some context about this error
        print(exception)
        raise exception
if __name__ == "__main__":
    main()
| 2.734375 | 3 |
mysite/LibreBadge/migrations/0014_auto_20200317_0237.py | energized-id/energized-id | 8 | 12768987 | # Generated by Django 3.0.3 on 2020-03-17 02:37
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: changes BadgeTemplate.badge into a plain
    # FileField stored at the MEDIA_ROOT (empty upload_to).
    dependencies = [
        ('LibreBadge', '0013_auto_20200317_0236'),
    ]
    operations = [
        migrations.AlterField(
            model_name='badgetemplate',
            name='badge',
            field=models.FileField(upload_to=''),
        ),
    ]
| 1.335938 | 1 |
src/notifier_script/notifier_script.pyw | kroutsis/Lan-Notifications | 3 | 12768988 | #<NAME>
import socket, threading, sys, time, os
import tkinter as tk
# (address, name) pairs learned from clients, used to label checkbuttons.
client_list = []
# addr -> socket for every currently connected client.
connections = {}
# addr -> per-client UI state dict (widget, var, checked, color, canvas, ...).
check_buttons = {}
# Module-level Tk root and the widgets shared between gui() rebuilds.
window = tk.Tk()
text_header = tk.StringVar()
text_message_wid = tk.Text(window, height=5, width=40, font=("Calibri"))
outer_frame = tk.Frame(window)
cb_canvas = tk.Canvas(outer_frame, bd=0, height=180, width=300)
cb_frame = tk.Frame(cb_canvas)
check_button_all_var = tk.BooleanVar()
def gui(start_server_thread = False):
    """Build (or rebuild) the notifier window.

    Called once at startup with start_server_thread=True, and again whenever
    the client list changes so the per-client checkbuttons are regenerated.
    """
    if start_server_thread:
        #create server thread
        server_thread = threading.Thread(target=start_server)
        server_thread.start()
    #static tkinter widgets
    window.title('LAN-Notifier')
    window.resizable(width=False, height=False)
    # NOTE(review): this builds a tuple, not a string; Tk renders it with
    # braces. HOST/PORT are defined elsewhere in the file -- confirm.
    lbl_text = "SERVER",HOST,"(",PORT,")"
    lbl = tk.Label(window, text=lbl_text, font=("Arial Bold", 12))
    lbl.grid(column=0, row=0)
    text_header_wid = tk.Entry(window, width=40, textvariable=text_header, font=("Calibri"))
    text_header_wid.grid(column=0, row=3, pady=5)
    text_message_wid.grid(column=0, row=4, padx=10, pady=5)
    clear_btn = tk.Button(window, text=" NEW ", command= lambda: clear_it())
    clear_btn.grid(column=0, row=5, sticky="nw", padx=10, pady=5)
    send_btn = tk.Button(window, text=" SEND ", command= lambda: send_it())
    send_btn.grid(column=0, row=5, sticky="ne", padx=10, pady=5)
    ysb = tk.Scrollbar(outer_frame, orient="vertical", command=cb_canvas.yview)
    ysb.grid(column=1, row=0, sticky="ns")
    cb_canvas.configure(yscrollcommand=ysb.set)
    cb_canvas.grid(column=0, row=0, padx=2)
    outer_frame.grid(column=0, row=2)
    def on_closing():
        # Confirm quit, then hard-exit so the server thread dies too.
        from tkinter import messagebox
        if messagebox.askokcancel("Quit", "Do you want to quit?"):
            window.destroy()
            #sys.exit()
            os._exit(0)
    def set_cb_values(key):
        # Mirror a checkbutton's BooleanVar into its state dict and keep the
        # "All" checkbutton consistent with the individual ones.
        if key['var'].get() == True:
            key['checked'] = True
        else:
            key['checked'] = False
            check_button_all.deselect()
        if all_checked():
            check_button_all.select()
    def manage_msg(txt_msg):
        # Word-wrap any line longer than 40 chars by breaking at the last
        # space within the first 40 columns, recursing until no long lines
        # remain (the protocol's display width is 40 characters).
        msg_lines = txt_msg.split("\n")
        count = 0
        for i, line in enumerate(msg_lines):
            if len(line) > 40:
                count += 1
                tmp_line = line[:40]
                msg_lines[i] = line[:tmp_line.rfind(" ")]+"\n"+line[tmp_line.rfind(" ")+1:40]+line[40:]
                txt_msg = "\n".join(msg_lines)
                return manage_msg(txt_msg)
        if count == 0:
            return txt_msg
    def send_it():
        # Validate the header/message, build "HH:MM:SS|header|message" and
        # send it to every checked client that has not received it yet
        # (color "green" marks delivered, "red" marks sent-not-acked).
        already_sent = True
        text_message_wid.config(state=tk.DISABLED)
        txt_head = text_header.get()
        txt_msg = text_message_wid.get("1.0", "end-1c")
        txt_msg = manage_msg(txt_msg)
        if txt_head == "" or txt_msg == "":
            from tkinter import messagebox
            messagebox.showwarning('Send Message','Write a header and a message.\n Select clients.\n Then press SEND button.')
            return False
        send_time = time.strftime("%H:%M:%S", time.localtime())
        msg = send_time + "|" + txt_head + "|" + txt_msg
        txt_msg = txt_msg.replace('\n', ' ')
        log = "["+get_time()+"] MESSAGE: "+txt_head+"|"+txt_msg+" sent to user(s):\n"
        write_log_file(log)
        for addr in check_buttons:
            if check_buttons[addr]['checked'] == True and check_buttons[addr]['color'] != "green":
                log = str(addr)+", "
                write_log_file(log)
                already_sent = False
                check_buttons[addr]['sock'].send(msg.encode())
                time.sleep(1)
                if check_buttons[addr]['color'] == "":
                    check_buttons[addr]['color'] = "red"
                check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
                    fill=check_buttons[addr]['color'])
        write_log_file("\n")
        if already_sent == True:
            from tkinter import messagebox
            messagebox.showwarning('Send Message','This message is already sent to all selected clients.\n Press NEW button to send a new message.')
            return False
    def clear_it():
        # Re-enable editing and reset every client's delivery indicator.
        text_message_wid.config(state=tk.NORMAL)
        #text_header_wid.delete(0, tk.END)
        text_message_wid.delete("1.0", "end-1c")
        for addr in check_buttons:
            check_buttons[addr]['color'] = ""
            check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
                fill=check_buttons[addr]['color'])
    def check_all():
        # "All" checkbutton callback: select every client checkbutton.
        if check_button_all_var.get() == True:
            for addr in check_buttons:
                check_buttons[addr]['checked'] = True
                check_buttons[addr]['widget'].select()
    def all_checked():
        # True iff every client checkbutton is currently checked.
        for addr in check_buttons:
            if check_buttons[addr]['checked'] == True:
                continue
            else:
                return False
        return True
    def on_mousewheel(event):
        # Windows-style wheel delta (multiples of 120) -> canvas scroll units.
        cb_canvas.yview_scroll(-1*(event.delta//120), "units")
    #destroy previous checkbutton widgets
    for child in cb_frame.winfo_children():
        child.destroy()
    #remove checkbuttons
    cb_del = check_buttons.keys() - connections.keys()
    if cb_del:
        del check_buttons[cb_del.pop()]
    #check_all checkbutton
    check_button_all = tk.Checkbutton(window, text="All",
        onvalue=True, offvalue=False,
        var=check_button_all_var,
        command=lambda: check_all())
    check_button_all.grid(column=0, row=1)
    #show checkbuttons and indicators
    for row, addr in enumerate(connections.keys()):
        checkbutton_text = addr[0]
        #check client_list for address' name
        for client in client_list:
            if client[0] == addr[0]:
                client_name = client[1]
                checkbutton_text = addr[0] + " [" + client_name + "]"
        row += 1 #row 0: check_all checkbutton
        if addr in check_buttons: #old checkbuttons
            check_buttons[addr]['widget'] = tk.Checkbutton(cb_frame, text=checkbutton_text,
                onvalue=True, offvalue=False,
                var=check_buttons[addr]['var'],
                command=lambda key=check_buttons[addr]: set_cb_values(key))
            if check_buttons[addr]['checked'] == True:
                check_buttons[addr]['widget'].select()
            check_buttons[addr]['canvas'] = tk.Canvas(cb_frame, width=20, height=28)
            check_buttons[addr]['indicator'] = check_buttons[addr]['canvas'].create_oval(10, 10, 20, 20,
                fill=check_buttons[addr]['color'])
        else: #new checkbuttons
            check_buttons[addr] = {}
            check_buttons[addr]['sock'] = connections[addr]
            check_buttons[addr]['var'] = tk.BooleanVar()
            check_buttons[addr]['widget'] = tk.Checkbutton(cb_frame, text=checkbutton_text,
                onvalue=True, offvalue=False,
                var=check_buttons[addr]['var'],
                command=lambda key=check_buttons[addr]: set_cb_values(key))
            if check_button_all_var.get() == True:
                check_buttons[addr]['checked'] = True
                check_buttons[addr]['widget'].select()
            else:
                check_buttons[addr]['checked'] = False
            check_buttons[addr]['color'] = ""
            check_buttons[addr]['canvas'] = tk.Canvas(cb_frame, width=20, height=28)
            check_buttons[addr]['indicator'] = check_buttons[addr]['canvas'].create_oval(10, 10, 20, 20,
                fill=check_buttons[addr]['color'])
        check_buttons[addr]['widget'].grid(row=row, column=0)
        check_buttons[addr]['canvas'].grid(row=row, column=1)
    #manage checkbutton canvas
    cb_frame.update()
    cb_canvas.configure(scrollregion=(1,1,0,cb_frame.winfo_height()))
    cb_canvas.bind_all("<MouseWheel>", on_mousewheel)
    cb_canvas.create_window(outer_frame.winfo_width()//2, 0, window=cb_frame, anchor='n')
    window.protocol("WM_DELETE_WINDOW", on_closing)
    window.mainloop()
def handle_client(conn, addr):
    """Per-connection worker thread.

    Registers the socket in the global ``connections`` map, then loops
    reading acknowledgement signals from the client:
      * ``"k"``     - message delivered  -> indicator turns yellow
      * ``"ROGER"`` - message read       -> indicator turns green
    Any socket error drops the connection, refreshes the GUI and ends the
    thread.
    """
    # display client address
    connections[addr] = conn
    log = "["+get_time()+"] NEW CONNECTION: "+str(addr[0])+"\n"
    write_log_file(log)
    while True:
        try:
            # receive ack signal from client (blocking)
            sig = conn.recv(64).decode()
            if sig == "k":
                check_buttons[addr]['color'] = "yellow"
                check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
                                                         fill=check_buttons[addr]['color'])
                log = "["+get_time()+"] "+str(addr)+": got the message\n"
                write_log_file(log)
            elif sig == "ROGER":
                check_buttons[addr]['color'] = "green"
                check_buttons[addr]['canvas'].itemconfig(check_buttons[addr]['indicator'],
                                                         fill=check_buttons[addr]['color'])
                log = "["+get_time()+"] "+str(addr)+": read the message\n"
                write_log_file(log)
        except Exception as e:
            # drop this client's connection and forget it
            conn.close()
            del connections[addr]
            log = "["+get_time()+"] "+str(addr)+": "+str(e)+"\n"
            write_log_file(log)
            # schedule a GUI rebuild on the Tk main thread
            window.after(0, gui)
            # kill thread
            sys.exit()
def start_server():
    """Accept-loop: listen on the module-level socket ``s`` and spawn one
    ``handle_client`` thread per incoming connection, refreshing the GUI
    after each accept.  Runs forever; intended to be run on its own thread.
    """
    log = "\n=============== STARTING SERVER: "+HOST+" "+str(PORT)+" ["+get_time()+"] ===============\n"
    write_log_file(log)
    # backlog of up to 10 pending connections
    s.listen(10)
    while True:
        # wait till a client accept connection
        conn, addr = s.accept()
        # create a thread to handle each connection
        thread = threading.Thread(target=handle_client, args=(conn, addr))
        thread.start()
        # schedule a GUI rebuild so the new client shows up
        window.after(0, gui)
def get_time():
    """Return the current local time formatted as ``DD-MM-YYYY HH:MM:SS``."""
    return time.strftime("%d-%m-%Y %H:%M:%S", time.localtime())
def write_log_file(text):
    """Append *text* verbatim to the shared ``log.txt`` file in the
    current working directory, creating it if necessary."""
    with open("log.txt", "a") as log_file:
        log_file.write(text)
def read_client_list():
    """Load ``client_list.txt``.

    File layout: an optional port number on the first line (recognised by
    its short length, <= 6 chars including the newline), followed by
    ``ip;name`` entries, one per line, appended to the module-level
    ``client_list``.

    Falls back to port 5050 when the file is missing, empty, or the first
    line is not a valid integer (the original code crashed with
    StopIteration / ValueError in those cases).
    """
    global PORT
    PORT = 5050
    try:
        with open("client_list.txt", "r") as f:
            # A short first line is treated as the port number.
            if len(f.readline()) <= 6:
                f.seek(0)
                try:
                    PORT = int(next(f))
                except (StopIteration, ValueError):
                    # Empty file or short non-numeric first line: keep the
                    # default port and re-read the whole file as clients.
                    f.seek(0)
            else:
                f.seek(0)
            for client in f.readlines():
                client_list.append(client.strip().split(";"))
    except FileNotFoundError:
        PORT = 5050
if __name__ == "__main__":
    # Load port + known clients before binding.
    read_client_list()
    # take the server name and port name
    HOST = socket.gethostbyname(socket.gethostname())
    #PORT = 5050
    # create a socket at server side using TCP / IP protocol
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    # bind the socket with server and port number
    s.bind((HOST, PORT))
    # Build the GUI immediately; the accept loop is started from the UI.
    gui(True)
test/test_server.py | keredson/tinyweb | 138 | 12768989 | <reponame>keredson/tinyweb
#!/usr/bin/env micropython
"""
Unittests for Tiny Web
MIT license
(C) <NAME> 2017-2018
"""
import unittest
import uos as os
import uerrno as errno
import uasyncio as asyncio
from tinyweb import webserver
from tinyweb.server import urldecode_plus, parse_query_string
from tinyweb.server import request, HTTPException
# Helper to delete file
def delete_file(fn):
    """Delete *fn* using whichever removal API this interpreter provides.

    MicroPython keeps CPython's ``os.unlink`` renamed as ``os.remove``,
    so prefer ``unlink`` and fall back to ``remove``.
    """
    remove = getattr(os, 'unlink', None) or os.remove
    remove(fn)
# HTTP headers helpers
def HDR(value):
    """Return *value* terminated with CRLF, ready to use as one HTTP
    header line.  (Parameter renamed from ``str`` — it shadowed the
    builtin; all in-file callers pass it positionally.)"""
    return '{}\r\n'.format(value)


# Bare CRLF that terminates an HTTP header block.
HDRE = '\r\n'
class mockReader():
    """Mock for coroutine reader class"""

    def __init__(self, lines):
        # Accept either a single string or a list of strings.
        if type(lines) is not list:
            lines = [lines]
        self.lines = lines
        self.idx = 0

    async def readline(self):
        # Hand lines back one at a time, encoded to bytes like a real
        # stream reader would.
        line = self.lines[self.idx]
        self.idx += 1
        return line.encode()

    def readexactly(self, n):
        # The requested byte count is ignored; a whole line is close
        # enough for these tests.
        return self.readline()
class mockWriter():
    """Mock for coroutine writer class"""

    def __init__(self, generate_expection=None):
        """
        keyword arguments:
            generate_expection - raise exception when calling send()
        """
        # NOTE(review): purpose of ``s`` unclear — kept for compatibility.
        self.s = 1
        self.history = []
        self.closed = False
        self.generate_expection = generate_expection

    async def awrite(self, buf, off=0, sz=-1):
        if sz == -1:
            sz = len(buf) - off
        if self.generate_expection:
            raise self.generate_expection
        # Save buffer into history - so to be able to assert then.
        # Fix: honour the offset; the previous slice started at index 0,
        # recording the wrong bytes whenever off != 0.
        self.history.append(buf[off:off + sz])

    async def aclose(self):
        self.closed = True
async def mock_wait_for(coro, timeout):
    # Test double for asyncio.wait_for(): ignore the timeout, just await.
    await coro
def run_coro(coro):
    """Simple helper to drive a coroutine/generator to completion,
    synchronously."""
    # Mock wait_for() with a pass-through so timeouts never trigger in
    # tests.  (The original assigned ``lambda c, t: await c``, which is a
    # syntax error: ``await`` is not allowed inside a plain lambda.)
    asyncio.wait_for = lambda c, t: c
    for i in coro:
        pass
# Tests
class Utils(unittest.TestCase):
    """Unit tests for the URL-decoding and query-string helpers."""

    def testUrldecode(self):
        # (encoded, expected) pairs: '+' decodes to space, %XX to a char,
        # and malformed escapes pass through unchanged.
        runs = [('abc%20def', 'abc def'),
                ('abc%%20def', 'abc% def'),
                ('%%%', '%%%'),
                ('%20%20', '  '),
                ('abc', 'abc'),
                ('a%25%25%25c', 'a%%%c'),
                ('a++b', 'a  b'),
                ('+%25+', ' % '),
                ('+%2B+', ' + '),
                ('%20+%2B+%41', '  +  A'),
                ]
        for r in runs:
            self.assertEqual(urldecode_plus(r[0]), r[1])

    def testParseQueryString(self):
        # (query string, expected dict); keys/values are URL-decoded too.
        runs = [('k1=v2', {'k1': 'v2'}),
                ('k1=v2&k11=v11', {'k1': 'v2',
                                   'k11': 'v11'}),
                ('k1=v2&k11=', {'k1': 'v2',
                                'k11': ''}),
                ('k1=+%20', {'k1': '  '}),
                ('%6b1=+%20', {'k1': '  '}),
                ('k1=%3d1', {'k1': '=1'}),
                ('11=22%26&%3d=%3d', {'11': '22&',
                                      '=': '='}),
                ]
        for r in runs:
            self.assertEqual(parse_query_string(r[0]), r[1])
class ServerParts(unittest.TestCase):
    """Tests for request-line / header parsing and URL-handler lookup."""

    def testRequestLine(self):
        # (request line, expected method, expected path[, expected query])
        runs = [('GETT / HTTP/1.1', 'GETT', '/'),
                ('TTEG\t/blah\tHTTP/1.1', 'TTEG', '/blah'),
                ('POST /qq/?q=q HTTP', 'POST', '/qq/', 'q=q'),
                ('POST /?q=q BSHT', 'POST', '/', 'q=q'),
                ('POST /?q=q&a=a JUNK', 'POST', '/', 'q=q&a=a')]
        for r in runs:
            try:
                req = request(mockReader(r[0]))
                run_coro(req.read_request_line())
                self.assertEqual(r[1].encode(), req.method)
                self.assertEqual(r[2].encode(), req.path)
                if len(r) > 3:
                    self.assertEqual(r[3].encode(), req.query_string)
            except Exception:
                self.fail('exception on payload --{}--'.format(r[0]))

    def testRequestLineEmptyLinesBefore(self):
        # Leading blank lines before the request line must be skipped.
        req = request(mockReader(['\n', '\r\n', 'GET /?a=a HTTP/1.1']))
        run_coro(req.read_request_line())
        self.assertEqual(b'GET', req.method)
        self.assertEqual(b'/', req.path)
        self.assertEqual(b'a=a', req.query_string)

    def testRequestLineNegative(self):
        # Malformed request lines must raise HTTPException.
        runs = ['',
                '\t\t',
                ' ',
                ' / HTTP/1.1',
                'GET',
                'GET /',
                'GET / '
                ]
        for r in runs:
            with self.assertRaises(HTTPException):
                req = request(mockReader(r))
                run_coro(req.read_request_line())

    def testHeadersSimple(self):
        req = request(mockReader([HDR('Host: google.com'),
                                  HDRE]))
        run_coro(req.read_headers([b'Host']))
        self.assertEqual(req.headers, {b'Host': b'google.com'})

    def testHeadersSpaces(self):
        # Surrounding whitespace around the value must be stripped.
        req = request(mockReader([HDR('Host: \t google.com \t '),
                                  HDRE]))
        run_coro(req.read_headers([b'Host']))
        self.assertEqual(req.headers, {b'Host': b'google.com'})

    def testHeadersEmptyValue(self):
        req = request(mockReader([HDR('Host:'),
                                  HDRE]))
        run_coro(req.read_headers([b'Host']))
        self.assertEqual(req.headers, {b'Host': b''})

    def testHeadersMultiple(self):
        req = request(mockReader([HDR('Host: google.com'),
                                  HDR('Junk: you blah'),
                                  HDR('Content-type: file'),
                                  HDRE]))
        hdrs = {b'Host': b'google.com',
                b'Junk': b'you blah',
                b'Content-type': b'file'}
        run_coro(req.read_headers([b'Host', b'Junk', b'Content-type']))
        self.assertEqual(req.headers, hdrs)

    def testUrlFinderExplicit(self):
        urls = [('/', 1),
                ('/%20', 2),
                ('/a/b', 3),
                ('/aac', 5)]
        junk = ['//', '', '/a', '/aa', '/a/fhhfhfhfhfhf']
        # Create server, add routes
        srv = webserver()
        for u in urls:
            srv.add_route(u[0], u[1])
        # Search them all
        for u in urls:
            # Create mock request object with "pre-parsed" url path
            rq = request(mockReader([]))
            rq.path = u[0].encode()
            f, args = srv._find_url_handler(rq)
            self.assertEqual(u[1], f)
        # Some simple negative cases
        for j in junk:
            rq = request(mockReader([]))
            rq.path = j.encode()
            f, args = srv._find_url_handler(rq)
            self.assertIsNone(f)
            self.assertIsNone(args)

    def testUrlFinderParameterized(self):
        srv = webserver()
        # Add few routes
        srv.add_route('/', 0)
        srv.add_route('/<user_name>', 1)
        srv.add_route('/a/<id>', 2)
        # Check first url (non param)
        rq = request(mockReader([]))
        rq.path = b'/'
        f, args = srv._find_url_handler(rq)
        self.assertEqual(f, 0)
        # Check second url
        rq.path = b'/user1'
        f, args = srv._find_url_handler(rq)
        self.assertEqual(f, 1)
        self.assertEqual(args['_param_name'], 'user_name')
        self.assertEqual(rq._param, 'user1')
        # Check third url
        rq.path = b'/a/123456'
        f, args = srv._find_url_handler(rq)
        self.assertEqual(f, 2)
        self.assertEqual(args['_param_name'], 'id')
        self.assertEqual(rq._param, '123456')
        # When param is empty and there is no non param endpoint
        rq.path = b'/a/'
        f, args = srv._find_url_handler(rq)
        self.assertEqual(f, 2)
        self.assertEqual(rq._param, '')

    def testUrlFinderNegative(self):
        srv = webserver()
        # empty URL is not allowed
        with self.assertRaises(ValueError):
            srv.add_route('', 1)
        # Query string is not allowed
        with self.assertRaises(ValueError):
            srv.add_route('/?a=a', 1)
        # Duplicate urls
        srv.add_route('/duppp', 1)
        with self.assertRaises(ValueError):
            srv.add_route('/duppp', 1)
# We want to test decorators as well.
# Module-level server instance exercised by the decorator tests below.
server_for_decorators = webserver()


# The same handler may be registered for several routes.
@server_for_decorators.route('/uid/<user_id>')
@server_for_decorators.route('/uid2/<user_id>')
async def route_for_decorator(req, resp, user_id):
    await resp.start_html()
    await resp.send('YO, {}'.format(user_id))


@server_for_decorators.resource('/rest1/<user_id>')
def resource_for_decorator1(data, user_id):
    return {'name': user_id}


@server_for_decorators.resource('/rest2/<user_id>')
async def resource_for_decorator2(data, user_id):
    # Generator-style resource: yielded chunks are sent chunked-encoded.
    # The literal (unsubstituted) "user_id" text here is intentional —
    # the test asserts it verbatim.
    yield '{"name": user_id}'
class ServerFull(unittest.TestCase):
    """End-to-end tests driving webserver._handler() with mocked reader /
    writer streams and asserting the exact bytes 'sent' to the client."""

    def setUp(self):
        self.dummy_called = False
        self.data = {}
        # "Register" one connection into map for dedicated decor server
        server_for_decorators.conns[id(1)] = None
        self.hello_world_history = ['HTTP/1.0 200 MSG\r\n' +
                                    'Content-Type: text/html\r\n\r\n',
                                    '<html><h1>Hello world</h1></html>']
        # Create one more server - to simplify bunch of tests
        self.srv = webserver()
        self.srv.conns[id(1)] = None

    def testRouteDecorator1(self):
        """Test @.route() decorator"""
        # First decorator
        rdr = mockReader(['GET /uid/man1 HTTP/1.1\r\n',
                          HDRE])
        wrt = mockWriter()
        # "Send" request
        run_coro(server_for_decorators._handler(rdr, wrt))
        # Ensure that proper response "sent"
        expected = ['HTTP/1.0 200 MSG\r\n' +
                    'Content-Type: text/html\r\n\r\n',
                    'YO, man1']
        self.assertEqual(wrt.history, expected)
        self.assertTrue(wrt.closed)

    def testRouteDecorator2(self):
        # Second decorator
        rdr = mockReader(['GET /uid2/man2 HTTP/1.1\r\n',
                          HDRE])
        wrt = mockWriter()
        # Re-register connection
        server_for_decorators.conns[id(1)] = None
        # "Send" request
        run_coro(server_for_decorators._handler(rdr, wrt))
        # Ensure that proper response "sent"
        expected = ['HTTP/1.0 200 MSG\r\n' +
                    'Content-Type: text/html\r\n\r\n',
                    'YO, man2']
        self.assertEqual(wrt.history, expected)
        self.assertTrue(wrt.closed)

    def testResourceDecorator1(self):
        """Test @.resource() decorator"""
        rdr = mockReader(['GET /rest1/man1 HTTP/1.1\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(server_for_decorators._handler(rdr, wrt))
        expected = ['HTTP/1.0 200 MSG\r\n'
                    'Access-Control-Allow-Origin: *\r\n' +
                    'Access-Control-Allow-Headers: *\r\n' +
                    'Content-Length: 16\r\n' +
                    'Access-Control-Allow-Methods: GET\r\n' +
                    'Content-Type: application/json\r\n\r\n',
                    '{"name": "man1"}']
        self.assertEqual(wrt.history, expected)
        self.assertTrue(wrt.closed)

    def testResourceDecorator2(self):
        # Generator-based resource -> HTTP/1.1 chunked transfer encoding.
        rdr = mockReader(['GET /rest2/man2 HTTP/1.1\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(server_for_decorators._handler(rdr, wrt))
        expected = ['HTTP/1.1 200 MSG\r\n' +
                    'Access-Control-Allow-Methods: GET\r\n' +
                    'Connection: close\r\n' +
                    'Access-Control-Allow-Headers: *\r\n' +
                    'Content-Type: application/json\r\n' +
                    'Transfer-Encoding: chunked\r\n' +
                    'Access-Control-Allow-Origin: *\r\n\r\n',
                    '11\r\n',
                    '{"name": user_id}',
                    '\r\n',
                    '0\r\n\r\n'
                    ]
        self.assertEqual(wrt.history, expected)
        self.assertTrue(wrt.closed)

    def testCatchAllDecorator(self):
        # A fresh server for the catchall handler
        server_for_catchall_decorator = webserver()

        # Catchall decorator and handler
        @server_for_catchall_decorator.catchall()
        async def route_for_catchall_decorator(req, resp):
            await resp.start_html()
            await resp.send('my404')

        rdr = mockReader(['GET /this/is/an/invalid/url HTTP/1.1\r\n',
                          HDRE])
        wrt = mockWriter()
        server_for_catchall_decorator.conns[id(1)] = None
        run_coro(server_for_catchall_decorator._handler(rdr, wrt))
        expected = ['HTTP/1.0 200 MSG\r\n' +
                    'Content-Type: text/html\r\n\r\n',
                    'my404']
        self.assertEqual(wrt.history, expected)
        self.assertTrue(wrt.closed)

    async def dummy_handler(self, req, resp):
        """Dummy URL handler. It just records the fact - it has been called"""
        self.dummy_req = req
        self.dummy_resp = resp
        self.dummy_called = True

    async def dummy_post_handler(self, req, resp):
        # Captures the parsed form/JSON body for later assertions.
        self.data = await req.read_parse_form_data()

    async def hello_world_handler(self, req, resp):
        await resp.start_html()
        await resp.send('<html><h1>Hello world</h1></html>')

    async def redirect_handler(self, req, resp):
        await resp.redirect('/blahblah', msg='msg:)')

    def testStartHTML(self):
        """Verify that request.start_html() works well"""
        self.srv.add_route('/', self.hello_world_handler)
        rdr = mockReader(['GET / HTTP/1.1\r\n',
                          HDR('Host: blah.com'),
                          HDRE])
        wrt = mockWriter()
        # "Send" request
        run_coro(self.srv._handler(rdr, wrt))
        # Ensure that proper response "sent"
        self.assertEqual(wrt.history, self.hello_world_history)
        self.assertTrue(wrt.closed)

    def testRedirect(self):
        """Verify that request.redirect() works well"""
        self.srv.add_route('/', self.redirect_handler)
        rdr = mockReader(['GET / HTTP/1.1\r\n',
                          HDR('Host: blah.com'),
                          HDRE])
        wrt = mockWriter()
        # "Send" request
        run_coro(self.srv._handler(rdr, wrt))
        # Ensure that proper response "sent"
        exp = ['HTTP/1.0 302 MSG\r\n' +
               'Location: /blahblah\r\nContent-Length: 5\r\n\r\n',
               'msg:)']
        self.assertEqual(wrt.history, exp)

    def testRequestBodyUnknownType(self):
        """Unknown HTTP body test - empty dict expected"""
        self.srv.add_route('/', self.dummy_post_handler, methods=['POST'])
        rdr = mockReader(['POST / HTTP/1.1\r\n',
                          HDR('Host: blah.com'),
                          HDR('Content-Length: 5'),
                          HDRE,
                          '12345'])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        # Check extracted POST body
        self.assertEqual(self.data, {})

    def testRequestBodyJson(self):
        """JSON encoded POST body"""
        self.srv.add_route('/',
                           self.dummy_post_handler,
                           methods=['POST'],
                           save_headers=['Content-Type', 'Content-Length'])
        rdr = mockReader(['POST / HTTP/1.1\r\n',
                          HDR('Content-Type: application/json'),
                          HDR('Content-Length: 10'),
                          HDRE,
                          '{"a": "b"}'])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        # Check parsed POST body
        self.assertEqual(self.data, {'a': 'b'})

    def testRequestBodyUrlencoded(self):
        """Regular HTML form"""
        self.srv.add_route('/',
                           self.dummy_post_handler,
                           methods=['POST'],
                           save_headers=['Content-Type', 'Content-Length'])
        rdr = mockReader(['POST / HTTP/1.1\r\n',
                          HDR('Content-Type: application/x-www-form-urlencoded; charset=UTF-8'),
                          HDR('Content-Length: 10'),
                          HDRE,
                          'a=b&c=%20d'])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        # Check parsed POST body
        self.assertEqual(self.data, {'a': 'b', 'c': ' d'})

    def testRequestBodyNegative(self):
        """Malformed body must produce HTTP 400"""
        self.srv.add_route('/',
                           self.dummy_post_handler,
                           methods=['POST'],
                           save_headers=['Content-Type', 'Content-Length'])
        rdr = mockReader(['POST / HTTP/1.1\r\n',
                          HDR('Content-Type: application/json'),
                          HDR('Content-Length: 9'),
                          HDRE,
                          'some junk'])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        # payload broken - HTTP 400 expected
        self.assertEqual(wrt.history, ['HTTP/1.0 400 MSG\r\n\r\n'])

    def testRequestLargeBody(self):
        """Max Body size check"""
        self.srv.add_route('/',
                           self.dummy_post_handler,
                           methods=['POST'],
                           save_headers=['Content-Type', 'Content-Length'],
                           max_body_size=5)
        rdr = mockReader(['POST / HTTP/1.1\r\n',
                          HDR('Content-Type: application/json'),
                          HDR('Content-Length: 9'),
                          HDRE,
                          'some junk'])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        # body too large - HTTP 413 expected
        self.assertEqual(wrt.history, ['HTTP/1.0 413 MSG\r\n\r\n'])

    async def route_parameterized_handler(self, req, resp, user_name):
        await resp.start_html()
        await resp.send('<html>Hello, {}</html>'.format(user_name))

    def testRouteParameterized(self):
        """Verify that route with params works fine"""
        self.srv.add_route('/db/<user_name>', self.route_parameterized_handler)
        rdr = mockReader(['GET /db/user1 HTTP/1.1\r\n',
                          HDR('Host: junk.com'),
                          HDRE])
        wrt = mockWriter()
        # "Send" request
        run_coro(self.srv._handler(rdr, wrt))
        # Ensure that proper response "sent"
        expected = ['HTTP/1.0 200 MSG\r\n' +
                    'Content-Type: text/html\r\n\r\n',
                    '<html>Hello, user1</html>']
        self.assertEqual(wrt.history, expected)
        self.assertTrue(wrt.closed)

    def testParseHeadersOnOff(self):
        """Verify parameter save_headers works"""
        self.srv.add_route('/', self.dummy_handler, save_headers=['H1', 'H2'])
        rdr = mockReader(['GET / HTTP/1.1\r\n',
                          HDR('H1: blah.com'),
                          HDR('H2: lalalla'),
                          HDR('Junk: fsdfmsdjfgjsdfjunk.com'),
                          HDRE])
        # "Send" request
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        self.assertTrue(self.dummy_called)
        # Check for headers - only 2 of 3 should be collected, others - ignore
        hdrs = {b'H1': b'blah.com',
                b'H2': b'lalalla'}
        self.assertEqual(self.dummy_req.headers, hdrs)
        self.assertTrue(wrt.closed)

    def testDisallowedMethod(self):
        """Verify that server respects allowed methods"""
        self.srv.add_route('/', self.hello_world_handler)
        self.srv.add_route('/post_only', self.dummy_handler, methods=['POST'])
        rdr = mockReader(['GET / HTTP/1.0\r\n',
                          HDRE])
        # "Send" GET request, by default GET is enabled
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        self.assertEqual(wrt.history, self.hello_world_history)
        self.assertTrue(wrt.closed)
        # "Send" GET request to POST only location
        self.srv.conns[id(1)] = None
        self.dummy_called = False
        rdr = mockReader(['GET /post_only HTTP/1.1\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        # Handler should not be called - method not allowed
        self.assertFalse(self.dummy_called)
        exp = ['HTTP/1.0 405 MSG\r\n\r\n']
        self.assertEqual(wrt.history, exp)
        # Connection must be closed
        self.assertTrue(wrt.closed)

    def testAutoOptionsMethod(self):
        """Test auto implementation of OPTIONS method"""
        self.srv.add_route('/', self.hello_world_handler, methods=['POST', 'PUT', 'DELETE'])
        self.srv.add_route('/disabled', self.hello_world_handler, auto_method_options=False)
        rdr = mockReader(['OPTIONS / HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Access-Control-Allow-Headers: *\r\n'
               'Content-Length: 0\r\n'
               'Access-Control-Allow-Origin: *\r\n'
               'Access-Control-Allow-Methods: POST, PUT, DELETE\r\n\r\n']
        self.assertEqual(wrt.history, exp)
        self.assertTrue(wrt.closed)

    def testPageNotFound(self):
        """Verify that an unknown URL generates HTTP 404"""
        rdr = mockReader(['GET /not_existing HTTP/1.1\r\n',
                          HDR('Host: blah.com'),
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 404 MSG\r\n\r\n']
        self.assertEqual(wrt.history, exp)
        # Connection must be closed
        self.assertTrue(wrt.closed)

    def testMalformedRequest(self):
        """Verify that malformed request generates proper response"""
        rdr = mockReader(['GET /\r\n',
                          HDR('Host: blah.com'),
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 400 MSG\r\n\r\n']
        self.assertEqual(wrt.history, exp)
        # Connection must be closed
        self.assertTrue(wrt.closed)
self.assertTrue(wrt.closed)
class ResourceGetPost():
    """Simple REST API resource class with just two methods"""

    def get(self, data):
        """Return a fixed payload, handy for asserting GET behaviour."""
        return {'data1': 'junk'}

    def post(self, data):
        """Echo the parsed request data straight back."""
        return data
class ResourceGetParam():
    """Parameterized REST API resource"""

    def __init__(self):
        # Key under which the captured URL parameter is echoed back.
        self.user_id = 'user_id'

    def get(self, data, user_id):
        """Echo the URL parameter under the configured key."""
        return {self.user_id: user_id}
class ResourceGetArgs():
    """REST API resource with additional arguments"""

    def get(self, data, arg1, arg2):
        """Echo the extra route arguments back as a dict."""
        return {'arg1': arg1, 'arg2': arg2}
class ResourceGenerator():
    """REST API with generator as result"""

    async def get(self, data):
        # Yield several chunks; the last one exercises unicode support.
        for chunk in ('longlongchunkchunk1', 'chunk2', '\u265E'):
            yield chunk
class ResourceNegative():
    """To cover negative test cases"""

    def delete(self, data):
        """Emulate a broken pipe (errno 32)."""
        raise OSError(32, '', '')

    def put(self, data):
        """Raise a plain, unhandled exception."""
        raise Exception('something')
class ServerResource(unittest.TestCase):
    """End-to-end tests for the REST resource layer: registration,
    parameter capture, extra args, chunked generators and error paths."""

    def setUp(self):
        self.srv = webserver()
        self.srv.conns[id(1)] = None
        self.srv.add_resource(ResourceGetPost, '/')
        self.srv.add_resource(ResourceGetParam, '/param/<user_id>')
        self.srv.add_resource(ResourceGetArgs, '/args', arg1=1, arg2=2)
        self.srv.add_resource(ResourceGenerator, '/gen')
        self.srv.add_resource(ResourceNegative, '/negative')

    def testOptions(self):
        # Ensure that only GET/POST methods are allowed:
        rdr = mockReader(['OPTIONS / HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Access-Control-Allow-Headers: *\r\n'
               'Content-Length: 0\r\n'
               'Access-Control-Allow-Origin: *\r\n'
               'Access-Control-Allow-Methods: GET, POST\r\n\r\n']
        self.assertEqual(wrt.history, exp)

    def testGet(self):
        rdr = mockReader(['GET / HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Access-Control-Allow-Origin: *\r\n'
               'Access-Control-Allow-Headers: *\r\n'
               'Content-Length: 17\r\n'
               'Access-Control-Allow-Methods: GET, POST\r\n'
               'Content-Type: application/json\r\n\r\n',
               '{"data1": "junk"}']
        self.assertEqual(wrt.history, exp)

    def testGetWithParam(self):
        # URL parameter must be captured and echoed by the resource.
        rdr = mockReader(['GET /param/123 HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Access-Control-Allow-Origin: *\r\n'
               'Access-Control-Allow-Headers: *\r\n'
               'Content-Length: 18\r\n'
               'Access-Control-Allow-Methods: GET\r\n'
               'Content-Type: application/json\r\n\r\n',
               '{"user_id": "123"}']
        self.assertEqual(wrt.history, exp)

    def testGetWithArgs(self):
        # Extra kwargs given to add_resource() are forwarded to get().
        rdr = mockReader(['GET /args HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Access-Control-Allow-Origin: *\r\n'
               'Access-Control-Allow-Headers: *\r\n'
               'Content-Length: 22\r\n'
               'Access-Control-Allow-Methods: GET\r\n'
               'Content-Type: application/json\r\n\r\n',
               '{"arg1": 1, "arg2": 2}']
        self.assertEqual(wrt.history, exp)

    def testGenerator(self):
        # Async-generator resource -> HTTP/1.1 chunked transfer encoding.
        rdr = mockReader(['GET /gen HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.1 200 MSG\r\n' +
               'Access-Control-Allow-Methods: GET\r\n' +
               'Connection: close\r\n' +
               'Access-Control-Allow-Headers: *\r\n' +
               'Content-Type: application/json\r\n' +
               'Transfer-Encoding: chunked\r\n' +
               'Access-Control-Allow-Origin: *\r\n\r\n',
               '13\r\n',
               'longlongchunkchunk1',
               '\r\n',
               '6\r\n',
               'chunk2',
               '\r\n',
               # next chunk is 1 char len UTF-8 string
               '3\r\n',
               '\u265E',
               '\r\n',
               '0\r\n\r\n']
        self.assertEqual(wrt.history, exp)

    def testPost(self):
        # Ensure that parameters from query string / body will be combined as well
        rdr = mockReader(['POST /?qs=qs1 HTTP/1.0\r\n',
                          HDR('Content-Length: 17'),
                          HDR('Content-Type: application/json'),
                          HDRE,
                          '{"body": "body1"}'])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Access-Control-Allow-Origin: *\r\n'
               'Access-Control-Allow-Headers: *\r\n'
               'Content-Length: 30\r\n'
               'Access-Control-Allow-Methods: GET, POST\r\n'
               'Content-Type: application/json\r\n\r\n',
               '{"qs": "qs1", "body": "body1"}']
        self.assertEqual(wrt.history, exp)

    def testInvalidMethod(self):
        # PUT is not implemented by ResourceGetPost -> 405.
        rdr = mockReader(['PUT / HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 405 MSG\r\n\r\n']
        self.assertEqual(wrt.history, exp)

    def testException(self):
        # Unhandled exception in the resource -> 500.
        rdr = mockReader(['PUT /negative HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 500 MSG\r\n\r\n']
        self.assertEqual(wrt.history, exp)

    def testBrokenPipe(self):
        # Broken pipe (errno 32) -> nothing can be sent back at all.
        rdr = mockReader(['DELETE /negative HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        self.assertEqual(wrt.history, [])
class StaticContent(unittest.TestCase):
    """Tests for response.send_file(): happy path, 404 and reset peer."""

    def setUp(self):
        self.srv = webserver()
        self.srv.conns[id(1)] = None
        self.tempfn = '__tmp.html'
        self.ctype = None
        self.etype = None
        self.max_age = 2592000
        # NOTE(review): writing a str to a file opened 'wb' is accepted by
        # MicroPython (this suite's target interpreter) but would be a
        # TypeError under CPython — confirm before porting.
        with open(self.tempfn, 'wb') as f:
            f.write('someContent blah blah')

    def tearDown(self):
        try:
            delete_file(self.tempfn)
        except OSError:
            # testSendFileNotFound already deleted it — nothing to do.
            pass

    async def send_file_handler(self, req, resp):
        await resp.send_file(self.tempfn,
                             content_type=self.ctype,
                             content_encoding=self.etype,
                             max_age=self.max_age)

    def testSendFileManual(self):
        """Verify send_file works great with manually defined parameters"""
        self.ctype = 'text/plain'
        self.etype = 'gzip'
        self.max_age = 100
        self.srv.add_route('/', self.send_file_handler)
        rdr = mockReader(['GET / HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 200 MSG\r\n' +
               'Cache-Control: max-age=100, public\r\n'
               'Content-Type: text/plain\r\n'
               'Content-Length: 21\r\n'
               'Content-Encoding: gzip\r\n\r\n',
               bytearray(b'someContent blah blah')]
        self.assertEqual(wrt.history, exp)
        self.assertTrue(wrt.closed)

    def testSendFileNotFound(self):
        """Verify 404 error for non existing files"""
        self.srv.add_route('/', self.send_file_handler)
        rdr = mockReader(['GET / HTTP/1.0\r\n',
                          HDRE])
        wrt = mockWriter()
        # Intentionally delete file before request
        delete_file(self.tempfn)
        run_coro(self.srv._handler(rdr, wrt))
        exp = ['HTTP/1.0 404 MSG\r\n\r\n']
        self.assertEqual(wrt.history, exp)
        self.assertTrue(wrt.closed)

    def testSendFileConnectionReset(self):
        self.srv.add_route('/', self.send_file_handler)
        rdr = mockReader(['GET / HTTP/1.0\r\n',
                          HDRE])
        # tell mockWriter to raise error during send()
        wrt = mockWriter(generate_expection=OSError(errno.ECONNRESET))
        run_coro(self.srv._handler(rdr, wrt))
        # there should be no payload due to connection reset
        self.assertEqual(wrt.history, [])
        self.assertTrue(wrt.closed)
if __name__ == '__main__':
    # Run the whole suite when executed directly (micropython test_server.py).
    unittest.main()
| 2.40625 | 2 |
nought/wizard.py | cole-wilson/nought | 10 | 12768990 | <reponame>cole-wilson/nought<filename>nought/wizard.py<gh_stars>1-10
import sys, os, toml
def main():
    """Setup "wizard" entry point.

    The interactive config builder was never finished, so this simply
    points the user at the online configuration docs and exits cleanly.
    """
    print('Hello! The best way to setup nought is to go to the github page:\n\thttps://github.com/cole-wilson/nought#configuration')
    sys.exit(0)
examples/common/python/utility/hex_utils.py | gbarcomu/avalon | 0 | 12768991 | # Copyright 2019 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import binascii
# Return list of binary hex ids as list of UTF strings
def pretty_ids(ids):
    """Hex-encode each binary id in *ids*, returning a list of str."""
    return [binascii.hexlify(raw_id).decode("UTF-8") for raw_id in ids]
# Return binary hex as UTF string
def hex_to_utf8(binary):
    """Hex-encode *binary* and return the result as a str."""
    hex_bytes = binascii.hexlify(binary)
    return hex_bytes.decode("UTF-8")
def is_valid_hex_str(hex_str):
    """
    Function to check given string is valid hex string or not
    Parameter
        - hex_str is string
    Returns True if valid hex string otherwise False

    Notes:
        * ``int(x, 16)`` also accepts forms such as "0x1f", "+ff"/"-ff",
          surrounding whitespace and digit-group underscores, so those
          inputs are reported as valid as well.
        * Non-string input (e.g. None) now returns False instead of
          escaping as an unhandled TypeError.
    """
    try:
        int(hex_str, 16)
        return True
    except (ValueError, TypeError):
        return False
def byte_array_to_hex_str(in_byte_array):
    '''
    Converts tuple of bytes to hex string
    '''
    return ''.join(map('{:02x}'.format, in_byte_array))
| 3 | 3 |
znail/ui/api/test/test_ipredirect.py | Risca/znail | 15 | 12768992 | <gh_stars>10-100
import unittest
from unittest.mock import patch
from znail.netem.ipredirect import IpRedirect, IpRedirectDescriptor
from znail.ui import app
class TestIpRedirect(unittest.TestCase):
def setUp(self):
ip_redirect_clear_patcher = patch.object(IpRedirect, "_clear")
self.ip_redirect_clear = ip_redirect_clear_patcher.start()
self.addCleanup(ip_redirect_clear_patcher.stop)
ip_redirect_apply_patcher = patch.object(IpRedirect, "_apply")
self.ip_redirect_apply = ip_redirect_apply_patcher.start()
self.addCleanup(ip_redirect_apply_patcher.stop)
self.client = app.test_client()
def tearDown(self):
self.client.post("/api/ipredirect/clear")
def test_empty(self):
response = self.client.get("/api/ipredirect")
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, [])
def test_can_be_set(self):
response = self.client.post(
"/api/ipredirect",
json=[
{
"ip": "1.2.3.4",
"port": 80,
"destination_ip": "2.3.4.5",
"destination_port": 8080,
"protocol": "tcp",
}
],
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {"message": "ok"})
self.ip_redirect_apply.assert_called_once_with(
{
IpRedirectDescriptor(
ip="1.2.3.4", port=80, destination_ip="2.3.4.5", destination_port=8080, protocol="tcp"
)
}
)
response = self.client.get("/api/ipredirect")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.json,
[
{
"ip": "1.2.3.4",
"port": 80,
"destination_ip": "2.3.4.5",
"destination_port": 8080,
"protocol": "tcp",
}
],
)
def test_multiple_entries(self):
response = self.client.post(
"/api/ipredirect",
json=[
{
"ip": "1.2.3.4",
"port": 80,
"destination_ip": "2.3.4.5",
"destination_port": 8080,
"protocol": "tcp",
},
{
"ip": "2.3.4.5",
"port": 8080,
"destination_ip": "3.4.5.6",
"destination_port": 80,
"protocol": "udp",
},
],
)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.json, {"message": "ok"})
last_call = sorted(self.ip_redirect_apply.call_args[0][0])
self.assertEqual(
last_call,
sorted(
[
IpRedirectDescriptor(
ip="1.2.3.4", port=80, destination_ip="2.3.4.5", destination_port=8080, protocol="tcp"
),
IpRedirectDescriptor(
ip="2.3.4.5", port=8080, destination_ip="3.4.5.6", destination_port=80, protocol="udp"
),
]
),
)
response = self.client.get("/api/ipredirect")
self.assertEqual(response.status_code, 200)
self.assertIn(
{
"ip": "1.2.3.4",
"port": 80,
"destination_ip": "2.3.4.5",
"destination_port": 8080,
"protocol": "tcp",
},
response.json,
)
self.assertIn(
{
"ip": "2.3.4.5",
"port": 8080,
"destination_ip": "3.4.5.6",
"destination_port": 80,
"protocol": "udp",
},
response.json,
)
    def test_can_not_be_set_to_invalid_value(self):
        """A JSON body of the wrong shape is rejected with 422 (validation error)."""
        response = self.client.post("/api/ipredirect", json={"invalid": "data"})
        self.assertEqual(response.status_code, 422)
    def test_bad_request(self):
        """A POST without any JSON body is a 400 bad request."""
        response = self.client.post("/api/ipredirect")
        self.assertEqual(response.status_code, 400)
    def test_can_be_cleared(self):
        """POST /clear succeeds and calls the backend clear hook exactly once."""
        response = self.client.post("/api/ipredirect/clear")
        self.assertEqual(response.status_code, 200)
        self.assertEqual(response.json, {"message": "ok"})
        self.ip_redirect_clear.assert_called_once_with()
| 2.390625 | 2 |
pypospack/task/tasks_lammps/structural_minimization.py | eragasa/pypospack | 4 | 12768993 | import os,copy
from collections import OrderedDict
from pypospack.task.lammps import LammpsSimulation
class LammpsStructuralMinimization(LammpsSimulation):
    """ Class for LAMMPS structural minimization

    This data class defines additional attributes and methods necessary to
    interact with the Workflow manager.

    Args:
        task_name(str): unique id for the task name being defined
        task_directory(str): the directory where this task will create
            input and output files for LAMMPS
        structure_filename(str): path of the structure file to be minimized
        restart(bool): if True, restart an existing task
        fullauto(bool): if True, run without user interaction

    Attributes:
        results(OrderedDict): parsed simulation results, keyed
            '<task_name>.<quantity>', populated by on_post()
    """
    def __init__(self,
            task_name,
            task_directory,
            structure_filename,
            restart=False,
            fullauto=False):
        _task_type = 'lmps_min_all'
        LammpsSimulation.__init__(self,
                task_name=task_name,
                task_directory=task_directory,
                task_type=_task_type,
                structure_filename=structure_filename,
                restart=restart,
                fullauto=fullauto)

    def postprocess(self):
        """Delegate post-processing to the base class."""
        LammpsSimulation.postprocess(self)

    def lammps_input_file_to_string(self):
        """Assemble the LAMMPS input script for a cell+ion minimization.

        Returns:
            str: the concatenated input-file sections
        """
        str_out = "".join([
                self._lammps_input_initialization_section(),
                self._lammps_input_create_atoms(),
                self._lammps_input_define_potential(),
                self._lammps_input_run_minimization(),
                self._lammps_input_out_section()])
        return str_out

    def on_post(self, configuration=None):
        """Harvest results from lammps.out, then run the base-class hook."""
        self.__get_results_from_lammps_outputfile()
        LammpsSimulation.on_post(self, configuration=configuration)

    def __get_results_from_lammps_outputfile(self):
        """Parse 'lammps.out' and populate self.results.

        Extracts total energy, atom count, (orthogonal) cell dimensions and
        the stress tensor components.

        Raises:
            NotImplementedError: if LAMMPS wrote an 'ERROR:' line.
        """
        _filename = os.path.join(
                self.task_directory,
                'lammps.out')
        with open(_filename, 'r') as f:
            lines = f.readlines()

        _variables = [
                'tot_energy',
                'num_atoms',
                'xx', 'yy', 'zz', 'xy', 'xz', 'yz',
                'tot_press',
                'pxx', 'pyy', 'pzz', 'pxy', 'pxz', 'pyz',
        ]

        _results = OrderedDict()
        for line in lines:
            # BUG FIX: this check used to sit inside the per-variable loop and
            # printed line.strip (the bound method object) instead of
            # line.strip(); it is now checked once per line and calls strip().
            if line.startswith('ERROR:'):
                print('line:{}'.format(line.strip()))
                raise NotImplementedError
            for name in _variables:
                if line.startswith('{} = '.format(name)):
                    _results[name] = float(line.split('=')[1].strip())

        _task_name = self.task_name
        self.results = OrderedDict()
        self.results['{}.{}'.format(_task_name, 'toten')] = _results['tot_energy']
        self.results['{}.{}'.format(_task_name, 'natoms')] = _results['num_atoms']
        # Cell vectors: only valid for orthogonal cells (off-diagonals are 0).
        self.results['{}.{}'.format(_task_name, 'a11')] = _results['xx']
        self.results['{}.{}'.format(_task_name, 'a12')] = 0
        self.results['{}.{}'.format(_task_name, 'a13')] = 0
        self.results['{}.{}'.format(_task_name, 'a21')] = 0
        self.results['{}.{}'.format(_task_name, 'a22')] = _results['yy']
        self.results['{}.{}'.format(_task_name, 'a23')] = 0
        self.results['{}.{}'.format(_task_name, 'a31')] = 0
        self.results['{}.{}'.format(_task_name, 'a32')] = 0
        self.results['{}.{}'.format(_task_name, 'a33')] = _results['zz']
        self.results['{}.{}'.format(_task_name, 'totpress')] = _results['tot_press']
        # The stress tensor is symmetric: p12=p21=pxy, p13=p31=pxz, p23=p32=pyz.
        self.results['{}.{}'.format(_task_name, 'p11')] = _results['pxx']
        self.results['{}.{}'.format(_task_name, 'p12')] = _results['pxy']
        self.results['{}.{}'.format(_task_name, 'p13')] = _results['pxz']
        self.results['{}.{}'.format(_task_name, 'p21')] = _results['pxy']
        self.results['{}.{}'.format(_task_name, 'p22')] = _results['pyy']
        self.results['{}.{}'.format(_task_name, 'p23')] = _results['pyz']
        self.results['{}.{}'.format(_task_name, 'p31')] = _results['pxz']
        self.results['{}.{}'.format(_task_name, 'p32')] = _results['pyz']
        self.results['{}.{}'.format(_task_name, 'p33')] = _results['pzz']

    def _lammps_input_run_minimization(self):
        """Return the minimization section of the LAMMPS input script."""
        str_out = (
            '# ---- define settings\n'
            'compute eng all pe/atom\n'
            'compute eatoms all reduce sum c_eng\n'
            '# ---- run minimization\n'
            'reset_timestep 0\n'
            'fix 1 all box/relax iso 0.0 vmax 0.001\n'
            'thermo 10\n'
            'thermo_style custom step pe lx ly lz xy xz yz press pxx pyy pzz pxy pxz pyz c_eatoms\n'
            'min_style cg\n'
            'minimize 1e-25 1e-25 5000 10000\n'
        )
        return str_out
| 2.5625 | 3 |
infobip_channels/email/models/path_paramaters/get_domain_details.py | infobip-community/infobip-api-python-sdk | 0 | 12768994 | <gh_stars>0
from infobip_channels.email.models.path_paramaters.core import EmailPathParameter
class GetDomainDetailsPathParameter(EmailPathParameter):
    """Path parameters for the "get domain details" endpoint.

    No fields beyond those inherited from EmailPathParameter.
    """
    pass
| 1.203125 | 1 |
plot/plotPuddleworld.py | architsakhadeo/Offline-Hyperparameter-Tuning-for-RL | 0 | 12768995 | <filename>plot/plotPuddleworld.py
import os
import numpy as np
import matplotlib.pyplot as plt
import scipy.stats as st
# Result directories for the three policy qualities, with their legend labels.
datapath = ["../data/best/puddleworldOptimal/", "../data/best/puddleworldSubOptimal/", "../data/best/puddleworldSubSubOptimal/"]
labels = ["optimal", "average", "bad"]
# Number of independent runs per configuration.
num_runs = 50
def findIndex(array, i):
    """Return the first position j with i < array[j], or None if none exists."""
    for j, threshold in enumerate(array):
        if i < threshold:
            return j
    return None
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']

# For each policy quality: reconstruct, from the per-run 'episodes'/'returns'
# files, a per-timestep episode-termination indicator and return signal over
# the first 4400 steps, average them over runs, then plot the windowed
# average return per timestep.
for d in range(3):
    subdirs = os.listdir(datapath[d])
    for i in range(len(subdirs)):
        average = [0.0 for k in range(4400)]
        individual_runs = [[0.0 for k in range(4400)] for l in range(num_runs)]
        individual_runs_returns = [[0.0 for k in range(4400)] for l in range(num_runs)]
        average_returns = [0.0 for k in range(4400)]
        max_episode_length = 0.0  # renamed: previously shadowed builtin max()
        files = os.listdir(datapath[d] + subdirs[i])
        for f in range(len(files)):
            # Only 'episodes' files drive the loop; the matching 'returns'
            # file is derived from the episodes file name.
            if 'episodes' not in files[f]:
                continue
            # 'with' ensures the handles are closed (they used to leak).
            with open(datapath[d] + subdirs[i] + '/' + files[f], 'r') as fh:
                content = fh.read().split('\n')
            with open(datapath[d] + subdirs[i] + '/' + files[f].replace('episodes', 'returns'), 'r') as fh:
                returns_content = fh.read().split('\n')
            # Drop the header line and the trailing empty line.
            content = content[1:-1]
            returns_content = returns_content[1:-1]
            for j in range(len(content)):
                content[j] = int(content[j])
                returns_content[j] = float(returns_content[j])
            if content[-1] > max_episode_length:
                max_episode_length = content[-1]
            # Timestep index at which each episode terminates.
            cumsum_content = np.cumsum(content)
            cumsum_content -= 1
            count = 0
            for t in cumsum_content:
                if t >= 4400:
                    continue
                individual_runs[f][t] = 1
                individual_runs_returns[f][t] = returns_content[count]
                average[t] += 1
                # BUG FIX: this accumulator used to read returns_content[count]
                # AFTER incrementing count, pairing episode t with the *next*
                # episode's return and indexing one past the end on the final
                # episode.  Both accumulators now use the same episode index.
                average_returns[t] += returns_content[count]
                count += 1
        for j in range(4400):
            average[j] /= (1.0 * num_runs)
            average_returns[j] /= (1.0 * num_runs)

        # Smooth with non-overlapping windows of window_size timesteps.
        window_size = 200
        mean = [0 for w in range(int(len(average) / window_size))]
        mean_returns = [0 for w in range(int(len(average_returns) / window_size))]
        for m in range(len(mean) - 1):
            mean[m] = window_size / np.sum(average[m * window_size: (m + 1) * window_size])
            mean_returns[m] = np.sum(average_returns[m * window_size: (m + 1) * window_size])
        xAxis = [window_size * w for w in range(int(len(average) / window_size))]
        plt.plot(xAxis[:-1], np.array(mean_returns[:-1]) / window_size, label=labels[d], color=colors[d])

plt.ylabel("Average return\n per timestep", rotation=0, labelpad=30)
plt.xlabel("Timesteps")
plt.legend()
plt.savefig("../data/best/puddleworldPolicies_softmax.png", bbox_inches='tight')
plt.show()
| 2.515625 | 3 |
testData/typeinspection/fieldFieldInvalid.py | seandstewart/typical-pycharm-plugin | 0 | 12768996 | <gh_stars>0
import typic
from builtins import *
# PyCharm-plugin highlighting fixture: the <warning descr="..."> markers are
# the expected inspection results, not Python syntax — keep them byte-identical.
@typic.klass
class A:
    a: int = typic.field(int(123))
    b = typic.field(int(123))
    c = typic.field(default=int(123))

A(<warning descr="Expected type 'int', got 'str' instead">a=str('123')</warning>, <warning descr="Expected type 'int', got 'str' instead">b=str('123')</warning>, <warning descr="Expected type 'int', got 'str' instead">c=str('123')</warning>)
| 2.703125 | 3 |
leetCode/algorithms/medium/lowest_common_ancestor_of_a_binary_tree.py | ferhatelmas/algo | 25 | 12768997 | class Solution:
def lowestCommonAncestor(self, root, p, q):
if root is None or p == root or q == root:
return root
l = self.lowestCommonAncestor(root.left, p, q)
r = self.lowestCommonAncestor(root.right, p, q)
if l is None:
return r
if r is None:
return l
return root
| 3.359375 | 3 |
orders/migrations/0001_initial.py | toert/django-shop-template | 1 | 12768998 | <filename>orders/migrations/0001_initial.py
# -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-03-22 05:52
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates Order and OrderedProduct.

    Generated by Django 1.10.5 — change the schema with new migrations,
    never by editing this file.
    """

    initial = True

    dependencies = [
        ('catalog', '0004_auto_20170322_0852'),
    ]

    operations = [
        migrations.CreateModel(
            name='Order',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('first_name', models.CharField(max_length=200, verbose_name='Имя')),
                ('last_name', models.CharField(max_length=200, verbose_name='Фамилия')),
                ('email', models.EmailField(max_length=254, verbose_name='Email')),
                ('city', models.CharField(max_length=80, verbose_name='Город')),
                ('address', models.TextField(verbose_name='Адрес')),
                ('postal_code', models.CharField(max_length=80, verbose_name='Почтовый индекс')),
                ('description', models.CharField(max_length=200, verbose_name='Краткое описание')),
                ('created', models.DateField(auto_now_add=True, verbose_name='Дата оформления заказа')),
                ('updated', models.DateField(auto_now=True, verbose_name='Дата последнего обновления')),
                ('when_paid', models.DateField(verbose_name='Дата оплаты')),
            ],
            options={
                'verbose_name': 'Заказ',
                'ordering': ['created'],
                'verbose_name_plural': 'Заказы',
            },
        ),
        migrations.CreateModel(
            name='OrderedProduct',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('price', models.DecimalField(decimal_places=2, max_digits=10, verbose_name='Цена за единицу')),
                ('quantity', models.PositiveIntegerField(verbose_name='Количество товара')),
                ('order', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='product', to='orders.Order', verbose_name='Заказ')),
                ('product', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='order', to='catalog.Product', verbose_name='Товар')),
            ],
        ),
    ]
| 1.476563 | 1 |
vagalume_json/busca_top_100_artistas_vagalume.py | Koalapvh13/python_exercises | 0 | 12768999 | import json
import requests
def remove_repetidos(lista):
    """Return a sorted copy of *lista* with duplicates removed.

    Equivalent to the original O(n^2) membership loop, but O(n log n):
    the set drops duplicates, sorted() orders the survivors.
    """
    return sorted(set(lista))
def busca_musicas(id_api):
    """Fetch every lyric of the artist at *id_api* and append its unique
    words to 'strings.txt'.

    For each song listed in the artist's index, the lyric text is fetched
    from the Vagalume search API, stripped of punctuation, split into
    de-duplicated sorted words, and each word is appended as '"word", '.

    Arguments:
        id_api {str} -- Base URL of the artist on Vagalume (must end in '/').
    """
    # One translation table replaces the original chain of 12 .replace() calls.
    tabela_pontuacao = str.maketrans('', '', '.,?!()[]{}/"')
    r = requests.get(id_api + 'index.js')
    if r.status_code == 200:
        dados_artista = json.loads(r.content)
        musicas = dados_artista['artist']['lyrics']['item']
        for id_music in musicas:
            r2 = requests.get('https://api.vagalume.com.br/search.php?musid=' + id_music['id'])
            if r2.status_code != 200:
                continue
            letra = json.loads(r2.content)['mus'][0]['text']
            lista = remove_repetidos(letra.translate(tabela_pontuacao).split())
            # Open the output file once per song instead of once per word.
            with open('strings.txt', 'a') as arquivo:
                for palavra in lista:
                    arquivo.write('"' + palavra + '", ')
    print('Salvo com Sucesso!')
###########################################################################################################################
# Script entry point: ask for the artist's Vagalume URL and harvest the words.
link_cantor = input('Informe o link do cantor no Vagalume.com: ')
busca_musicas(link_cantor)
| 3.046875 | 3 |
tests/test_dependency_duplicates.py | Aryabhata-Rootspring/fastapi | 53,007 | 12769000 | <reponame>Aryabhata-Rootspring/fastapi<gh_stars>1000+
from typing import List
from fastapi import Depends, FastAPI
from fastapi.testclient import TestClient
from pydantic import BaseModel
# FastAPI app under test plus a synchronous TestClient bound to it.
app = FastAPI()
client = TestClient(app)
class Item(BaseModel):
    """Minimal request model shared by every endpoint in this test module."""

    data: str
def duplicate_dependency(item: Item):
    """Dependency whose parameter name collides with the endpoint's own `item`."""
    return item
def dependency(item2: Item):
    """Dependency with a distinct parameter name, so no body-field collision."""
    return item2
def sub_duplicate_dependency(
    item: Item, sub_item: Item = Depends(duplicate_dependency)
):
    """Nested case: takes `item` directly and again via duplicate_dependency."""
    return [item, sub_item]
@app.post("/with-duplicates")
async def with_duplicates(item: Item, item2: Item = Depends(duplicate_dependency)):
return [item, item2]
@app.post("/no-duplicates")
async def no_duplicates(item: Item, item2: Item = Depends(dependency)):
return [item, item2]
@app.post("/with-duplicates-sub")
async def no_duplicates_sub(
item: Item, sub_items: List[Item] = Depends(sub_duplicate_dependency)
):
return [item, sub_items]
# Expected OpenAPI document.  Note: /with-duplicates and /with-duplicates-sub
# take a single Item body (the duplicated dependency field is merged), while
# /no-duplicates requires a composite body with both `item` and `item2`.
openapi_schema = {
    "openapi": "3.0.2",
    "info": {"title": "FastAPI", "version": "0.1.0"},
    "paths": {
        "/with-duplicates": {
            "post": {
                "summary": "With Duplicates",
                "operationId": "with_duplicates_with_duplicates_post",
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {"$ref": "#/components/schemas/Item"}
                        }
                    },
                    "required": True,
                },
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
            }
        },
        "/no-duplicates": {
            "post": {
                "summary": "No Duplicates",
                "operationId": "no_duplicates_no_duplicates_post",
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {
                                "$ref": "#/components/schemas/Body_no_duplicates_no_duplicates_post"
                            }
                        }
                    },
                    "required": True,
                },
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
            }
        },
        "/with-duplicates-sub": {
            "post": {
                "summary": "No Duplicates Sub",
                "operationId": "no_duplicates_sub_with_duplicates_sub_post",
                "requestBody": {
                    "content": {
                        "application/json": {
                            "schema": {"$ref": "#/components/schemas/Item"}
                        }
                    },
                    "required": True,
                },
                "responses": {
                    "200": {
                        "description": "Successful Response",
                        "content": {"application/json": {"schema": {}}},
                    },
                    "422": {
                        "description": "Validation Error",
                        "content": {
                            "application/json": {
                                "schema": {
                                    "$ref": "#/components/schemas/HTTPValidationError"
                                }
                            }
                        },
                    },
                },
            }
        },
    },
    "components": {
        "schemas": {
            "Body_no_duplicates_no_duplicates_post": {
                "title": "Body_no_duplicates_no_duplicates_post",
                "required": ["item", "item2"],
                "type": "object",
                "properties": {
                    "item": {"$ref": "#/components/schemas/Item"},
                    "item2": {"$ref": "#/components/schemas/Item"},
                },
            },
            "HTTPValidationError": {
                "title": "HTTPValidationError",
                "type": "object",
                "properties": {
                    "detail": {
                        "title": "Detail",
                        "type": "array",
                        "items": {"$ref": "#/components/schemas/ValidationError"},
                    }
                },
            },
            "Item": {
                "title": "Item",
                "required": ["data"],
                "type": "object",
                "properties": {"data": {"title": "Data", "type": "string"}},
            },
            "ValidationError": {
                "title": "ValidationError",
                "required": ["loc", "msg", "type"],
                "type": "object",
                "properties": {
                    "loc": {
                        "title": "Location",
                        "type": "array",
                        "items": {"type": "string"},
                    },
                    "msg": {"title": "Message", "type": "string"},
                    "type": {"title": "Error Type", "type": "string"},
                },
            },
        }
    },
}
def test_openapi_schema():
    """The generated OpenAPI document matches the expected schema exactly."""
    response = client.get("/openapi.json")
    assert response.status_code == 200, response.text
    assert response.json() == openapi_schema
def test_no_duplicates_invalid():
    """Omitting `item2` from the composite body yields 422 with a missing-field error."""
    response = client.post("/no-duplicates", json={"item": {"data": "myitem"}})
    assert response.status_code == 422, response.text
    assert response.json() == {
        "detail": [
            {
                "loc": ["body", "item2"],
                "msg": "field required",
                "type": "value_error.missing",
            }
        ]
    }
def test_no_duplicates():
    """Distinct dependency fields require both `item` and `item2` in the body."""
    response = client.post(
        "/no-duplicates",
        json={"item": {"data": "myitem"}, "item2": {"data": "myitem2"}},
    )
    assert response.status_code == 200, response.text
    assert response.json() == [{"data": "myitem"}, {"data": "myitem2"}]
def test_duplicates():
    """A duplicated dependency field is merged: one Item body feeds both params."""
    response = client.post("/with-duplicates", json={"data": "myitem"})
    assert response.status_code == 200, response.text
    assert response.json() == [{"data": "myitem"}, {"data": "myitem"}]
def test_sub_duplicates():
    """Nested duplicated dependencies also collapse to a single Item body."""
    response = client.post("/with-duplicates-sub", json={"data": "myitem"})
    assert response.status_code == 200, response.text
    assert response.json() == [
        {"data": "myitem"},
        [{"data": "myitem"}, {"data": "myitem"}],
    ]
| 2.265625 | 2 |
packios.py | skela/r | 0 | 12769001 | <filename>packios.py
#!/usr/bin/env python
import os
import plistlib
class PackIOS(object):
    """Build and package a Xamarin.iOS project into an IPA (Python 2 script).

    Wraps the Visual Studio for Mac command-line tool (vstool/mdtool) and
    manages Info.plist version/build numbers around the build.
    """
    def __init__(self, root, proj_folder, project, solution, release_notes=None, mdtool=None, configuration="Ad-Hoc"):
        # All paths are resolved relative to the repository root.
        self.name = proj_folder
        self.mdtool = mdtool
        self.proj_folder = os.path.join(root, proj_folder)
        self.project = os.path.join(root, project)
        self.solution = os.path.join(root, solution)
        self.project_bin = os.path.join(root, proj_folder, 'bin/iPhone/%s' % configuration)
        # Some configurations emit into the Release folder instead.
        self.fallback_project_bin = os.path.join(root, proj_folder, 'bin/iPhone/%s' % "Release")
        self.configuration = configuration
        self.project_name = os.path.splitext(os.path.basename(project))[0]
        self.release_notes = release_notes
        if release_notes is not None:
            self.release_notes = os.path.join(root, release_notes)
        if self.mdtool is None:
            self.mdtool = "/Applications/Visual Studio.app/Contents/MacOS/vstool"
        if not os.path.exists(self.mdtool):
            exit("Failed to locate mdtool - " + self.mdtool)
        if self.release_notes is not None:
            if not os.path.exists(self.release_notes):
                exit("Failed to locate release notes - %s" % self.release_notes)
    def get_project_bin_folder(self):
        """Return the configuration's bin folder, or the Release fallback."""
        if os.path.exists(self.project_bin):
            return self.project_bin
        return self.fallback_project_bin
    def get_release_notes(self):
        """Return the release-notes file contents, or '' when none was given."""
        if self.release_notes is None:
            return ""
        f = open(self.release_notes, 'r')
        rn = f.read()
        f.close()
        return rn
    def files_of_type(self, file_type, folder_path=None):
        """List file names ending in '.<file_type>' inside folder_path
        (defaults to the project bin folder)."""
        fpath = folder_path
        if fpath is None:
            fpath = self.get_project_bin_folder()
        files = os.listdir(fpath)
        ipa_files = []
        for f in files:
            if f.endswith('.'+file_type):
                ipa_files.append(f)
        return ipa_files
    def name_of_file(self, file_type):
        """Return the single file of the given type in the bin folder;
        exits when zero or more than one candidate is found."""
        ipa_files = self.files_of_type(file_type)
        if len(ipa_files) > 1:
            exit("Too many %s files, not sure which one to pick : %s" % (file_type,ipa_files))
        if len(ipa_files) == 0:
            exit("Failed to find %s file" % file_type)
        ipa = ipa_files[0]
        return ipa
    def name_of_ipa(self):
        """Return the IPA file name in the bin folder."""
        return self.name_of_file('ipa')
    def name_of_dsym(self):
        """Return the dSYM bundle name in the bin folder."""
        return self.name_of_file('app.dSYM')
    def path_to_ipa(self):
        """Return the full path to the built IPA."""
        name = self.name_of_ipa()
        bin_folder = self.get_project_bin_folder()
        ipa_path = os.path.join(bin_folder, name)
        return ipa_path
    def path_to_ipa_alt(self):
        """Like path_to_ipa(), but prefers a single sub-folder of the bin
        folder when one exists (newer Xamarin output layout)."""
        name = self.name_of_ipa()
        bin_folder = self.get_project_bin_folder()
        ipa_path = os.path.join(bin_folder, name)
        folders = []
        files = os.listdir(bin_folder)
        for f in files:
            tmp = os.path.join(bin_folder,f)
            if os.path.isdir(tmp):
                folders.append(os.path.join(tmp,name))
        if len(folders) == 1:
            ipa_path = folders[0]
        return ipa_path
    # This can hopefully be removed once Xamarin fix the bug they introduced in 9.8.0 / 9.8.1
    def dexamarin_ipas(self):
        """Move IPAs that Xamarin 9.8.x emitted into sub-folders up into the
        bin folder, suffixing each name with a counter to avoid clashes."""
        bin_folder = self.get_project_bin_folder()
        folders = []
        files = os.listdir(bin_folder)
        for f in files:
            tmp = os.path.join(bin_folder,f)
            if os.path.isdir(tmp):
                folders.append(tmp)
        counter = 0
        for folder in folders:
            ipa_files = self.files_of_type('ipa',folder)
            for ipa_name in ipa_files:
                ipa = os.path.join(folder,ipa_name)
                ipa_name = ipa_name.replace(".ipa","%d.ipa" % counter)
                dest = os.path.join(bin_folder,ipa_name)
                cmd = 'mv "%s" "%s"' % (ipa,dest)
                os.system(cmd)
                counter += 1
    def path_to_dsym(self):
        """Return the full path to the dSYM bundle."""
        name = self.name_of_dsym()
        return os.path.join(self.get_project_bin_folder(), name)
    def path_to_info_plist(self):
        """Return the project's Info.plist path."""
        return os.path.join(self.proj_folder, 'Info.plist')
    def get_build_number(self):
        """Return CFBundleVersion from Info.plist, or None when absent."""
        plist = self.path_to_info_plist()
        k = plistlib.readPlist(plist)
        if 'CFBundleVersion' in k:
            return k['CFBundleVersion']
        return None
    def set_build_number(self, build_num):
        """Write CFBundleVersion into Info.plist."""
        plist = self.path_to_info_plist()
        k = plistlib.readPlist(plist)
        k['CFBundleVersion'] = build_num
        plistlib.writePlist(k, plist)
    def increment_build_number(self):
        """Bump CFBundleVersion by one (starting at "1" when missing)."""
        build_number = self.get_build_number()
        if build_number is None:
            build_number = "1"
        else:
            build_number = str(int(build_number)+1)
        self.set_build_number(build_number)
    def decrement_build_number(self):
        """Lower CFBundleVersion by one (starting at "1" when missing)."""
        build_number = self.get_build_number()
        if build_number is None:
            build_number = "1"
        else:
            build_number = str(int(build_number)-1)
        self.set_build_number(build_number)
    def get_version_number(self):
        """Return CFBundleShortVersionString from Info.plist, or None."""
        plist = self.path_to_info_plist()
        k = plistlib.readPlist(plist)
        if 'CFBundleShortVersionString' in k:
            return k['CFBundleShortVersionString']
        return None
    def set_version_number(self, version_num):
        """Write CFBundleShortVersionString into Info.plist."""
        plist = self.path_to_info_plist()
        k = plistlib.readPlist(plist)
        k['CFBundleShortVersionString'] = version_num
        plistlib.writePlist(k, plist)
    def clean(self):
        """Delete the project's bin/ and obj/ output folders."""
        bin_folder = os.path.join(self.proj_folder, 'bin')
        obj_folder = os.path.join(self.proj_folder, 'obj')
        if os.path.exists(bin_folder):
            print 'Clearing away ' + bin_folder
            os.system('rm -fdr ' + bin_folder)
        if os.path.exists(obj_folder):
            print 'Clearing away ' + obj_folder
            os.system('rm -fdr ' + obj_folder)
    def build(self, verbosely=False):
        """Run the mdtool build and verify an IPA was produced, working
        around the Xamarin 9.8.x sub-folder output when necessary."""
        v = ""
        if verbosely:
            v = "-v"
        cmd_build = '"%s" %s build "--configuration:%s|iPhone" "--project:%s" "%s"' % (self.mdtool, v, self.configuration,self.project_name,self.solution)
        os.system(cmd_build)
        if len(self.files_of_type('ipa')) == 0:
            self.dexamarin_ipas()
        ipa_path = self.path_to_ipa()
        if not os.path.exists(ipa_path):
            exit("Failed to build ipa, i.e. its missing - " + ipa_path)
    def update_version(self):
        """Interactively offer to bump the build number and change the
        version number in Info.plist."""
        print '=>Update version information for ' + os.path.basename(self.project)
        build_number = self.get_build_number()
        print build_number
        msg = "Would you like to increment the build number? y/n\n> "
        if build_number is None:
            msg = "Has no build number, would you like to start one? y/n\n>"
        q = raw_input(msg)
        if q == "y":
            self.increment_build_number()
        version_number = self.get_version_number()
        print version_number
        msg = "Would you like to change the version number? y/n\n> "
        if version_number is None:
            msg = "Has no version number, would you like to set one? y/n\n>"
        q = raw_input(msg)
        if q == "y":
            version_number = raw_input("What to?> ")
            self.set_version_number(version_number)
    def run(self, update_versions=True, confirm_build=True):
        """Clean, optionally update versions, optionally confirm, then build."""
        self.clean()
        if update_versions:
            self.update_version()
        build_number = self.get_build_number()
        version_number = self.get_version_number()
        if build_number is None:
            build_number = "[Missing]"
        if version_number is None:
            version_number = "[Missing]"
        if confirm_build:
            print 'So thats version ' + version_number + " build " + build_number
            q = raw_input("Would you like to continue? y/n\n> ")
            if q != "y":
                print "Ok, not doing the build, suit yourself..."
                return
        self.build()
| 2.3125 | 2 |
novaplaylist/scrapers/__init__.py | gtnx/nova-playlist | 0 | 12769002 | <reponame>gtnx/nova-playlist
# -*- coding: utf-8 -*-
from Scraper import Scraper
from FipScraper import FipScraper
from NovaScraper import NovaScraper
from OuiScraper import OuiScraper
from NostalgieScraper import NostalgieScraper
from RadioparadiseScraper import RadioparadiseScraper
| 1.117188 | 1 |
src/sima/post/nonequidistantsignal.py | SINTEF/simapy | 0 | 12769003 | <gh_stars>0
# This an autogenerated file
#
# Generated with NonEquidistantSignal
from typing import Dict,Sequence,List
from dmt.entity import Entity
from dmt.blueprint import Blueprint
from .blueprints.nonequidistantsignal import NonEquidistantSignalBlueprint
from typing import Dict
from sima.post.generatorsignal import GeneratorSignal
from sima.post.signalproperties import SignalProperties
from sima.post.xyitem import XYItem
from sima.sima.scriptablevalue import ScriptableValue
# NOTE: generated code (see file header) — regenerate rather than hand-edit.
class NonEquidistantSignal(GeneratorSignal):
    """
    Keyword arguments
    -----------------
    name : str
         (default "")
    description : str
         (default "")
    _id : str
         (default "")
    scriptableValues : List[ScriptableValue]
    properties : List[SignalProperties]
    xunit : str
         Defines the unit of the x axis(default 's')
    yunit : str
         Defines the unit of the y axis(default '-')
    values : List[XYItem]
    ylabel : str
         (default "")
    xlabel : str
         (default "")
    """

    def __init__(self, name="", description="", _id="", xunit='s', yunit='-', ylabel="", xlabel="", **kwargs):
        """Initialize with defaults; remaining non-dict kwargs become attributes."""
        super().__init__(**kwargs)
        self.name = name
        self.description = description
        self._id = _id
        self.scriptableValues = list()
        self.properties = list()
        self.xunit = xunit
        self.yunit = yunit
        self.values = list()
        self.ylabel = ylabel
        self.xlabel = xlabel
        # Dict-valued kwargs are skipped here; they are handled by the base class.
        for key, value in kwargs.items():
            if not isinstance(value, Dict):
                setattr(self, key, value)

    @property
    def blueprint(self) -> Blueprint:
        """Return blueprint that this entity represents"""
        return NonEquidistantSignalBlueprint()

    @property
    def name(self) -> str:
        """"""
        return self.__name

    @name.setter
    def name(self, value: str):
        """Set name"""
        self.__name = str(value)

    @property
    def description(self) -> str:
        """"""
        return self.__description

    @description.setter
    def description(self, value: str):
        """Set description"""
        self.__description = str(value)

    @property
    def _id(self) -> str:
        """"""
        return self.___id

    @_id.setter
    def _id(self, value: str):
        """Set _id"""
        self.___id = str(value)

    @property
    def scriptableValues(self) -> List[ScriptableValue]:
        """"""
        return self.__scriptableValues

    @scriptableValues.setter
    def scriptableValues(self, value: List[ScriptableValue]):
        """Set scriptableValues"""
        if not isinstance(value, Sequence):
            raise Exception("Expected sequense, but was " , type(value))
        self.__scriptableValues = value

    @property
    def properties(self) -> List[SignalProperties]:
        """"""
        return self.__properties

    @properties.setter
    def properties(self, value: List[SignalProperties]):
        """Set properties"""
        if not isinstance(value, Sequence):
            raise Exception("Expected sequense, but was " , type(value))
        self.__properties = value

    @property
    def xunit(self) -> str:
        """Defines the unit of the x axis"""
        return self.__xunit

    @xunit.setter
    def xunit(self, value: str):
        """Set xunit"""
        self.__xunit = str(value)

    @property
    def yunit(self) -> str:
        """Defines the unit of the y axis"""
        return self.__yunit

    @yunit.setter
    def yunit(self, value: str):
        """Set yunit"""
        self.__yunit = str(value)

    @property
    def values(self) -> List[XYItem]:
        """"""
        return self.__values

    @values.setter
    def values(self, value: List[XYItem]):
        """Set values"""
        if not isinstance(value, Sequence):
            raise Exception("Expected sequense, but was " , type(value))
        self.__values = value

    @property
    def ylabel(self) -> str:
        """"""
        return self.__ylabel

    @ylabel.setter
    def ylabel(self, value: str):
        """Set ylabel"""
        self.__ylabel = str(value)

    @property
    def xlabel(self) -> str:
        """"""
        return self.__xlabel

    @xlabel.setter
    def xlabel(self, value: str):
        """Set xlabel"""
        self.__xlabel = str(value)
| 2.125 | 2 |
app/holidays.py | seratch/jp-holidays-for-slack | 5 | 12769004 | import logging
from typing import Optional
import requests
import datetime
from dataclasses import dataclass
logger = logging.getLogger(__name__)
@dataclass
class Holiday:
    """One public-holiday entry as returned by the kenall.jp holidays API."""

    title: str             # holiday name
    date: str              # ISO date string, e.g. "2021-01-01"
    day_of_week: str       # day-of-week code — TODO confirm exact format from the API
    day_of_week_text: str  # human-readable day-of-week label
def fetch_public_holiday(token: str, target_date: datetime.date) -> Optional[Holiday]:
    """Return the Holiday falling exactly on *target_date*, or None.

    Queries the kenall.jp holidays API for target_date's whole year and
    scans the result for a matching date string.

    NOTE(review): the HTTP status is never checked, so a non-200 response
    surfaces as an error from .json()/.get("data") — confirm intended.
    """
    response = requests.get(
        url="https://api.kenall.jp/v1/holidays",
        headers={"Authorization": f"Token {token}"},
        params={"year": target_date.year},
    )
    target_date_str = str(target_date)
    response_body = response.json()
    logger.debug(response_body)
    for holiday in response_body.get("data"):
        if holiday.get("date") == target_date_str:
            return Holiday(**holiday)
    return None
def fetch_next_public_holiday(
    token: str, target_date: datetime.date
) -> Optional[Holiday]:
    """Return the first public holiday on or after *target_date*.

    Fetches the holiday list for target_date's year from the kenall.jp API;
    if every holiday of that year is already past, recurses into the next
    year starting from January 1st.

    Args:
        token: kenall.jp API token.
        target_date: earliest acceptable holiday date.
    """
    response = requests.get(
        url="https://api.kenall.jp/v1/holidays",
        headers={"Authorization": f"Token {token}"},
        params={"year": target_date.year},
    )
    response_body = response.json()
    logger.debug(response_body)
    for holiday in response_body.get("data"):
        if datetime.date.fromisoformat(holiday.get("date")) >= target_date:
            return Holiday(**holiday)
    # No holiday left this year: look at next year's calendar.
    next_new_year_day = datetime.date(target_date.year + 1, 1, 1)
    # BUG FIX: the recursive call used to drop `token` (it passed the date as
    # the token and omitted target_date), raising TypeError at runtime.
    return fetch_next_public_holiday(token, next_new_year_day)
| 3.015625 | 3 |
precis/util.py | rukmal/precis | 0 | 12769005 | <reponame>rukmal/precis<filename>precis/util.py
import collections
import json
import logging
def buildData(data_file: str, override_files=None) -> dict:
    """Parse a base JSON data file and apply zero or more override files.

    Arguments:
        data_file {str} -- Path of the base JSON file.

    Keyword Arguments:
        override_files {list} -- Paths of JSON files whose values override
            the base data, applied in order (later files win)
            (default: {None}, meaning no overrides; a mutable [] default
            was removed).

    Returns:
        dict -- The merged data dictionary.
    """
    data = parseJSON(file_path=data_file)

    # Apply each override in order on top of the accumulated data.
    for override_file in override_files or ():
        override_data = parseJSON(file_path=override_file)
        data = applyOverride(base_dict=data, override_dict=override_data)

    return data
def parseJSON(file_path: str) -> dict:
    """Parse a JSON file.

    Arguments:
        file_path {str} -- File path of target JSON file.

    Raises:
        FileNotFoundError -- Raised if the target file is not found.
        JSONDecodeError -- Raised if there is an error parsing the JSON file.

    Returns:
        dict -- Dictionary of parsed JSON file contents.
    """
    try:
        # 'with' guarantees the handle is closed (the original leaked it),
        # and json.load parses straight from the stream.
        with open(file_path) as json_file:
            return json.load(json_file)
    except FileNotFoundError as e:
        logging.error('File %s not found' % file_path)
        logging.error(e)
        raise e
    except json.decoder.JSONDecodeError as e:
        logging.error('Error parsing JSON file %s' % file_path)
        logging.error(e)
        raise e
def applyOverride(base_dict: dict, override_dict: dict) -> dict:
    """Recursively merge *override_dict* into *base_dict* (mutates base_dict).

    Nested mappings are merged key-by-key; any other value replaces the
    base value outright.

    Arguments:
        base_dict {dict} -- Base dictionary (modified in place).
        override_dict {dict} -- Override dictionary.

    Returns:
        dict -- The updated base_dict.
    """
    # collections.Mapping was removed in Python 3.10; use collections.abc.
    from collections.abc import Mapping

    for k, v in override_dict.items():
        if isinstance(v, Mapping):
            base_dict[k] = applyOverride(base_dict.get(k, {}), v)
        else:
            base_dict[k] = v
    return base_dict
| 3.0625 | 3 |
account/forms.py | ShwethaRGowda/FADB | 149 | 12769006 | from django import forms
from .models import UserProfile
class ProfileForm(forms.ModelForm):
    """Model form for editing a UserProfile's display name and photo."""

    class Meta:
        model = UserProfile
        fields = ['name', 'photo']
        # Bootstrap 'form-control' styling for both widgets.
        widgets = {
            'name': forms.TextInput(attrs={'class': 'form-control'}),
            'photo': forms.FileInput(attrs={'class': 'form-control'}),
        }
assignments/09-bottles/bottles.py | jaeanderson/biosys-analytics | 0 | 12769007 | <filename>assignments/09-bottles/bottles.py<gh_stars>0
#!/usr/bin/env python3
"""
Name: jranderson
Date: 03.16.19
Purpose: bottles of beers
"""
import argparse
import os
import re
import sys
# --------------------------------------------------------
def get_args():
    """get command-line arguments"""
    parser = argparse.ArgumentParser(
        description='Bottles of beer song',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-n', '--num_bottles',
                        help='How many bottles',
                        metavar='INT',
                        type=int,
                        default=10)
    return parser.parse_args()
#---------------------------------------------------------
def warn(msg):
    """Print a message to STDERR"""
    sys.stderr.write(str(msg) + '\n')
#---------------------------------------------------------
def die(msg='Something bad happened'):
    """Emit *msg* on stderr via warn() and terminate with exit status 1."""
    warn(msg)
    raise SystemExit(1)
#---------------------------------------------------------
def main():
    """Sing the bottles-of-beer song, from N down to zero."""
    args = get_args()
    num_bottles = args.num_bottles

    if num_bottles < 1:
        die('N ({}) must be a positive integer'.format(num_bottles))

    for count in range(num_bottles, 0, -1):
        plural = '' if count == 1 else 's'
        print('{} bottle{} of beer on the wall,'.format(count, plural))
        print('{} bottle{} of beer,'.format(count, plural))
        print('Take one down, pass it around,')
        remaining = count - 1
        tail = '' if remaining == 0 else '\n'
        print('{} bottle{} of beer on the wall!{}'.format(
            remaining, '' if remaining == 1 else 's', tail))
#---------------------------------------------------------
# Script entry point.
if __name__=='__main__':
    main()
| 3.328125 | 3 |
riskrate_data/__init__.py | riskrate/riskrate-data-python | 0 | 12769008 | <gh_stars>0
from .connection import DataClient
from .helper import (
simple_query,
simple_query_dict,
simple_mutation,
insert_chunked,
)
| 1.234375 | 1 |
BasicPythonPrograms/Operator_Module_3.py | Pushkar745/PythonProgramming | 0 | 12769009 | import operator
s1="Marlin"
s2="beard"
print("The Concatenated string is :",end="")
print(operator.concat(s1,s2))
#using contains() to check if s1 contains s2
if(operator.contains(s1, s2)):
print("Marlin Contains Beard")
else:
print("Marlin Does not contains Beard")
| 4.34375 | 4 |
p110_balanced_binary_tree.py | feigaochn/leetcode | 0 | 12769010 | <reponame>feigaochn/leetcode<gh_stars>0
# coding: utf-8
# author: <NAME> <<EMAIL>>
# Problem: balanced binary tree
#
# Given a binary tree, determine if it is height-balanced.
#
# For this problem, a height-balanced binary tree is defined as a binary tree
# in which the depth of the two subtrees of every node never differ by more
# than 1.
#
# Subscribe to see which companies asked this question
#
# Show Tags
#
# Tree
# Depth-first Search
#
# Show Similar Problems
#
# (E) Maximum Depth of Binary Tree
# Definition for a binary tree node.
class TreeNode(object):
    """Plain binary-tree node: a value plus optional left/right children."""
    def __init__(self, x):
        self.val = x        # payload value
        self.left = None    # left child (TreeNode or None)
        self.right = None   # right child (TreeNode or None)
class Solution(object):
    def isBalanced(self, root):
        """
        :type root: TreeNode
        :rtype: bool

        A tree is height-balanced when, at every node, the heights of the
        two subtrees differ by at most one. height() returns -1 as soon as
        any subtree is unbalanced, short-circuiting the recursion.
        """
        def height(node):
            if node is None:
                return 0
            left = height(node.left)
            if left < 0:
                return -1
            right = height(node.right)
            if right < 0:
                return -1
            if abs(left - right) > 1:
                return -1
            return 1 + max(left, right)

        return height(root) >= 0
def main():
    """Ad-hoc smoke test driven by the local utils helper."""
    import utils
    solver = Solution()
    tree = utils.build_binary_tree([1, 2, 2, 3, None, None, 3, 4, None, None, 4])
    print(solver.isBalanced(tree))
    tree = utils.build_binary_tree([1])
    print(solver.isBalanced(tree))
    print(solver.isBalanced(None))
if __name__ == '__main__':
    main()
    pass  # stray no-op left behind; harmless
| 4.03125 | 4 |
cpa/imagelist.py | oba14/CellProfiler-Analyst | 1 | 12769011 | <gh_stars>1-10
from __future__ import print_function
import logging
import wx
import numpy as np
from properties import Properties
from dbconnect import *
from UserDict import DictMixin
import imagetools
p = Properties.getInstance()
db = DBConnect.getInstance()
class ImageListCtrl(wx.ListCtrl):
    """Virtual wx list showing image keys (and well keys, when defined).

    Double-clicking a row opens the corresponding image via imagetools.
    """
    def __init__(self, parent, imkeys):
        wx.ListCtrl.__init__(self, parent, -1,
                             style=wx.LC_REPORT|wx.LC_VIRTUAL|wx.LC_HRULES|wx.LC_VRULES)
        self.set_key_list(imkeys)
        self.Bind(wx.EVT_LIST_ITEM_ACTIVATED, self.OnItemActivated)

    def set_key_list(self, imkeys):
        """Populate the virtual list from a list of image keys.

        When the properties file defines well key columns, each row is
        fetched from the image table (image keys + well keys); otherwise
        only the image keys themselves are shown.
        """
        self.imkeys = imkeys
        if len(self.imkeys) > 0:
            columns_of_interest = well_key_columns(p.image_table)
            if len(columns_of_interest) > 0:
                # Leading comma so it concatenates after the image-key clause.
                columns_of_interest = ','+','.join(columns_of_interest)
                self.data = db.execute('SELECT %s%s FROM %s WHERE %s'%(
                    UniqueImageClause(),
                    columns_of_interest,
                    p.image_table,
                    GetWhereClauseForImages(imkeys)))
                self.cols = image_key_columns() + well_key_columns()
            else:
                self.data = np.array(self.imkeys)
                self.cols = image_key_columns()
        else:
            self.data = []
            self.cols = []
        self.data.sort()
        for i, col in enumerate(self.cols):
            self.InsertColumn(i, col)
            self.SetColumnWidth(i, 150)
        # Virtual list: tell wx how many rows exist; cells come from OnGetItemText.
        self.SetItemCount(len(imkeys))

    def OnItemActivated(self, event):
        # Open the activated image in a viewer window.
        imkey = self.imkeys[event.m_itemIndex]
        f = imagetools.ShowImage(tuple(imkey), p.image_channel_colors, self.GrandParent or self.Parent)
        f.Raise()

    def OnGetItemText(self, row, col):
        # wx virtual-list callback: supply the cell text on demand.
        return self.data[row][col]
class ImageListFrame(wx.Frame):
    """Top-level frame hosting a single ImageListCtrl."""
    def __init__(self, parent, imkeys, **kwargs):
        wx.Frame.__init__(self, parent, -1, **kwargs)
        layout = wx.BoxSizer(wx.VERTICAL)
        self.imlist = ImageListCtrl(self, imkeys)
        layout.Add(self.imlist, 1, wx.EXPAND)
        self.SetSizer(layout)
        self.SetAutoLayout(True)
self.SetAutoLayout(True)
if __name__ == "__main__":
    # Standalone launcher: load a properties file, then list every image.
    # NOTE(review): wx.PySimpleApp is deprecated in modern wxPython (use wx.App).
    app = wx.PySimpleApp()
    logging.basicConfig(level=logging.DEBUG,)

    if not p.show_load_dialog():
        print('Props file required')
        sys.exit()

    ilf = ImageListFrame(None, db.execute('SELECT %s from %s'%(UniqueImageClause(), p.image_table)))
    ilf.Show()
    app.MainLoop()
| 1.90625 | 2 |
h2o-py/tests/testdir_munging/pyunit_runif.py | ahmedengu/h2o-3 | 6,098 | 12769012 | from __future__ import print_function
import sys
sys.path.insert(1,"../../")
import h2o
from tests import pyunit_utils
def runif_check():
    """runif with equal seeds must agree; different seeds must differ."""
    frame = h2o.H2OFrame([[value] for value in range(1, 1001)])
    seeded_a = frame[0].runif(1234)
    seeded_b = frame[0].runif(1234)
    seeded_c = frame[0].runif(42)
    assert (seeded_a == seeded_b).all(), "Expected runif with the same seeds to return the same values."
    assert not (seeded_a == seeded_c).all(), "Expected runif with different seeds to return different values."
if __name__ == "__main__":
    # Standalone: the harness boots a local H2O cloud before running.
    pyunit_utils.standalone_test(runif_check)
else:
    # Imported by the test runner: a cloud already exists.
    runif_check()
| 2.25 | 2 |
Practical3-AddIns/ExplosionAddin/Install/ExplosionAddin_addin.py | masher92/GEOG_5790 | 0 | 12769013 | import arcpy
import pythonaddins
class ExplosionButtonClass(object):
    """Implementation for ExplosionAddin_addin.explosionbutton (Button)"""
    def __init__(self):
        self.enabled = True    # button is clickable in the toolbar
        self.checked = False   # plain push button, not a toggle
    def onClick(self):
        # Print message to confirm initialisation
        #pythonaddins.MessageBox('Have you applied a definition query to all necessary layers?', 'Query check', 4)
        pythonaddins.MessageBox("I am working", "Are you working?")
        # NOTE(review): absolute toolbox path is machine-specific - breaks on
        # any other install; consider resolving it relative to the add-in.
        pythonaddins.GPToolDialog("E:/MSc/Advanced-Programming/GitHub/GEOG_5790/Practical2-Scripts/Explosion Toolbox (v2).tbx", "Explosion")
| 2.46875 | 2 |
Programming_for_GIA_Core_Skills/Assessment_1/Task8_GUI's/model.py | jord9762/jordy9762.github.io | 0 | 12769014 | <reponame>jord9762/jordy9762.github.io
import matplotlib
matplotlib.use('TkAgg')
#imports random library which is helpful for testing outcomes, contains functions such as shuffle
import random
#imports operators which may be missing from default Spyder
import operator
#imports the csv library necessary for reading the 'environmental' csv data (in.csv).
import csv
#imports the agentframework file which must be in the same directory to work, by importing this we can import the agent class.
import agentframework
#imports animations from pyplot which will allow
import matplotlib.animation
import matplotlib.pyplot
import requests
import bs4
#provides connection between the standard python interface to the Tk GUI toolkit
import tkinter
"""WARNING!!!!!"""
"""Note to visualise the code in this file the code %matplotlib qt must be inputted in to the ipython console first. Or alternatively
the code can be ran in the command prompt. Note to run this code more than once in the Jupyter terminal may require a restart of the kernel."""
"""https://www.youtube.com/watch?v=8exB6Ly3nx0 this excellent resource had info on combining GUI with matplotlib data"""
#creates a new empty list for what will be the csv environment data
environment = []
#empty list for agents
agents = []
num_of_agents = 10          # number of agents created below
num_of_iterations = 100     # model sweeps performed per animation frame
neighbourhood = 20          # radius used by share_with_neighbours()
#specifies pop up figure dimensions
fig = matplotlib.pyplot.figure(figsize=(7, 7))
ax = fig.add_axes([0, 0, 1, 1])
# flag read by gen_function(); clearing it stops the animation early
carry_on = True
def run():
    """Button callback: start the agent-based-model animation on the canvas."""
    global animation
    # Keep a module-level reference: matplotlib only holds a weak reference to
    # FuncAnimation, so if the sole reference is a local variable the animation
    # can be garbage-collected and silently stop.
    animation = matplotlib.animation.FuncAnimation(fig, update, frames=gen_function, repeat=False)
    canvas.draw() #show did not work see here for solution https://stackoverflow.com/questions/50165115/unable-to-call-canvas-show
#used to call GUI
root = tkinter.Tk()
root.wm_title("Model")
#Open window having dimension 700x700
root.geometry('700x700')
menu_bar = tkinter.Menu(root)
root.config(menu=menu_bar)
#configures GUI background to grey
root.configure(background="grey")
#my_button class and parameters below change the GUI button to blue
my_button = tkinter.Button(root, text="Run model", command=run, bg='blue')#https://pythonexamples.org/python-tkinter-button-background-color/#:~:text=You%20can%20change%20the%20background,bg%20property%20as%20shown%20below.&text=The%20default%20color%20of%20Tkinter%20Button%20is%20grey.
my_button.pack(side=tkinter.TOP)#https://www.youtube.com/watch?v=Uk2FivOD8qo got idea from here
canvas = matplotlib.backends.backend_tkagg.FigureCanvasTkAgg(fig, master=root)
canvas._tkcanvas.pack(side=tkinter.TOP, fill=tkinter.BOTH, expand=1)
f = open('in.csv', newline='')
#Note that the correct directory must be navigated to in the terminal else the full file path will be needed
reader = csv.reader(f, quoting=csv.QUOTE_NONNUMERIC)
for row in reader: # A list of rows
rowlist = []
for value in row: # A list of value
rowlist.append(value)
environment.append(rowlist)
# Make the agents.
for i in range(num_of_agents):
agents.append(agentframework.agent(environment, agents))
def update(frame_number):
    """Animation callback: advance the model and redraw agents + environment.

    NOTE(review): each drawn frame runs num_of_iterations full sweeps, so one
    animation frame advances the model 100 steps - confirm this is intended.
    """
    fig.clear()
    global carry_on  # declared but not modified here; gen_function reads it
    # Move the agents.
    for j in range(num_of_iterations):
        for i in range(num_of_agents):
            agents[i].move()
            agents[i].eat()
            agents[i].share_with_neighbours(neighbourhood)

    #plot agents
    matplotlib.pyplot.xlim(0, 100)
    matplotlib.pyplot.ylim(0, 100)
    matplotlib.pyplot.imshow(environment)
    for i in range(num_of_agents):
        matplotlib.pyplot.scatter(agents[i]._x, agents[i]._y)
"""In the gen_function function we are assigning the number of iterations the animation will go through. a=0
means starting at 0 and carry_on while a =<10 leads to 10 iterations being called as the anmation will carry
on until the store reaches 9."""
def gen_function(b = [0]):
store = 0
global carry_on #Not actually needed as we're not assigning, but clearer
while (store < 10) & (carry_on) :
yield store # Returns control and waits next call.
store = store + 1
# Enter the Tk event loop; blocks until the window is closed.
tkinter.mainloop()
| 3 | 3 |
spvcm/both_levels/sma_se/__init__.py | weikang9009/spvcm | 14 | 12769015 | from .model import SMASE
| 1.101563 | 1 |
users/forms.py | faisoabdirisak/NextDoor | 0 | 12769016 | <filename>users/forms.py
from cProfile import label
from django.forms import ModelForm
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
class CustomUserCreation(UserCreationForm):
    """Sign-up form based on Django's UserCreationForm.

    Adds the 'un' CSS class to every widget for styling.
    """
    class Meta:
        model=User
        # NOTE(review): '<PASSWORD>' entries look like redacted field names
        # (likely 'password1'/'password2') - confirm before shipping.
        fields=['username','email','<PASSWORD>','<PASSWORD>']
        labels={
        }
    def __init__(self, *args,**kwargs):
        super(CustomUserCreation,self).__init__(*args,**kwargs)
        # Tag every widget with the shared CSS class.
        for name,field in self.fields.items():
            field.widget.attrs.update({'class':'un'})
coursesApp/migrations/0005_delete_lessons.py | glen-s-abraham/Elearning-platform | 0 | 12769017 | <gh_stars>0
# Generated by Django 3.0.6 on 2020-06-03 12:42
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: drop the Lessons model."""

    dependencies = [
        ('coursesApp', '0004_auto_20200603_1809'),
    ]

    operations = [
        migrations.DeleteModel(
            name='Lessons',
        ),
    ]
| 1.289063 | 1 |
src/third_party/wiredtiger/test/suite/test_rollback_to_stable25.py | benety/mongo | 0 | 12769018 | #!/usr/bin/env python
#
# Public Domain 2014-present MongoDB, Inc.
# Public Domain 2008-2014 WiredTiger, Inc.
#
# This is free and unencumbered software released into the public domain.
#
# Anyone is free to copy, modify, publish, use, compile, sell, or
# distribute this software, either in source code form or as a compiled
# binary, for any purpose, commercial or non-commercial, and by any
# means.
#
# In jurisdictions that recognize copyright laws, the author or authors
# of this software dedicate any and all copyright interest in the
# software to the public domain. We make this dedication for the benefit
# of the public at large and to the detriment of our heirs and
# successors. We intend this dedication to be an overt act of
# relinquishment in perpetuity of all present and future rights to this
# software under copyright law.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
# IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import wiredtiger, wttest
from wtscenario import make_scenarios, filter_scenarios
# test_rollback_to_stable25.py
# Check various scenarios relating to RLE cells in column-store.
#
# We write at three different timestamps:
# 10 - aaaaaa or none
# 20 - bbbbbb or delete or none
# 30 - cccccc or delete or none
#
# and we evict to push things to disk after any of these,
# and we roll back to either 15 or 25.
#
# The writes can be either uniform, heterogeneous, first key, middle key, or last key.
#
# We do this with a group of 5 keys 2..6. Keys 1 and 6 are written with zzzzzz at
# timestamp 5 and evicted to ensure that the group of keys we're using is isolated
# from other unused keys.
#
# This generates a lot of cases, but we filter pointless combinations and they run fast.
# Put these bits outside the class definition so they can be referred to both in class
# instances and in the scenario setup logic, which doesn't have a class instance yet.
my_rle_size = 5

def keys_of_write(write):
    # Map a write-pattern code to the keys it touches:
    # 'u'/'h' -> the whole group, 'f' -> first, 'm' -> middle, 'l' -> last.
    if write in ('u', 'h'):
        return range(2, 2 + my_rle_size)
    if write == 'f':
        return [2]
    if write == 'm':
        return [2 + my_rle_size // 2]
    return [2 + my_rle_size - 1]
class test_rollback_to_stable25(wttest.WiredTigerTestCase):
    """RTS scenarios around RLE cells in column-store (see file header)."""
    conn_config = 'in_memory=false'

    write_10_values = [
        ('10u', dict(write_10='u')),
        ('10h', dict(write_10='h')),
        ('10f', dict(write_10='f')),
        ('10m', dict(write_10='m')),
        ('10l', dict(write_10='l')),
    ]
    type_10_values = [
        ('nil', dict(type_10=None)),
        ('upd', dict(type_10='upd')),
    ]
    write_20_values = [
        ('20u', dict(write_20='u')),
        ('20h', dict(write_20='h')),
        ('20f', dict(write_20='f')),
        ('20m', dict(write_20='m')),
        ('20l', dict(write_20='l')),
    ]
    type_20_values = [
        ('nil', dict(type_20=None)),
        ('upd', dict(type_20='upd')),
        ('del', dict(type_20='del')),
    ]
    write_30_values = [
        ('30u', dict(write_30='u')),
        ('30h', dict(write_30='h')),
        ('30f', dict(write_30='f')),
        ('30m', dict(write_30='m')),
        ('30l', dict(write_30='l')),
    ]
    type_30_values = [
        ('nil', dict(type_30=None)),
        ('upd', dict(type_30='upd')),
        ('del', dict(type_30='del')),
    ]
    evict_time_values = [
        ('chk10', dict(evict_time=10)),
        ('chk20', dict(evict_time=20)),
        ('chk30', dict(evict_time=30)),
    ]
    rollback_time_values = [
        ('roll15', dict(rollback_time=15)),
        ('roll25', dict(rollback_time=25)),
    ]

    def is_meaningful(name, vals):
        # The last write at evict time should be uniform, to get an RLE cell.
        if vals['evict_time'] == 10 and vals['write_10'] != 'u':
            return False
        if vals['evict_time'] == 20 and vals['write_20'] != 'u':
            return False
        if vals['evict_time'] == 30 and vals['write_30'] != 'u':
            return False
        # If the type is nil, the value must be uniform.
        if vals['type_10'] is None and vals['write_10'] != 'u':
            return False
        if vals['type_20'] is None and vals['write_20'] != 'u':
            return False
        if vals['type_30'] is None and vals['write_30'] != 'u':
            return False
        # Similarly, delete and heterogeneous doesn't make sense.
        if vals['type_10'] == 'del' and vals['write_10'] == 'h':
            return False
        if vals['type_20'] == 'del' and vals['write_20'] == 'h':
            return False
        # BUG FIX: this previously re-checked type_20 against write_30, so the
        # timestamp-30 delete+heterogeneous combination was never filtered.
        if vals['type_30'] == 'del' and vals['write_30'] == 'h':
            return False
        # Both 10 and 20 shouldn't be nil. That's equivalent to 10 and 30 being nil.
        if vals['type_10'] is None and vals['type_20'] is None:
            return False
        # Avoid cases that delete nonexistent values.
        def deletes_nonexistent():
            present = {}
            for k in range(2, 2 + my_rle_size):
                present[k] = False
            def adjust(ty, write):
                if ty is None:
                    return
                for k in keys_of_write(write):
                    if ty == 'upd':
                        present[k] = True
                    elif ty == 'del':
                        if present[k]:
                            present[k] = False
                        else:
                            raise KeyError
            adjust(vals['type_10'], vals['write_10'])
            adjust(vals['type_20'], vals['write_20'])
            adjust(vals['type_30'], vals['write_30'])
        try:
            deletes_nonexistent()
        except KeyError:
            return False
        return True

    scenarios = filter_scenarios(make_scenarios(write_10_values, type_10_values,
                                                write_20_values, type_20_values,
                                                write_30_values, type_30_values,
                                                evict_time_values,
                                                rollback_time_values),
                                 is_meaningful)

    # Sentinel value written to the bookend keys (1 and 2 + my_rle_size).
    value_z = "zzzzz" * 10

    def writes(self, uri, s, expected, ty, write, value, ts):
        """Apply one timestamped write pattern and mirror it in *expected*."""
        if ty is None:
            # do nothing at all
            return
        cursor = s.open_cursor(uri)
        s.begin_transaction()
        for k in keys_of_write(write):
            if ty == 'upd':
                # Heterogeneous writes append the key so adjacent cells differ.
                myval = value + str(k) if write == 'h' else value
                cursor[k] = myval
                expected[k] = myval
            else:
                cursor.set_key(k)
                cursor.remove()
                del expected[k]
        s.commit_transaction('commit_timestamp=' + self.timestamp_str(ts))
        cursor.close()

    def evict(self, uri, s):
        # Evict the page to force reconciliation.
        evict_cursor = s.open_cursor(uri, None, "debug=(release_evict)")
        s.begin_transaction()
        # Search the key to evict it. Use both bookends.
        v = evict_cursor[1]
        self.assertEqual(v, self.value_z)
        v = evict_cursor[2 + my_rle_size]
        self.assertEqual(v, self.value_z)
        self.assertEqual(evict_cursor.reset(), 0)
        s.rollback_transaction()
        evict_cursor.close()

    def check(self, uri, s, ts, expected):
        """Read at *ts* and verify the table matches *expected* exactly."""
        cursor = s.open_cursor(uri)
        s.begin_transaction('read_timestamp=' + self.timestamp_str(ts))
        # endpoints should still be in place
        self.assertEqual(cursor[1], self.value_z)
        self.assertEqual(cursor[2 + my_rle_size], self.value_z)
        for k in range(2, 2 + my_rle_size):
            if k in expected:
                self.assertEqual(cursor[k], expected[k])
            else:
                cursor.set_key(k)
                r = cursor.search()
                self.assertEqual(r, wiredtiger.WT_NOTFOUND)
        s.rollback_transaction()
        cursor.close()

    def test_rollback_to_stable25(self):
        # Create a table without logging.
        uri = "table:rollback_to_stable25"
        self.session.create(uri, 'key_format=r,value_format=S')

        # Pin oldest timestamp to 2.
        self.conn.set_timestamp('oldest_timestamp=' + self.timestamp_str(2))
        # Start stable timestamp at 2.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(2))

        value_a = "aaaaa" * 10
        value_b = "bbbbb" * 10
        value_c = "ccccc" * 10

        s = self.conn.open_session()

        # Write the endpoints at time 5 so the key group under test is isolated.
        cursor = s.open_cursor(uri)
        s.begin_transaction()
        cursor[1] = self.value_z
        cursor[2 + my_rle_size] = self.value_z
        s.commit_transaction('commit_timestamp=' + self.timestamp_str(5))
        self.evict(uri, s)
        cursor.close()

        # Do writes at time 10.
        expected = {}
        self.writes(uri, s, expected, self.type_10, self.write_10, value_a, 10)
        expected10 = expected.copy()

        # Evict at time 10 if requested.
        if self.evict_time == 10:
            self.evict(uri, s)

        # Do more writes at time 20.
        self.writes(uri, s, expected, self.type_20, self.write_20, value_b, 20)
        expected20 = expected.copy()

        # Evict at time 20 if requested.
        if self.evict_time == 20:
            self.evict(uri, s)

        # Do still more writes at time 30.
        self.writes(uri, s, expected, self.type_30, self.write_30, value_c, 30)
        expected30 = expected.copy()

        # Evict at time 30 if requested.
        if self.evict_time == 30:
            self.evict(uri, s)

        # Now roll back.
        self.conn.set_timestamp('stable_timestamp=' + self.timestamp_str(self.rollback_time))
        self.conn.rollback_to_stable()
        if self.rollback_time < 20:
            expected20 = expected10
            expected30 = expected10
        elif self.rollback_time < 30:
            expected30 = expected20

        # Now make sure we see what we expect.
        self.check(uri, s, 10, expected10)
        self.check(uri, s, 20, expected20)
        self.check(uri, s, 30, expected30)
| 1.296875 | 1 |
murano/tests/unit/api/v1/test_schemas.py | openstack/murano | 91 | 12769019 | <reponame>openstack/murano
# Copyright (c) 2016 AT&T Corp
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import mock
from oslo_messaging.rpc import client
from webob import exc
from murano.api.v1 import schemas
import murano.tests.unit.base as test_base
from murano.tests.unit import utils as test_utils
@mock.patch('murano.api.v1.schemas.policy')
@mock.patch('murano.api.v1.schemas.request_statistics.update_error_count')
@mock.patch('murano.api.v1.schemas.request_statistics.update_count')
class TestSchemas(test_base.MuranoTestCase):
    """Unit tests for the /schemas API controller."""

    @classmethod
    def setUpClass(cls):
        super(TestSchemas, cls).setUpClass()
        cls.controller = schemas.Controller()

    @mock.patch('murano.api.v1.schemas.rpc')
    def test_get_schema(self, mock_rpc, *args):
        """Happy path: controller returns the schema produced by the engine."""
        dummy_context = test_utils.dummy_context()
        dummy_context.GET = {
            'classVersion': 'test_class_version',
            'packageName': 'test_package_name'
        }
        mock_request = mock.MagicMock(context=dummy_context)
        mock_rpc.engine().generate_schema.return_value = 'test_schema'

        result = self.controller.get_schema(mock_request, 'test_class')
        self.assertEqual('test_schema', result)

    @mock.patch('murano.api.v1.schemas.rpc')
    def test_get_schema_negative(self, mock_rpc, *args):
        """Known RemoteError types map to HTTP 404; others propagate."""
        dummy_context = test_utils.dummy_context()
        dummy_context.GET = {
            'classVersion': 'test_class_version',
            'packageName': 'test_package_name'
        }
        mock_request = mock.MagicMock(context=dummy_context)

        # Test exception handling for pre-defined exception types.
        exc_types = ('NoClassFound',
                     'NoPackageForClassFound',
                     'NoPackageFound')
        for exc_type in exc_types:
            dummy_error = client.RemoteError(exc_type=exc_type,
                                             value='dummy_value')
            mock_rpc.engine().generate_schema.side_effect = dummy_error
            with self.assertRaisesRegex(exc.HTTPNotFound,
                                        dummy_error.value):
                self.controller.get_schema(mock_request, 'test_class')

        # Test exception handling for miscellaneous exception type.
        dummy_error = client.RemoteError(exc_type='TestExcType',
                                         value='dummy_value')
        mock_rpc.engine().generate_schema.side_effect = dummy_error
        with self.assertRaisesRegex(client.RemoteError,
                                    dummy_error.value):
            self.controller.get_schema(mock_request, 'test_class')
| 1.867188 | 2 |
autocomplete_light/example_apps/unuseable_virtualfield/models.py | metzlar/django-autocomplete-light | 1 | 12769020 | <filename>autocomplete_light/example_apps/unuseable_virtualfield/models.py
from django.db import models
from vote.managers import VotableManager
class HasVotes(models.Model):
    """Minimal model exposing the django-vote VotableManager as `votes`."""
    votes = VotableManager()
| 1.59375 | 2 |
MicroPython/esptool-master/espressif/efuse/esp32s2/__init__.py | hu-tianyi/AuTrix | 1 | 12769021 | <filename>MicroPython/esptool-master/espressif/efuse/esp32s2/__init__.py
from .fields import EspEfuses # noqa: F401
from . import operations # noqa: F401
| 0.976563 | 1 |
simscale_eba/cli/set_env_variables.py | SimScaleGmbH/external-building-aerodynamics | 0 | 12769022 | import os
import click
def SetVars(api_url, api_key):
    '''
    Takes API key and URL, sets them to environment variables

    Parameters
    ----------
    api_url : str
        The SimScale API URL to call.
    api_key : str
        The SimScale API Key to use when calling, this is equivilent to
        the users password for the API, it should never be printed.
    '''
    try:
        os.environ["SIMSCALE_API_URL"] = str(api_url)
        os.environ['SIMSCALE_API_KEY'] = str(api_key)
        print("Your API key has been set to the environment")
    except Exception as err:
        # Chain the original error instead of swallowing it (the previous
        # bare `except:` hid the real cause, including KeyboardInterrupt).
        raise Exception("Could not set environment variables") from err
@click.command("set-api-variables")
@click.argument(
    'api-url',
    type=str
)
@click.argument(
    'api-key',
    type=str
)
def set_variables(api_url, api_key):
    """CLI entry point: store the SimScale API URL and key in the environment."""
    SetVars(api_url, api_key)
| 2.625 | 3 |
repository/templatetags/file_type_tags.py | christianwgd/django_restic_gui | 5 | 12769023 | import os
from django import template
from django.utils.safestring import mark_safe
from repository.models import FileExt
register = template.Library()
generic = '<path fill-rule="evenodd" d="M4 0h8a2 2 0 0 1 2 2v12a2 2 0 0 1-2 2H4a2 2 0 0 1-2-2V2a2 2 0 0 1 2-2zm0 1a1 1 0 0 0-1 1v12a1 1 0 0 0 1 1h8a1 1 0 0 0 1-1V2a1 1 0 0 0-1-1H4z"/>'
@register.simple_tag()
def get_file_icon(name):
    """Return safe SVG path markup for *name*'s file type.

    Falls back to the generic document icon when the file has no
    extension or no matching FileExt entry exists.
    """
    _, extension = os.path.splitext(name)
    extension = extension[1:]  # drop the leading dot
    if not extension:
        return mark_safe(generic)
    try:
        file_ext = FileExt.objects.get(name=extension.lower())
    except FileExt.DoesNotExist:
        return mark_safe(generic)
    return mark_safe(file_ext.type.svg_path)
| 2.34375 | 2 |
data_helpers.py | nicholaswen/cnn-text-classification-tf | 0 | 12769024 | import numpy as np
import re
import pandas as pd
def clean_str(string):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Original taken from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
    """
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # The replacement templates below deliberately emit escaped tokens
    # ("\(", "\)", "\?") to match the upstream vocabulary. They are now raw
    # strings: as plain literals, "\(" etc. are invalid escape sequences and
    # trigger SyntaxWarning on modern Python (output is unchanged).
    string = re.sub(r"\(", r" \( ", string)
    string = re.sub(r"\)", r" \) ", string)
    string = re.sub(r"\?", r" \? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()
def load_data_sarc(input_file, training, sample_percent=1.0):
    """Load the SARC reddit CSV and return a 70/30 train/test split.

    Arguments:
        input_file -- path to a CSV with 'label' and 'comment' columns.
        training -- True to return the training split, False for the test split.
        sample_percent -- fraction of rows (from the top) to use.

    Returns:
        [texts, labels] where labels is an ndarray of one-hot rows
        ([0, 1] = sarcastic, [1, 0] = not sarcastic).
    """
    reddit = pd.read_csv(input_file)
    cutoff = int(len(reddit) * sample_percent)

    raw_labels = reddit['label'].values[:cutoff]
    labels = [[0, 1] if l == 1 else [1, 0] for l in raw_labels]
    split_index = int(len(labels) * 0.7)
    train_labels, test_labels = labels[:split_index], labels[split_index:]

    # Process data (coerce every comment to str to guard against NaN rows).
    text = [str(x) for x in reddit['comment'].values][:cutoff]
    train_text, test_text = text[:split_index], text[split_index:]

    if training:
        return [train_text, np.array(train_labels)]
    return [test_text, np.array(test_labels)]
def load_data_ghosh(input_file):
    """Load the Ghosh tweet file: one "<id>\\t<label>\\t<text>" per line.

    Returns:
        [texts, labels] where labels is an ndarray of one-hot rows
        ([0, 1] = sarcastic, [1, 0] = not sarcastic).
    """
    with open(input_file) as f:
        lines = [line.strip() for line in f.readlines()]

    twitter = pd.DataFrame(lines)
    parts = twitter[0].str.split("\t", n=2, expand=True)
    twitter_text = [tweet for tweet in parts[2]]
    # Use equality, not identity: `l is '1'` only worked by accident via
    # CPython's interning of one-character strings.
    twitter_labels = np.array([[0, 1] if l == '1' else [1, 0] for l in parts[1]])
    return [twitter_text, twitter_labels]
def load_data_and_labels(positive_data_file, negative_data_file):
    """
    Loads MR polarity data from files, splits the data into words and generates labels.
    Returns split sentences and labels.
    """
    # Load data from files with context managers (the original left both
    # file handles open until garbage collection).
    with open(positive_data_file, "r", encoding='utf-8') as f:
        positive_examples = [s.strip() for s in f.readlines()]
    with open(negative_data_file, "r", encoding='utf-8') as f:
        negative_examples = [s.strip() for s in f.readlines()]
    # Split by words
    x_text = positive_examples + negative_examples
    x_text = [clean_str(sent) for sent in x_text]
    # Generate labels ([0, 1] = positive, [1, 0] = negative)
    positive_labels = [[0, 1] for _ in positive_examples]
    negative_labels = [[1, 0] for _ in negative_examples]
    y = np.concatenate([positive_labels, negative_labels], 0)
    return [x_text, y]
def batch_iter_one_epoch(data, batch_size, shuffle=True):
    """Yield mini-batches covering *data* exactly once."""
    arr = np.array(data)
    size = len(arr)
    total_batches = int((size - 1) / batch_size) + 1
    if shuffle:
        arr = arr[np.random.permutation(np.arange(size))]
    for batch in range(total_batches):
        lo = batch * batch_size
        hi = min((batch + 1) * batch_size, size)
        yield arr[lo:hi]
def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """
    Generates a batch iterator for a dataset.
    """
    arr = np.array(data)
    size = len(arr)
    batches_per_epoch = int((size - 1) / batch_size) + 1
    for epoch in range(num_epochs):
        # Shuffle the data at each epoch
        print("Epoch: %d" % epoch)
        epoch_data = arr[np.random.permutation(np.arange(size))] if shuffle else arr
        for batch in range(batches_per_epoch):
            lo = batch * batch_size
            hi = min((batch + 1) * batch_size, size)
            yield epoch_data[lo:hi]
#load_data_sarc('data/train-balanced-sarcasm.csv', True)
#load_data_ghosh('data/ghosh/train.txt')
| 3.09375 | 3 |
2020/adventofcode.py | wmdrthr/advent-of-code | 0 | 12769025 | #! /usr/bin/env python3
# encoding: utf-8
import os, sys, re
import time
from pprint import pprint
from datetime import datetime, timedelta
import itertools
import collections
import functools
import math
import pytz
import requests
YEAR = 2020
SESSIONID_FILE = '~/.config/adventofcode/session'
USER_AGENT = 'wmdrthr/advent-of-code'
def get_session_id():
    """Read the AoC session cookie from SESSIONID_FILE; exit(3) if missing."""
    try:
        # Context manager so the config file handle is closed promptly.
        with open(os.path.expanduser(SESSIONID_FILE)) as f:
            sessionid = f.read()
        return sessionid.strip()
    except (OSError, IOError) as err:
        print('Could not load session-id - ', str(err))
        print("""Puzzle inputs differ by user. Log in to the Advent of Code site,
then check your cookies to get the value of session-id. Save this
value in {}""".format(os.path.expanduser(SESSIONID_FILE)))
        sys.exit(3)
def guess_day():
    """
    Today's date, if it's during the Advent of Code. Happy Holidays!
    Raises exception otherwise.
    """
    now = datetime.now(tz=pytz.timezone('Asia/Kolkata'))
    if now.year != YEAR or now.month != 12 or now.day > 25:
        # BUG FIX: the original template was 'AoC {%d}', which raised
        # KeyError('%d') inside .format() whenever this error path fired.
        raise Exception('AoC {} not currently running, day must be provided.'.format(YEAR))
    unlock = now.replace(hour = 10, minute = 30,
                         second = 0, microsecond = 0) # Midnight EST -> 10:30 AM IST
    if now < unlock:
        # Before today's unlock time, yesterday's puzzle is the latest one.
        now = now - timedelta(days = 1)
    return now.day
def get_data(day):
    "Get input data for day (1-25) and year (> 2015)"

    inputfile = 'inputs/input{:02}.txt'.format(day)

    if os.path.exists(inputfile):
        # Cached copy from a previous run.
        # NOTE(review): handle is left to GC; a with-block would be cleaner.
        data = open(inputfile).read()
    else:
        # if trying to fetch the data for the current AoC, check if the
        # day's puzzle has unlocked yet
        now = datetime.now(tz=pytz.timezone('Asia/Kolkata'))
        if now.year == YEAR and now.month == 12 and day < 25:
            unlock = now.replace(hour = 10, minute = 30,
                                 second = 0, microsecond = 0) # Midnight EST -> 10:30 AM IST
            if now < unlock:
                print("Today's puzzle hasn't unlocked yet!")
                return None

        if not os.path.exists('inputs'):
            os.mkdir('inputs')

        # Fetch from the AoC site using the user's session cookie.
        uri = 'http://adventofcode.com/{year}/day/{day}/input'.format(year=YEAR, day=day)
        response = requests.get(uri,
                                cookies={'session': get_session_id()},
                                headers={'User-Agent': USER_AGENT})
        if response.status_code != 200:
            raise Exception('Unexpected response: (status = {})\n{}'.format(response.status_code,
                                                                            response.content))
        data = response.text
        print('Fetched data for day {}'.format(day))
        # Cache for subsequent runs.
        with open(inputfile, 'w') as output:
            output.write(data)

    return data
def format_elapsed_time(elapsed):
    """Render a duration given in nanoseconds with a human-friendly unit.

    Scales through ns -> us -> ms and falls back to seconds for anything
    larger.  (The original fell off the end of the loop and returned None
    for durations of 1000 seconds or more.)
    """
    for unit in ['ns', 'us', 'ms']:
        if elapsed < 1000:
            return f'Elapsed: {elapsed:4.3f} {unit}'
        elapsed /= 1000
    return f'Elapsed: {elapsed:4.3f} s'
def with_solutions(*expected):
    """Decorator for day solvers.

    A solver is a generator that yields up to two answers (part 1, then
    part 2).  The wrapper pulls each answer, verifies it against the
    corresponding entry in *expected* (a None entry disables verification
    for that part; the 'skip_verification' kwarg disables it entirely,
    used for custom input data), prints it, and exits with status 23 on a
    mismatch.  Solvers that yield fewer than two values, or none at all,
    are tolerated via the StopIteration/TypeError handlers.
    """
    def wrapper(f):
        error_msg = 'Incorrect solution for Part {}: Expected "{}", Actual "{}"'
        def wrapped_method(*args, **kwargs):
            fgen = f(*args)
            try:
                for index in range(2):
                    actual = next(fgen)
                    if expected[index] is not None and not kwargs['skip_verification']:
                        if actual != expected[index]:
                            print(error_msg.format(index + 1, expected[index], actual))
                            sys.exit(23)
                    print(actual)
                return
            except StopIteration:
                return
            except TypeError as e:
                # A solver with no yield statements is an ordinary function;
                # calling next() on its None return raises this TypeError.
                if e.args[0] == "'NoneType' object is not an iterator":
                    return
                else:
                    raise e
        return wrapped_method
    return wrapper
def main():
    """Entry point: pick the day, load its input, and run the solver.

    Usage: script [day] [input-file | '-' | literal-data].  When custom
    data is supplied, answer verification is skipped (the recorded
    expected answers only apply to the user's own puzzle input).
    """
    if len(sys.argv) > 1:
        day = int(sys.argv[1])
    else:
        day = guess_day()

    custom_data = False
    if len(sys.argv) > 2:
        if sys.argv[2] == '-':
            data = sys.stdin.read()
        else:
            if os.path.exists(sys.argv[2]):
                data = open(sys.argv[2]).read()
            else:
                data = sys.argv[2]
        custom_data = True
    else:
        data = get_data(day)

    if not data:
        print('Cannot run solver without data. Bailing')
        sys.exit(0)

    data = data.strip()

    # Discover solvers by naming convention: any global callable 'solveN'.
    solvers = {}
    solvers = dict([(fn, f) for fn, f in globals().items()
                    if callable(f) and fn.startswith('solve')])

    solver = solvers.get('solve{}'.format(day), None)
    if solver is not None:
        start = time.monotonic_ns()
        solver(data, skip_verification=custom_data)
        end = time.monotonic_ns()
        elapsed = (end - start)
        print(format_elapsed_time(elapsed))
    else:
        print('No solver for day {}'.format(day))
################################################################################
# Common Code

ORIGIN = (0, 0)

def manhattan(a, b = ORIGIN):
    """Manhattan (L1) distance between points a and b."""
    return abs(a[0] - b[0]) + abs(a[1] - b[1])

# Each arrow maps to ((dx, dy), (heading after left turn, heading after
# right turn)).  y grows downwards, so '↑' is (0, -1).
DIRECTIONS = { '↑' : (( 0, -1), ('←', '→')),
               '↓' : (( 0,  1), ('→', '←')),
               '←' : ((-1,  0), ('↓', '↑')),
               '→' : (( 1,  0), ('↑', '↓')),
               '↖' : ((-1, -1), ('←', '↑')),
               '↗' : (( 1, -1), ('↑', '→')),
               '↘' : (( 1,  1), ('→', '↓')),
               '↙' : ((-1,  1), ('↓', '←'))}
def move(point, direction, distance = 1):
    """Return point translated `distance` steps along the arrow `direction`."""
    (dx, dy) = DIRECTIONS[direction][0]
    return (point[0] + (dx * distance), point[1] + (dy * distance))

def turn(heading, direction, angle):
    """Turn `heading` left ('L') or right ('R') by `angle` degrees (multiple of 90)."""
    for _ in range(angle // 90):
        if direction == 'L':
            heading = DIRECTIONS[heading][1][0]
        elif direction == 'R':
            heading = DIRECTIONS[heading][1][1]
    return heading
def rotate(position, direction, angle):
    """Rotate `position` about the origin by `angle` degrees (multiple of 90),
    counter-clockwise for 'L' and clockwise for 'R' (screen coordinates,
    y grows downwards)."""
    x, y = position
    for _ in range(angle // 90):
        if direction == 'L':
            x, y = y, -x
        elif direction == 'R':
            x, y = -y, x
    return (x, y)
def neighbors(_, point, rows, cols):
    """Yield (direction, point) for each of the 8 in-bounds neighbours of `point`.

    The first parameter is ignored; it keeps the signature compatible with
    raytraced_neighbors so either can be passed as a neighbour function.
    """
    for dir in DIRECTIONS:
        dx, dy = DIRECTIONS[dir][0]
        if 0 <= point[0] + dx < cols and 0 <= point[1] + dy < rows:
            yield (dir, (point[0] + dx, point[1] + dy))

def raytraced_neighbors(grid, point, rows, cols):
    """Yield (direction, point) of the first non-zero cell visible from
    `point` in each of the 8 directions, stopping at the grid edge."""
    for dir in DIRECTIONS:
        dx, dy = DIRECTIONS[dir][0]
        for n in itertools.count(1):
            new_point = (point[0] + (dx * n), point[1] + (dy * n))
            if 0 <= new_point[0] < cols and 0 <= new_point[1] < rows:
                if grid[new_point] != 0:
                    yield (dir, new_point)
                    break
            else:
                break

def display(grid, rows, cols, tiles):
    # Given a dict representing a point grid, print the grid, using
    # the given tileset.
    for y in range(rows):
        row = []
        for x in range(cols):
            row.append(tiles[grid[(x, y)]])
        print(''.join(row))
################################################################################
# Solvers
@with_solutions(703131, 272423970)
def solve1(data):

    # Report Repair
    # Find the pair (then the triple) of entries summing to 2020 and
    # yield the product of each.

    entries = [int(line) for line in data.splitlines()]

    for combo in itertools.combinations(entries, 2):
        if sum(combo) == 2020:
            yield math.prod(combo)
            break

    for combo in itertools.combinations(entries, 3):
        if sum(combo) == 2020:
            yield math.prod(combo)
            break
@with_solutions(528, 497)
def solve2(data):

    # Password Philosophy
    # Each line looks like "1-3 a: abcde".  Part 1 treats 1-3 as an
    # occurrence count range; part 2 treats them as 1-based positions of
    # which exactly one must hold the letter.

    count_valid = position_valid = 0
    for line in data.splitlines():
        policy, password = (part.strip() for part in line.split(':'))
        limits, letter = policy.split(' ')
        low, high = (int(n) for n in limits.split('-'))

        if low <= password.count(letter) <= high:
            count_valid += 1
        if (password[low - 1] == letter) != (password[high - 1] == letter):
            position_valid += 1

    yield count_valid
    yield position_valid
@with_solutions(162, 3064612320)
def solve3(data):

    # Toboggan Trajectory

    data = [l.strip() for l in data.split('\n') if len(l) > 1]
    trees = {(x, y):1 for y,l in enumerate(data) for x,c in enumerate(l) if c == '#'}
    treemap = collections.defaultdict(int, trees)
    right = len(data[0])
    bottom = len(data)

    def traverse(dx, dy):
        # Count trees hit on a (dx, dy) slope; the map repeats
        # horizontally, hence the modulo on x.
        current = ORIGIN
        count = 0
        while True:
            if treemap[(current[0] % right, current[1])]:
                count += 1
            if current[1] >= bottom:
                break
            current = (current[0] + dx, current[1] + dy)
        return count

    # Part 1
    yield traverse(3, 1)

    # Part 2: product of tree counts over the five prescribed slopes.
    count = 1
    for (dx, dy) in [(1, 1),
                     (3, 1),
                     (5, 1),
                     (7, 1),
                     (1, 2)]:
        count *= traverse(dx, dy)
    yield count
@with_solutions(202, 137)
def solve4(data):

    # Passport Processing

    # Parse blank-line separated records of space separated key:value fields.
    passports = []
    passport = {}
    for line in data.split('\n'):
        if line.strip() == '':
            if len(passport) > 0:
                passports.append(passport)
            passport = {}
            continue
        for field in line.strip().split(' '):
            key, val = field.split(':')
            passport[key] = val
    if len(passport) > 0:
        passports.append(passport)

    hcl_regex = re.compile(r'^#[0-9a-f]{6}$')
    pid_regex = re.compile(r'^\d{9}$')

    def valid_height(v):
        if v[-2:] == 'cm':
            return 150 <= int(v[:-2]) <= 193
        elif v[-2:] == 'in':
            return 59 <= int(v[:-2]) <= 76
        else:
            return False

    validators = {
        'byr': lambda v: 1920 <= int(v) <= 2002,
        'iyr': lambda v: 2010 <= int(v) <= 2020,
        'eyr': lambda v: 2020 <= int(v) <= 2030,
        'hgt': valid_height,
        'hcl': lambda v: hcl_regex.match(v) is not None,
        'ecl': lambda v: v in {'amb', 'blu', 'brn', 'gry', 'grn', 'hzl', 'oth'},
        'pid': lambda v: pid_regex.match(v) is not None
    }

    def valid_passport(passport, validators=None):
        # All seven fields below are required ('cid' is deliberately
        # optional).  The original source had this list corrupted
        # ("'<KEY>"); it is reconstructed here from the validators dict.
        for key in ['byr', 'iyr', 'eyr', 'hgt',
                    'hcl', 'ecl', 'pid']:
            if key not in passport:
                return False
            if validators is None:
                continue
            if not validators[key](passport[key]):
                return False
        return True

    # Part 1: required fields merely present.
    yield len(list(filter(valid_passport, passports)))

    # Part 2: fields present and values valid.
    count = 0
    for passport in passports:
        if valid_passport(passport, validators):
            count += 1
    yield count
@with_solutions(933, 711)
def solve5(data):

    # Binary Boarding
    # A boarding pass is a 10-bit binary seat id: F/L are 0, B/R are 1.

    translation = str.maketrans('FBLR', '0101')
    occupied = [False] * 1024

    def decode(boarding_pass):
        sid = int(boarding_pass.translate(translation), 2)
        occupied[sid] = True
        return sid

    yield max(decode(boarding_pass) for boarding_pass in data.split('\n'))

    # My seat is the only empty one with both neighbours occupied.
    for sid in range(1024):
        if not occupied[sid] and occupied[sid - 1] and occupied[sid + 1]:
            yield sid
            break
@with_solutions(6742, 3447)
def solve6(data):

    # Custom Customs
    # Per group: count answers given by anyone, and answers given by
    # every member.

    any_total = all_total = 0
    for group_data in data.split('\n\n'):
        responses = group_data.split('\n')
        tally = collections.Counter(itertools.chain.from_iterable(responses))
        any_total += sum(1 for v in tally.values() if v > 0)
        all_total += sum(1 for v in tally.values() if v == len(responses))

    yield any_total
    yield all_total
@with_solutions(248, 57281)
def solve7(data):

    # Handy Haversacks

    # rules[color] maps contained color -> count.
    rules = collections.defaultdict(dict)
    for line in data.split('\n'):
        parent, children = line.split(' contain ')
        parent = parent[:-5]  # strip trailing ' bags'
        if children == 'no other bags.':
            rules[parent] = {}
        else:
            for child in children.split(','):
                count, colora, colorb, _ = child.strip().split(' ', 4)
                count = int(count)
                rules[parent][f'{colora} {colorb}'] = count

    def bagcheck(color):
        # True if `color` can (transitively) contain a shiny gold bag.
        if 'shiny gold' in rules[color]:
            return True
        for child in rules[color]:
            if bagcheck(child):
                return True
        return False

    def bagcount(color):
        # Total number of bags contained inside one `color` bag.
        count = 0
        for child, childcount in rules[color].items():
            count += childcount + (childcount * bagcount(child))
        return count

    yield len([color for color in rules.keys() if bagcheck(color)])
    yield bagcount('shiny gold')
@with_solutions(1915, 944)
def solve8(data):

    # Handheld Halting

    def execute(program):
        # Run until the instruction pointer reaches the end (halts) or an
        # instruction is about to execute a second time (infinite loop).
        # Returns (halted?, accumulator).
        acc = 0
        counter = [0 for _ in range(len(program))]
        idx = 0
        while True:
            if idx == len(program):
                return (True, acc)
            if counter[idx] > 0:
                return (False, acc)
            counter[idx] += 1
            instr, val = program[idx].split(' ')
            val = int(val)
            if instr == 'acc':
                acc += val
                idx += 1
            elif instr == 'jmp':
                idx += val
            elif instr == 'nop':
                idx += 1

    # Part 1: accumulator value when the loop is first detected.
    program = data.split('\n')
    _, res = execute(program)
    yield res

    # Part 2: flip one jmp<->nop so the program halts.
    for idx, line in enumerate(program):
        if line[:3] == 'acc':
            continue
        fixed_program = program[:]
        instr, val = line.split(' ')
        if instr == 'nop':
            fixed_step = f'jmp {val}'
        else:
            fixed_step = f'nop {val}'
        fixed_program[idx] = fixed_step
        halting, res = execute(fixed_program)
        if halting:
            yield res
            break
@with_solutions(258585477, 36981213)
def solve9(data):

    # Encoding Error

    numbers = [int(l) for l in data.split('\n')]

    # Part 1: first number that is not the sum of any pair in the
    # preceding 25-number window.
    target = 0
    window = (0, 25)
    while True:
        sums = set()
        for x,y in itertools.combinations(numbers[window[0]:window[1]], 2):
            sums.add(x+y)
        if numbers[window[1]] not in sums:
            target = numbers[window[1]]
            yield target
            break
        window = (window[0]+1,window[1]+1)

    # Part 2: contiguous range summing to target; answer is min + max of
    # that range.  NOTE(review): this loop never breaks after yielding --
    # it relies on the caller requesting only two values from the generator.
    window = (0,1)
    partial = numbers[0]
    while True:
        if partial == target:
            values = sorted(numbers[window[0]:window[1]])
            yield values[0] + values[-1]
        elif partial < target:
            partial += numbers[window[1]]
            window = (window[0], window[1]+1)
        elif partial > target:
            partial -= numbers[window[0]]
            window = (window[0]+1, window[1])
@with_solutions(2590, 226775649501184)
def solve10(data):

    # Adapter Array

    ratings = set([int(r) for r in data.split('\n')])
    max_rating = max(ratings)
    builtin_rating = max_rating + 3

    # Part 1: walk the chain, always taking the smallest usable step,
    # recording the joltage differences.
    differences = []
    current = 0
    while current < max_rating:
        for difference in (1,2,3):
            if (current + difference) in ratings:
                differences.append(difference)
                break
        current += difference
    differences.append(builtin_rating - current)
    yield differences.count(1) * differences.count(3)

    # Part 2: count all valid arrangements with memoized recursion.
    @functools.lru_cache
    def find_next_adapter(current):
        if current == max_rating:
            return 1
        count = 0
        for difference in (1, 2, 3):
            if (current + difference) in ratings:
                count += find_next_adapter(current + difference)
        return count

    yield find_next_adapter(0)
@with_solutions(2481, 2227)
def solve11(data):

    # Seating System
    # Cell states: 0 = floor, 1 = empty seat, 2 = occupied seat.

    data = data.split('\n')
    rows, cols = len(data), len(data[0])
    data = {(x, y):1 for y,l in enumerate(data)
            for x,c in enumerate(l) if c == 'L'}
    grid = collections.defaultdict(int, data)

    def step(current_grid, neighbor_function, max_occupancy):
        # One round of the cellular automaton; returns the new grid and
        # the number of seats that changed state.
        newgrid = collections.defaultdict(int, current_grid)
        changes = 0
        for y in range(rows):
            for x in range(cols):
                point = (x, y)
                if current_grid[point] == 1:
                    for (_, p) in neighbor_function(current_grid, point, rows, cols):
                        if current_grid[p] == 2:
                            break
                    else:
                        newgrid[point] = 2
                        changes += 1
                # Bug fix: this previously read the outer `grid` variable
                # via closure instead of the `current_grid` parameter; it
                # only worked because callers always passed `grid` itself.
                elif current_grid[point] == 2:
                    if sum([1 for (_, p) in neighbor_function(current_grid, point, rows, cols)
                            if current_grid[p] == 2]) >= max_occupancy:
                        newgrid[point] = 1
                        changes += 1
        return newgrid, changes

    # Part 1: adjacent neighbours, tolerance 4.
    while True:
        grid, changes = step(grid, neighbors, 4)
        if changes == 0:
            yield len([p for p,v in grid.items() if v == 2])
            break

    # Part 2: first visible seat in each direction, tolerance 5.
    grid = collections.defaultdict(int, data)
    while True:
        grid, changes = step(grid, raytraced_neighbors, 5)
        if changes == 0:
            yield len([p for p,v in grid.items() if v == 2])
            break
@with_solutions(439, 12385)
def solve12(data):

    # Rain Risk

    compass_directions = {'N':'↑', 'S':'↓', 'E':'→', 'W':'←'}

    # Part 1: move the ship itself.
    # Bug fix: the original print()ed the answers instead of yielding
    # them, so the with_solutions wrapper never verified them against the
    # recorded expected values.
    heading, position = compass_directions['E'], ORIGIN
    for line in data.split('\n'):
        command = line[0]
        value = int(line[1:])
        if command == 'F':
            position = move(position, heading, value)
        elif command in 'NSEW':
            position = move(position, compass_directions[command], value)
        elif command in 'LR':
            heading = turn(heading, command, value)
        else:
            raise Exception("invalid input")
    yield manhattan(position)

    # Part 2: move a waypoint relative to the ship; 'F' moves the ship
    # toward the waypoint `value` times.
    position = ORIGIN
    waypoint = (10, -1)
    for line in data.split('\n'):
        command = line[0]
        value = int(line[1:])
        if command == 'F':
            position = move(position, compass_directions['E'], waypoint[0] * value)
            position = move(position, compass_directions['N'], waypoint[1] * value)
        elif command in 'NSEW':
            waypoint = move(waypoint, compass_directions[command], value)
        elif command in 'LR':
            waypoint = rotate(waypoint, command, value)
        else:
            raise Exception("invalid input")
    yield manhattan(position)
@with_solutions(3606, 379786358533423)
def solve13(data):

    # Shuttle Search

    data = data.split('\n')
    start_ts = int(data[0])
    buses = [int(x) for x in data[1].split(',') if x != 'x']

    def shortest_delay():
        # First bus departing at or after start_ts; answer is
        # wait-time * bus id.  NOTE(review): the (ts + 1) test means a bus
        # departing exactly at start_ts would be missed -- presumably the
        # puzzle input never has a zero wait.
        for ts in itertools.count(start_ts):
            for bus in buses:
                if (ts + 1) % bus == 0:
                    delay = (ts + 1) - start_ts
                    return delay * bus
    yield shortest_delay()

    def chinese_remainder(pairs):
        # CRT over (residue, modulus) pairs; modular inverse via Fermat's
        # little theorem (pow(d, busid - 2, busid)), valid since bus ids
        # are prime.
        total = 0
        product = 1
        for _,busid in pairs:
            product *= busid
        for idx, busid in pairs:
            d = product // busid
            total += idx * d * pow(d, busid - 2, busid)
        return total % product

    buses = [(int(n) - i, int(n)) for (i, n) in enumerate(data[1].split(',')) if n != 'x']
    yield chinese_remainder(buses)
@with_solutions(14925946402938, 3706820676200)
def solve14(data):

    # Docking Data

    memory = collections.defaultdict(int)

    # Part 1: mask applies to the value; X bits pass through.
    and_mask = or_mask = None
    for line in data.split('\n'):
        if line.startswith('mask'):
            line = line.strip()
            or_mask = int(line[7:].replace('X', '0'), 2)
            and_mask = int(line[7:].replace('X', '1'), 2)
        else:
            address, value = line.split(' = ')
            address = int(address[4:-1])
            value = int(value)
            value = (value | or_mask) & and_mask
            memory[address] = value
    yield sum(memory.values())

    # Part 2: mask applies to the address; X bits float (enumerate both).
    def floating_addresses(pos, mask):
        if not mask:
            yield 0
        else:
            if mask[-1] == '0':
                for a in floating_addresses(pos // 2, mask[:-1]):
                    yield 2*a + pos % 2
            elif mask[-1] == '1':
                for a in floating_addresses(pos // 2, mask[:-1]):
                    yield 2*a + 1
            elif mask[-1] == 'X':
                for a in floating_addresses(pos // 2, mask[:-1]):
                    yield 2*a + 0
                    yield 2*a + 1

    memory.clear()
    mask = None
    for line in data.split('\n'):
        if line.startswith('mask'):
            line = line.strip()
            mask = line[7:].strip()
        else:
            address, value = line.split(' = ')
            address = int(address[4:-1])
            value = int(value)
            for addr in floating_addresses(address, mask):
                memory[addr] = value
    # Bug fix: the original print()ed this answer instead of yielding it,
    # so with_solutions never verified it against 3706820676200.
    yield sum(memory.values())
@with_solutions(706, 19331)
def solve15(data):

    # Rambunctious Recitation

    numbers = [int(v) for v in data.split(',')]

    def memory_game(limit):
        # Van Eck sequence: each number is how many turns ago it was last
        # spoken (0 if new).  history maps number -> last turn it was said.
        history = {n:i for i,n in enumerate(numbers)}
        last_number = numbers[-1]
        for turn in range(len(numbers), limit):
            try:
                last_turn = history[last_number]
                number = turn - last_turn - 1
            except KeyError:
                number = 0
            history[last_number] = turn - 1
            last_number = number
        return number
    yield memory_game(2020)
    yield memory_game(30000000)
@with_solutions(23954, 453459307723)
def solve16(data):

    # Ticket Translation

    # Parse the three input sections: field rules, my ticket, nearby tickets.
    data = data.split('\n')
    rules = collections.defaultdict(set)
    idx = 0
    while True:
        line = data[idx].strip()
        if line == '':
            break
        field, values = line.split(':')
        for rule in values.split(' or '):
            a,b = [int(v) for v in rule.strip().split('-')]
            rules[field].update(x for x in range(a, b+1))
        idx += 1
    idx += 2
    line = data[idx].strip()
    my_ticket = [int(v) for v in line.split(',')]
    tickets = []
    idx += 3
    while idx < len(data):
        line = data[idx].strip()
        tickets.append([int(v) for v in line.split(',')])
        idx += 1

    # Part 1: sum of values not valid for any field.
    all_valid_values = {x for value in rules.values() for x in value}
    invalid_values = [t for ticket in tickets for t in ticket if t not in all_valid_values]
    yield sum(invalid_values)

    # Part 2: deduce which column belongs to which field by repeatedly
    # assigning any field that fits exactly one unassigned column.
    valid_tickets = [ticket for ticket in tickets if set(ticket).issubset(all_valid_values)]
    matched_rules = [None] * len(my_ticket)
    while None in matched_rules:
        for rule_key, rule_values in rules.items():
            if rule_key in matched_rules:
                continue
            possible_rules = set()
            for idx, key in enumerate(matched_rules):
                if key is not None:
                    continue
                for ticket in valid_tickets:
                    if ticket[idx] not in rule_values:
                        break
                else:
                    possible_rules.add(idx)
            if len(possible_rules) == 1:
                matched_rules[possible_rules.pop()] = rule_key
                break
    values = [my_ticket[idx] for idx,name in enumerate(matched_rules) if name.startswith('departure')]
    yield math.prod(values)
@with_solutions(391, 2264)
def solve17(data):

    # Conway Cubes

    def step(active_cubes, dimensions):
        # One generation of the n-dimensional game of life: tally active
        # neighbours, then apply the 2-or-3 / exactly-3 survival rules.
        neighbors = collections.defaultdict(int)
        for cube in active_cubes:
            for offset in itertools.product(range(-1,2), repeat=dimensions):
                if offset == (0,) * dimensions:
                    continue
                neighbors[tuple(x + y for x, y in zip(cube, offset))] += 1
        new_active_cubes = set()
        for cube, count in neighbors.items():
            if cube in active_cubes:
                if count == 2 or count == 3:
                    new_active_cubes.add(cube)
            elif count == 3:
                new_active_cubes.add(cube)
        return new_active_cubes

    # Part 1: 3 dimensions, 6 generations.
    cubes = {(x, y, 0) for y,l in enumerate(data.split('\n'))
             for x,c in enumerate(l) if c == '#'}
    for _ in range(6):
        cubes = step(cubes, 3)
    yield len(cubes)

    # Part 2: same, in 4 dimensions.
    cubes = {(x, y, 0, 0) for y,l in enumerate(data.split('\n'))
             for x,c in enumerate(l) if c == '#'}
    for _ in range(6):
        cubes = step(cubes, 4)
    yield len(cubes)
@with_solutions(11297104473091, 185348874183674)
def solve18(data):

    # Operation Order

    def simple_evaluate_ltr(expr):
        # Evaluate a parenthesis-free expression strictly left to right.
        elements = expr.split()
        result = int(elements[0])
        idx = 1
        while idx < len(elements):
            if elements[idx] == '+':
                idx += 1
                result += int(elements[idx])
            elif elements[idx] == '*':
                idx += 1
                result *= int(elements[idx])
            idx += 1
        return result

    def simple_evaluate_am(expr):
        # Evaluate a parenthesis-free expression with + binding tighter
        # than * (addition first, then multiplication).
        elements = expr.split()
        while '+' in elements:
            idx = elements.index('+')
            result = int(elements[idx-1]) + int(elements[idx+1])
            elements[idx-1:idx+2] = [str(result)]
        while '*' in elements:
            idx = elements.index('*')
            result = int(elements[idx-1]) * int(elements[idx+1])
            elements[idx-1:idx+2] = [str(result)]
        return int(elements[0])

    def paren_evaluate(expr, evaluate):
        # Recursively reduce the innermost balanced parenthesis group,
        # delegating flat sub-expressions to `evaluate`.
        if '(' not in expr:
            return evaluate(expr)
        start = expr.index('(')
        count = 0
        end = 0
        for end in range(start+1, len(expr)):
            if expr[end] == ')' and count == 0:
                break
            elif expr[end] == ')' and count > 0:
                count -= 1
            elif expr[end] == '(':
                count += 1
        return paren_evaluate(expr[:start] +
                              str(paren_evaluate(expr[start+1:end], evaluate)) +
                              expr[end+1:],
                              evaluate)

    sum1 = sum2 = 0
    for expr in data.split('\n'):
        sum1 += paren_evaluate(expr, simple_evaluate_ltr)
        sum2 += paren_evaluate(expr, simple_evaluate_am)
    yield sum1
    yield sum2
@with_solutions(203, 304)
def solve19(data):

    # Monster Messages

    # rules[key] -> list of alternatives, each a list of sub-rule keys;
    # terminals[key] -> literal character; cache memoizes match() results
    # per message (cleared before each message).
    rules = {}
    terminals = {}
    messages = []
    cache = {}
    for line in data.split('\n'):
        if ':' in line:
            key, rule = line.split(': ')
            if rule in ('"a"', '"b"'):
                terminals[key] = rule[1]
            else:
                options = rule.split(' | ')
                rules[key] = [r.split(' ') for r in options]
        elif line:
            messages.append(line)

    def match_subrule(message, start, end, subrules):
        # True if message[start:end] can be split so the pieces match the
        # rule sequence `subrules`; tries every split point (CYK-style).
        if start == end and not subrules:
            return True
        if start == end or not subrules:
            return False
        result = False
        for idx in range(start+1, end+1):
            if match(message, start, idx, subrules[0]) and\
               match_subrule(message, idx, end, subrules[1:]):
                result = True
        return result

    def match(message, start, end, rule):
        # Memoized: does message[start:end] match `rule` exactly?
        key = (start, end, rule)
        if key in cache:
            return cache[key]
        result = False
        if rule in terminals:
            result = (message[start:end] == terminals[rule])
        else:
            for option in rules[rule]:
                if match_subrule(message, start, end, option):
                    result = True
        cache[key] = result
        return result

    def check():
        total = 0
        for message in messages:
            cache.clear()
            if match(message, 0, len(message), '0'):
                total += 1
        return total

    # Part 1
    yield check()

    # Part 2: rules 8 and 11 become recursive (looping) rules.
    rules['8'] = [['42'], ['42', '8']]
    rules['11'] = [['42', '31'], ['42', '11', '31']]
    yield check()
@with_solutions(15006909892229, 2190)
def solve20(data):

    # Jurassic Jigsaw
    # NOTE(review): only part 1 is implemented here; the expected part 2
    # answer (2190) is recorded but never yielded, so the wrapper simply
    # stops after the first value.

    ROWS, COLS = 10, 10
    TILES = {}
    for lines in data.split('\n\n'):
        tileid = lines[5:lines.find(':')]
        TILES[tileid] = [list(l) for l in lines.split('\n')[1:]]

    # For each tile, collect its four edges plus their reversals (to
    # account for flipped orientations).
    EDGES = {}
    for tileid, tilegrid in TILES.items():
        left, right, top, bottom = [], [], [], []
        for idx in range(ROWS):
            left.append(tilegrid[idx][0])
            right.append(tilegrid[idx][COLS-1])
            top.append(tilegrid[0][idx])
            bottom.append(tilegrid[ROWS-1][idx])
        edges = [tuple(e) for e in [left, right, top, bottom]]
        EDGES[tileid] = set([e for e in edges] + [tuple(reversed(e)) for e in edges])

    # Part 1: corner tiles are the ones sharing edges with exactly two
    # other tiles; yield the product of their ids.
    adjacent = collections.defaultdict(set)
    for ((t1,e1),(t2,e2)) in itertools.product(EDGES.items(), repeat=2):
        if t1 == t2:
            continue
        if e1 & e2: # if edge sets of any 2 tiles intersect
            adjacent[t1].add(t2)
    product = 1
    for tileid,_ in EDGES.items():
        if len(adjacent[tileid]) == 2:
            product *= int(tileid)
    yield product
@with_solutions(2389,'fsr,skrxt,lqbcg,mgbv,dvjrrkv,ndnlm,xcljh,zbhp')
def solve21(data):

    # Allergen Assessment

    foods = []
    all_ingredients, all_allergens = set(), set()
    for line in data.split('\n'):
        ingredients, allergens = line.split('(contains ')
        ingredients = set(ingredients.split())
        allergens = set(allergens[:-1].split(', '))
        foods.append((ingredients, allergens))
        all_ingredients |= set(ingredients)
        all_allergens |= set(allergens)

    # candidates[ingredient] = allergens it could still contain; an
    # allergen is ruled out for any ingredient absent from a food that
    # lists that allergen.
    candidates = {i:set(all_allergens) for i in all_ingredients}
    counter = collections.defaultdict(int)
    for ingredients, allergens in foods:
        for ingredient in ingredients:
            counter[ingredient] += 1
        for allergen in allergens:
            for ingredient in all_ingredients:
                if ingredient not in ingredients:
                    candidates[ingredient].discard(allergen)

    # Part 1: total appearances of ingredients that can't hold any allergen.
    total = 0
    for ingredient in all_ingredients:
        if len(candidates[ingredient]) == 0:
            total += counter[ingredient]
    yield total

    # Part 2: resolve the unique ingredient<->allergen assignment, then
    # list the dangerous ingredients sorted by allergen name.
    allergen_map = {}
    used = set()
    while len(allergen_map) < len(all_allergens):
        for ingredient in all_ingredients:
            possible = [a for a in candidates[ingredient] if a not in used]
            if len(possible) == 1:
                allergen_map[ingredient] = possible[0]
                used.add(possible[0])
    dangerous_ingredients = [k for k,v in sorted(allergen_map.items(), key=lambda kv:kv[1])]
    yield ','.join(dangerous_ingredients)
@with_solutions(31455, 32528)
def solve22(data):

    # Crab Combat

    hands = data.split('\n\n')
    decks = [collections.deque([int(v) for v in hands[0].split('\n')[1:]]),
             collections.deque([int(v) for v in hands[1].split('\n')[1:]])]

    def score(winner):
        # Bottom card is worth 1, next 2, etc.
        return sum([card * (len(winner) - idx) for idx, card in enumerate(winner)])

    def combat(deck1, deck2):
        # Plain war: higher card takes both; returns the winning deck.
        winner = None
        while True:
            card1, card2 = deck1.popleft(), deck2.popleft()
            if card1 > card2:
                deck1.extend([card1, card2])
            else:
                deck2.extend([card2, card1])
            if len(deck1) == 0:
                return deck2
            elif len(deck2) == 0:
                return deck1

    def copy_deck(deck, n):
        return collections.deque(list(deck)[:n])

    def recursive_combat(deck1, deck2):
        # Recursive variant: repeated game states give the round to player
        # 1; sub-games decide rounds when both players have enough cards.
        history = set()
        while len(deck1) > 0 and len(deck2) > 0:
            round_hash = (tuple(deck1), tuple(deck2))
            if round_hash in history:
                return 1, deck1, deck2
            else:
                history.add(round_hash)
            card1, card2 = deck1.popleft(), deck2.popleft()
            winner = 0
            if len(deck1) >= card1 and len(deck2) >= card2:
                winner, _, _ = recursive_combat(copy_deck(deck1, card1), copy_deck(deck2, card2))
            elif card1 > card2:
                winner = 1
            else:
                winner = 2
            if winner == 1:
                deck1.extend([card1, card2])
            else:
                deck2.extend([card2, card1])
        if len(deck1) == 0:
            return 2, deck1, deck2
        else:
            return 1, deck1, deck2

    yield score(combat(*decks))

    decks = [collections.deque([int(v) for v in hands[0].split('\n')[1:]]),
             collections.deque([int(v) for v in hands[1].split('\n')[1:]])]
    winner, *finaldecks = recursive_combat(*decks)
    yield score(finaldecks[winner - 1])
@with_solutions('89573246', 2029056128)
def solve23(data):

    # Crab Cups

    cups = [int(v) for v in data]

    def solve(moves):
        # The circle is stored as a linked list in an array: links[c] is
        # the cup clockwise of cup c.  Part 2 extends the circle to one
        # million cups.
        number_of_cups = len(cups) if moves == 100 else int(1e6)
        links = [None for _ in range(number_of_cups + 1)]
        for idx, cup in enumerate(cups):
            links[cups[idx]] = cups[(idx + 1) % len(cups)]
        if moves > 1e6:
            links[cups[-1]] = len(cups) + 1
            for idx in range(len(cups) + 1, number_of_cups + 1):
                links[idx] = idx + 1
            links[-1] = cups[0]
        current = cups[0]
        for _ in range(moves):
            # Remove the three cups after `current`, pick the destination
            # (current - 1, skipping removed cups, wrapping), splice back.
            pickup = links[current]
            links[current] = links[links[links[pickup]]]
            dest = number_of_cups if current == 1 else current - 1
            while dest in (pickup, links[pickup], links[links[pickup]]):
                dest = number_of_cups if dest == 1 else dest - 1
            links[links[links[pickup]]] = links[dest]
            links[dest] = pickup
            current = links[current]
        return links

    # Part 1: labels clockwise after cup 1, 100 moves.
    links = solve(100)
    answer, x = [], 1
    while (x := links[x]) != 1:
        answer.append(x)
    yield ''.join([str(x) for x in answer])

    # Part 2: product of the two cups after cup 1, ten million moves.
    links = solve(int(1e7))
    yield links[1] * links[links[1]]
@with_solutions(512, 4120)
def solve24(data):

    # Lobby Layout
    # Hex grid in cube coordinates (x + y + z == 0).

    origin = (0, 0, 0)
    directions = {'e' :(1, -1, 0), 'w' :(-1, 1, 0),
                  'sw':(-1, 0, 1), 'se':(0, -1, 1),
                  'nw':(0, 1, -1), 'ne':(1, 0, -1)}

    def hexmove(pos, direction):
        x, y, z = pos
        dx, dy, dz = directions[direction]
        return (x + dx, y + dy, z + dz)

    # Part 1: follow each instruction line (1- or 2-letter steps) and
    # toggle the destination tile; count tiles left black.
    def traverse(position, instructions):
        if len(instructions) == 0:
            return position
        if instructions[0] in 'ew':
            new_position = hexmove(position, instructions[0])
            instructions = instructions[1:]
        else:
            new_position = hexmove(position, instructions[:2])
            instructions = instructions[2:]
        return traverse(new_position, instructions)

    black_tiles = set()
    for line in data.split('\n'):
        final_position = traverse(origin, line[:])
        if final_position in black_tiles:
            black_tiles.remove(final_position)
        else:
            black_tiles.add(final_position)

    yield len(black_tiles)

    # Part 2: 100 generations of the hex cellular automaton (black stays
    # with 1-2 black neighbours; white flips with exactly 2).
    def step(tileset):
        new_tileset = set()
        check_tiles = set()
        for tile in tileset:
            check_tiles.add(tile)
            check_tiles.update([hexmove(tile, d) for d in directions.keys()])
        for tile in check_tiles:
            count = 0
            for neighbor in [hexmove(tile, d) for d in directions.keys()]:
                if neighbor in tileset:
                    count += 1
            if tile in tileset and count in (1, 2):
                new_tileset.add(tile)
            elif tile not in tileset and count == 2:
                new_tileset.add(tile)
        return new_tileset

    for _ in range(100):
        black_tiles = step(black_tiles)

    yield len(black_tiles)
@with_solutions(17032383, None)
def solve25(data):

    # Combo Breaker
    # Diffie-Hellman-style exchange modulo 20201227: recover the card's
    # loop size by brute force, then derive the shared encryption key.

    card_public_key, door_public_key = [int(l) for l in data.split('\n')]

    def transform(subject_number, loop_size = 0, public_key = None):
        # Dual purpose: with loop_size, perform the transform; with
        # public_key, search for the loop size that produces it.
        value = 1
        for n in itertools.count():
            value = (value * subject_number) % 20201227
            if loop_size > 0 and n == loop_size:
                return value
            if public_key and value == public_key:
                return n + 1

    card_loop_size = transform(7, public_key=card_public_key)
    yield pow(door_public_key, card_loop_size, 20201227)
################################################################################

# Script entry point.
if __name__ == '__main__':
    main()
| 2.765625 | 3 |
pdfserver/views.py | cburgmer/pdfserver | 2 | 12769026 | <filename>pdfserver/views.py<gh_stars>1-10
# -*- coding: utf-8 -*-
import re
from operator import attrgetter
from flask import g, request, Response, session, render_template
from flask import abort, redirect, url_for, jsonify
from flaskext.babel import gettext
from werkzeug import wrap_file
from werkzeug.exceptions import InternalServerError, MethodNotAllowed, Gone, \
NotFound
from pyPdf import PdfFileWriter, PdfFileReader
from pdfserver import app, babel
Upload = __import__(app.config['MODELS'], fromlist='Upload').Upload
from pdfserver.util import templated
from pdfserver.tasks import handle_pdfs_task, NotRegistered
@babel.localeselector
def get_locale():
    """Select the UI language for flask-babel from the browser's
    Accept-Language header, limited to the configured translations."""
    # Get language from the user accept header the browser transmits.
    return request.accept_languages.best_match(
        app.config['SUPPORTED_TRANSLATIONS'])
def _get_upload():
    """Return the Upload referenced by the POSTed 'id' field.

    Aborts with 404 if the id is missing or malformed, does not belong to
    the current session, or no matching Upload exists.
    """
    app.logger.debug("IDS %r" % request.form.get('id', None))
    try:
        id = long(request.form.get('id', None))
    # Bug fix: long(None) raises TypeError, not ValueError, so a request
    # without an 'id' field used to produce a 500 instead of a 404.
    except (TypeError, ValueError):
        app.logger.debug("Invalid id specified: %r"
                         % request.form.get('id', None))
        raise abort(404)

    # Only uploads linked to the current session may be accessed.
    file_ids = session.get('file_ids', [])
    app.logger.debug(file_ids)
    if id not in file_ids:
        app.logger.debug("No upload with id %r for user" % id)
        raise abort(404)

    upload = Upload.get_for_id(id)
    if upload:
        return upload
    else:
        app.logger.debug("Upload with id %r doesn't exist" % id)
        raise abort(404)
def _get_uploads():
    """Return all Upload objects linked to the current session
    (an empty list when the session owns none)."""
    ids = [int(file_id) for file_id in session.get('file_ids', [])]
    if not ids:
        return []
    return Upload.get_for_ids(ids)
def _order_files(files):
    """Return a mapping for each id given by POST data.

    Appends missing ids to the end of the given order.
    """
    files_order = []

    file_id_map = dict((upload.id, upload) for upload in files)

    # Get user selected order from form
    order = request.form.getlist('file[]')
    for id in order:
        try:
            id = int(id)
        except ValueError:
            continue
        if id and id in file_id_map:
            files_order.append(file_id_map[id])
            # Remove so duplicates in the form data are ignored and the
            # leftovers can be appended below.
            del file_id_map[id]

    # Append missing ids
    files_order.extend(file_id_map.values())

    return files_order
return files_order
@templated('main.html')
def main():
    """Render the main page with the session's uploads.

    Also sets a marker cookie so upload_file() can detect clients that
    have cookies disabled.
    """
    files = _get_uploads()
    session['has_cookies'] = 1
    return {'uploads': files}
def main_table():
    """Return the uploads table fragment (as JSON) for AJAX refreshes,
    in the user's chosen order."""
    # Get user defined order
    files = _order_files(_get_uploads())
    return jsonify(content=render_template('uploads.html',
                                           uploads=files))
def handle_form():
    """Dispatch the multi-purpose main form to the handler named by its
    'form_action' field; unknown actions get a 405."""
    action = request.form.get('form_action', 'upload')
    app.logger.debug(action)
    handlers = {'upload': upload_file,
                'confirm_deleteall': confirm_delete_all,
                'deleteall': delete_all,
                'combine': combine_pdfs,
                'cancel': main}
    handler = handlers.get(action)
    if handler is None:
        raise MethodNotAllowed()
    return handler()
def upload_file():
    """Store the POSTed 'file' as a new Upload linked to the session.

    If the client never received the marker cookie set by main(), reply
    with a hint to enable cookies instead (uploads could not be linked to
    the user otherwise).
    """
    if not session.get('has_cookies', 0) == 1:
        app.logger.debug("No cookie found")
        return Response('<html><body><span id="cookies">'
                        + gettext('Please activate cookies '
                                  'so your uploads can be linked to you.')
                        + '</span></body></html>')

    if 'file' in request.files and request.files['file']:
        app.logger.info("Upload form is valid")

        upload = Upload()

        # save original name
        upload.store_file(request.files['file'])
        Upload.add(upload)
        Upload.commit()

        # link to session
        file_ids = session.get('file_ids', [])
        file_ids.append(upload.id)
        session['file_ids'] = file_ids

        app.logger.info("Saved upload: %s" % upload)
    else:
        app.logger.error("No file specified")

    return redirect(url_for('main'))
#@templated('confirm_delete.html')
#def confirm_delete():
#files = _get_uploads()
#return {'uploads': files}
def delete():
    """Delete the single upload named by the POSTed 'id' (404 if it is
    not the session's), then return the refreshed uploads table."""
    upload = _get_upload()

    session['file_ids'].remove(upload.id)
    # In-place list mutation is invisible to Flask's session proxy.
    session.modified = True

    Upload.delete(upload)
    Upload.commit()

    return main_table()
@templated('confirm_delete_all.html')
def confirm_delete_all():
    """Render the confirmation page before deleting every upload."""
    files = _get_uploads()
    return {'uploads': files}
def delete_all():
    """Delete every upload belonging to the session.

    Returns the table fragment for AJAX requests, otherwise redirects to
    the main page.
    """
    app.logger.debug("Deleting all files")
    files = _get_uploads()

    session['file_ids'] = []

    for upload in files:
        Upload.delete(upload)
    Upload.commit()

    if request.is_xhr:
        return main_table()
    else:
        return redirect(url_for('main'))
def _respond_with_pdf(output):
    """Wrap raw PDF bytes in a download response named 'combined.pdf'."""
    # TODO get proper file name
    response = Response(content_type='application/pdf')
    response.headers.add('Content-Disposition',
                         'attachment; filename=combined.pdf')
    response.data = output

    app.logger.debug("Wrote %d bytes" % len(response.data))

    return response
def combine_pdfs():
    """Queue a background task combining the session's PDFs with the
    user's options (rotation, n-up, page ranges, text overlay).

    Returns the task id as JSON for AJAX requests, otherwise redirects to
    the result page.
    """
    files = _get_uploads()

    # If user clicked on button but no files were uploaded
    if not files:
        return redirect(url_for('main'))

    # Get options
    try:
        # make sure value is multiple of 90
        rotate = int(request.form.get('rotate', '0')) / 90 * 90
    except ValueError:
        rotate = 0
    try:
        # restrict to the supported pages-per-sheet counts
        pages_sheet = int(request.form.get('pages_sheet', '1'))
        if not pages_sheet in (1, 2, 4, 6, 9, 16):
            raise ValueError
    except ValueError:
        pages_sheet = 1
    text_overlay = request.form.get('text_overlay', None)

    app.logger.debug(u"Parameters: %d pages on one, rotate %d°, text overlay %r"
                     % (pages_sheet, rotate, text_overlay))

    # Get pdf objects and arrange in the user selected order, then get ranges
    files = _order_files(files)
    pages = [request.form.get('pages_%d' % upload.id, "")
             for upload in files]

    # Do the actual work
    file_ids = map(attrgetter('id'), files)
    resp = handle_pdfs_task.apply_async((file_ids,
                                         pages,
                                         pages_sheet,
                                         rotate,
                                         text_overlay))

    # Save task in session and keep the open task list small
    session['tasks'] = session.get('tasks', [])[-9:] + [resp.task_id]

    if request.is_xhr:
        return jsonify(ready=False, task_id=resp.task_id,
                       url=url_for('check_result', task_id=resp.task_id))
    else:
        return redirect(url_for('result_page', task_id=resp.task_id))
def result_page(task_id):
    """
    Non-Javascript result page
    """
    # Only tasks started by this session may be inspected.
    if task_id not in session.get('tasks', []):
        app.logger.debug("Valid tasks %r" % session.get('tasks', []))
        raise NotFound()

    param = {'task_id': task_id,
             'ready': handle_pdfs_task.AsyncResult(task_id).ready()}
    return render_template('download.html', **param)
def check_result(task_id):
    """Return the task's status (ready/success and, once ready, the
    download URL) as JSON for client-side polling."""
    # Only tasks started by this session may be inspected.
    if task_id not in session.get('tasks', []):
        app.logger.debug("Valid tasks %r" % session.get('tasks', []))
        raise NotFound()

    result = handle_pdfs_task.AsyncResult(task_id)
    if result.ready():
        url = url_for('download_result', task_id=task_id)
    else:
        url = ''
    return jsonify(ready=result.ready(), url=url, task_id=task_id,
                   success=result.successful())
def download_result(task_id):
    """Stream the finished PDF for *task_id*, or redirect back to the result
    page while the task is still running.

    Raises NotFound (404) for unknown/unregistered tasks, Gone (410) when the
    backend already dropped the stored result, and InternalServerError (500)
    when the task itself failed.
    """
    if task_id not in session.get('tasks', []):
        app.logger.debug("Valid tasks %r" % session.get('tasks', []))
        raise NotFound()
    try:
        result = handle_pdfs_task.AsyncResult(task_id)
        if result.ready():
            # Some result backends expose available(); if the stored result
            # expired there is nothing left to download.
            if hasattr(result, 'available') and not result.available():
                raise Gone("Result expired. "
                           "You probably waited too long to download the file.")
            if result.successful():
                output = result.result
                # NOTE(review): .decode('zlib') and unicode() below are
                # Python-2-only APIs -- this module appears to target Py2.
                return _respond_with_pdf(output.decode('zlib'))
            else:
                app.logger.debug("Result not successful: %r" % result.result)
                raise InternalServerError(unicode(result.result))
    except NotRegistered:
        app.logger.debug("Result not registered %r" % task_id)
        raise NotFound()
    # Task is known but not finished yet: send the user back to the wait page.
    return redirect(url_for('result_page', task_id=task_id))
def remove_download():
    """Forget a task's result and drop it from the session task list."""
    task_id = request.form.get('task_id', None)
    known_tasks = session.get('tasks', [])
    if task_id not in known_tasks:
        app.logger.debug("Valid tasks %r" % known_tasks)
        raise NotFound()
    result = handle_pdfs_task.AsyncResult(task_id)
    session['tasks'].remove(task_id)
    session.modified = True
    try:
        # Not every result backend implements forget().
        if hasattr(result, 'forget'):
            result.forget()
    except NotRegistered:
        app.logger.debug("Result not registered %r" % task_id)
        raise NotFound()
    return jsonify(status="OK")
| 2.359375 | 2 |
kong-python-pdk/kong_pdk/pdk/kong/nginx/__init__.py | srAtKong/kong-custom-plugin-py-tcp | 18 | 12769027 | <gh_stars>10-100
# AUTO GENERATED BASED ON Kong 2.4.x, DO NOT EDIT
# Original source path: kong/pdk/nginx.lua
from typing import TypeVar, Any, Union, List, Mapping, Tuple, Optional
# Type aliases used by the generated PDK method signatures below.
number = TypeVar('number', int, float)  # Lua "number": either int or float
table = TypeVar('table', List[Any], Mapping[str, Any])  # Lua "table": list- or map-like
# XXX cdata has no direct Python equivalent; treated as an opaque value
cdata = Any
err = str  # error values are plain message strings
from .shared import shared as cls_shared
class nginx():
    '''
    Auto-generated stub mirror of Kong's `kong.nginx` PDK module.

    Every method body is a placeholder (`pass`); the actual behaviour is
    provided by the Kong plugin server at runtime.
    '''

    # Sub-module exposing the `kong.nginx.shared` dictionary API.
    shared = cls_shared

    @staticmethod
    def get_ctx(k: str) -> Any:
        """
        :param k: key for the ctx data
        :returns the per-request context data in ngx.ctx
        """
        pass

    @staticmethod
    def get_subsystem() -> str:
        """
        :returns the subsystem name
        """
        pass

    @staticmethod
    def get_tls1_version_str() -> str:
        """
        :returns the TLSv1 version string
        """
        pass

    # NOTE(review): the name suggests reading an nginx variable, but the
    # generated docstring claims a version string -- upstream generator
    # output; confirm against the Lua source before relying on it.
    @staticmethod
    def get_var() -> str:
        """
        :returns the NGINX version string
        """
        pass

    @staticmethod
    def req_start_time() -> number:
        """
        :returns req_start_time
        """
        pass

    @staticmethod
    def set_ctx(k: str, any: str) -> None:
        """
        :param k: key for the ctx data
        :param any: value for the ctx data
        """
        pass
    pass
import sys
class Memory:
    """Byte-addressable memory model for the simulator.

    Keys of ``self.memory`` are byte addresses as decimal strings; each
    value is an 8-character binary string (one byte).  Multi-byte loads and
    stores operate on consecutive addresses in big-endian order.
    """

    def __init__(self):
        # address (decimal string) -> one byte as an 8-char binary string
        self.memory = {}

    # single byte
    def get_address(self, address_str):
        """Return the byte at *address_str*, or eight '0's if unassigned."""
        if address_str in self.memory:
            # BUGFIX: the original referenced a misspelled name
            # ('addresss_str'), raising NameError on every successful hit.
            return self.memory[address_str]
        else:
            print("memory not assigned, returning zero")
            return '0'*8

    # single byte
    def set_address(self, address_str, value):
        """Store *value* (an 8-bit binary string) at *address_str*."""
        self.memory[address_str] = value

    def get_byte(self, address_str):
        """Return the byte at *address_str* (KeyError if unset)."""
        return self.memory[address_str]

    def get_halfword(self, address_str):
        """Return 2 consecutive bytes starting at *address_str* (16 bits)."""
        return "".join([self.memory[str(int(address_str)+i)] for i in range(2)])

    def get_word(self, address_str):
        """Return 4 consecutive bytes starting at *address_str* (32 bits)."""
        return "".join([self.memory[str(int(address_str)+i)] for i in range(4)])

    def get_doubleword(self, address_str):
        """Return 8 consecutive bytes starting at *address_str* (64 bits)."""
        return "".join([self.memory[str(int(address_str)+i)] for i in range(8)])

    def store_byte(self, address_str, value):
        """Store one byte; *value* must be exactly 8 bits long."""
        if len(value) == 8:
            self.memory[address_str] = value
        else:
            print(f"store_byte takes only 8 bit values, but got {len(value)}")
            sys.exit(0)

    def store_halfword(self, address_str, value):
        """Store a 16-bit value across 2 consecutive addresses."""
        if len(value) == 16:
            for i in range(2):
                self.memory[str(int(address_str) + i)] = value[i*8: 8 + i*8]
        else:
            print(f"store_halfword takes only 16 bit values, but got {len(value)}")
            sys.exit(0)

    def store_word(self, address_str, value):
        """Store a 32-bit value across 4 consecutive addresses."""
        if len(value) == 32:
            for i in range(4):
                self.memory[str(int(address_str) + i)] = value[i*8:8 + i*8]
        else:
            print(f"store_word takes only 32 bit values, but got {len(value)}")
            sys.exit(0)

    def store_doubleword(self, address_str, value):
        """Store a 64-bit value across 8 consecutive addresses."""
        if len(value) == 64:
            for i in range(8):
                self.memory[str(int(address_str) + i)] = value[i*8:8 + i*8]
        else:
            print(f"store_halfword takes only 64 bit values, but got {len(value)}")
            sys.exit(0)

    def set_n_bytes(self, address_str, value, n):
        """Store *n* bytes from *value* at consecutive addresses."""
        if len(value) // 8 == n:
            for i in range(n):
                # BUGFIX: addresses advance by one byte per iteration; the
                # original stepped the address by i*8, scattering the bytes
                # (cf. store_word, which correctly uses +i).
                self.store_byte(
                    str(int(address_str) + i),
                    value[i*8:8 + i*8]
                )
        else:
            # BUGFIX: the original message was missing the f-prefix, so the
            # placeholder was emitted literally.
            raise ValueError(f"length {len(value)} not a multiple of 8")

    def get_string(self, address):
        """Read a NUL-terminated string; *address* is a binary address string."""
        address = str(int(address, 2))
        if address in self.memory:
            string = ""
            # Walk forward one byte at a time until a zero byte terminator.
            while self.memory[address] != '0'*8:
                char = chr(int(self.memory[address], 2))
                string = string + char
                address = str(int(address) + 1)
            return string
        else:
            raise RuntimeError("get_string: address not in memory")
| 3.71875 | 4 |
# -*- coding: utf-8 -*-
import cv2
import math
from ..analyzers.facedetector import FaceDetector
class Noaction(object):
    """Behaviour that steers the robot toward the biggest detected face."""

    def activate(self):
        # Called when action activated
        return {}

    def update(self):
        """Called every frame while the action is activated.

        Returns a dict of wheel commands that rotates toward the biggest
        detected face and moves forward while it is still small/far.
        """
        act = {
            "wheelleft": 0.0,
            "wheelright": 0.0
        }
        # BUGFIX: compare against None directly instead of the fragile
        # str(...) != "None" check used originally.
        if FaceDetector.biggestFaceRect is not None:
            # If face exists
            x, _ = FaceDetector.biggestFaceRectPosNormalized(None)
            size = FaceDetector.biggestFaceSizeNormalized(None)
            if abs(x) > 0.3:
                if x > 0:
                    # Rotate to left
                    act["wheelleft"] -= 0.1
                    act["wheelright"] += 0.1
                else:
                    # Rotate to right
                    act["wheelleft"] += 0.1
                    act["wheelright"] -= 0.1
            if size < 0.1:
                # Move forward
                act["wheelleft"] += 0.1
                act["wheelright"] += 0.1
        # BUGFIX: the original computed `act` but never returned it, so the
        # caller always received None and the wheel commands were dropped.
        return act

    def deactivate(self):
        # Called when action deactivated
        return {}
pyproteome/pathways/enrichments.py | white-lab/pyproteome | 8 | 12769030 | <gh_stars>1-10
# -*- coding: utf-8 -*-
'''
This module does most of the heavy lifting for the pathways module.
It includes functions for calculating enrichment scores and generating plots
for GSEA / PSEA.
'''
from __future__ import division
from collections import defaultdict
from functools import partial
import logging
import multiprocessing
import numpy as np
import pandas as pd
from sklearn.utils import shuffle
from . import plot
LOGGER = logging.getLogger('pyproteome.enrichments')
MIN_PERIODS = 5
'''
Minimum number of samples with peptide quantification and phenotypic
measurements needed to generate a correlation metric score.
'''
CORRELATION_METRICS = [
'spearman',
'pearson',
'kendall',
'fold',
'log2',
'zscore',
]
'''
Correlation metrics used for enrichment analysis. 'spearman', 'pearson', and
'kendall' are all calculated using `pandas.Series.corr()`.
'fold' takes ranking values direction from the 'Fold Change' column.
'log2' takes ranking values from a log2 'Fold Change' column.
'zscore' takes ranking values from a log2 z-scored 'Fold Change' column.
'''
DEFAULT_P = 0.75
DEFAULT_ESS_METHOD = 'integral'
DEFAULT_Q = 0.25
DEFAULT_RANK_CPUS = 6
'''
Default number of CPUs to use when scrambling rows of a data set.
'''
DEFAULT_CORR_CPUS = 4
'''
Default number of CPUs to use when scrambling columns of a data set.
'''
ESS_METHODS = {
'max_abs': lambda x: max(x, key=abs),
'max_min': lambda x: (max(x) - min(x)) * np.sign(max(x, key=abs)),
'integral': lambda x: np.trapz(x),
}
class PrPDF(object):
    '''
    Exact (empirical) probability distribution over a set of observed
    samples, kept as a sorted array for binary-search lookups.
    '''

    def __init__(self, data):
        self.data = np.sort(data)

    def pdf(self, x):
        '''
        Probability mass assigned to any single sample point.

        Parameters
        ----------
        x : float

        Returns
        -------
        float
        '''
        return 1 / self.data.shape[0]

    def cdf(self, x):
        '''
        Fraction of samples strictly below *x*.

        Parameters
        ----------
        x : float

        Returns
        -------
        float
        '''
        n_below = np.searchsorted(self.data, x, side='left')
        return n_below / self.data.shape[0]

    def sf(self, x):
        '''
        Survival function: fraction of samples strictly above *x*.

        Parameters
        ----------
        x : float

        Returns
        -------
        float
        '''
        n_at_or_below = np.searchsorted(self.data, x, side='right')
        return 1 - n_at_or_below / self.data.shape[0]
def _calc_p(ess, ess_pi):
return [
abs(ess) <= abs(i)
for i in ess_pi
if (i < 0) == (ess < 0)
]
def _shuffle(ser):
    """Return *ser* with shuffled values but the original index restored.

    sklearn's shuffle() permutes the index together with the values;
    re-attaching the old index scrambles the value-to-label mapping,
    which is what the permutation test needs.
    """
    ind = ser.index
    ser = shuffle(ser)
    ser.index = ind
    return ser
def simulate_es_s_pi(
    vals,
    psms,
    gene_sets,
    phenotype=None,
    metric='spearman',
    p_iter=1000,
    n_cpus=None,
    **kwargs
):
    '''
    Simulate ES(S, pi) by scrambling the phenotype / correlation values for a
    data set and recalculating gene set enrichment scores.

    Parameters
    ----------
    vals : :class:`pandas.DataFrame`
    psms : :class:`pandas.DataFrame`
    gene_sets : :class:`pandas.DataFrame`
    phenotype : :class:`pandas.Series`, optional
    metric : str, optional
    p_iter : int, optional
        Number of permutations to run.
    n_cpus : int, optional
        Worker process count; the default depends on the metric.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        *vals* with an added 'ES(S, pi)' column of per-set permutation
        score lists.
    '''
    assert metric in CORRELATION_METRICS
    LOGGER.info(
        'Calculating ES(S, pi) for {} gene sets'.format(len(gene_sets))
    )
    if n_cpus is None:
        n_cpus = DEFAULT_RANK_CPUS
        # Correlation metrics recompute per-row correlations per iteration,
        # so use fewer workers by default.
        if metric in ['spearman', 'pearson', 'kendall']:
            n_cpus = DEFAULT_CORR_CPUS
    pool = None
    if n_cpus > 1:
        pool = multiprocessing.Pool(
            processes=n_cpus,
        )
        gen = pool.imap_unordered(
            partial(
                _calc_essdist,
                psms=psms.copy(),
                gene_sets=gene_sets,
                metric=metric,
                **kwargs
            ),
            [phenotype for _ in range(p_iter)],
        )
    else:
        gen = (
            _calc_essdist(
                phen=phenotype,
                psms=psms,
                gene_sets=gene_sets,
                metric=metric,
                **kwargs
            )
            for _ in range(p_iter)
        )
    ess_dist = defaultdict(list)
    for ind, ess in enumerate(
        gen,
        start=1,
    ):
        for key, val in ess.items():
            ess_dist[key].append(val)
        # Log roughly every 10% of iterations.
        if ind % (p_iter // min([p_iter, 10])) == 0:
            LOGGER.info(
                '-- Calculated {}/{} pvals'.format(ind, p_iter)
            )
    if pool is not None:
        # BUGFIX: the original never released the worker pool, leaking
        # processes on every call.
        pool.close()
        pool.join()
    LOGGER.info('Calculating ES(S, pi) using {} cpus'.format(n_cpus))
    vals['ES(S, pi)'] = vals.index.map(
        lambda row: ess_dist[row],
    )
    return vals
def _calc_q(nes, nes_pdf, nes_pi_pdf):
if nes > 0:
return (
nes_pi_pdf.pdf(nes) + nes_pi_pdf.sf(nes)
) / (
nes_pdf.pdf(nes) + nes_pdf.sf(nes)
)
else:
return (
nes_pi_pdf.pdf(nes) + nes_pi_pdf.cdf(nes)
) / (
nes_pdf.pdf(nes) + nes_pdf.cdf(nes)
)
def estimate_pq(vals):
    '''
    Estimate p- and q-values for an enrichment analysis using the ES(S, pi)
    values generated by `simulate_es_s_pi`.

    Adds 'NES(S)', 'pos NES(S, pi)', 'neg NES(S, pi)', 'p-value', and
    'q-value' columns and plots the NES distributions as a side effect.

    Parameters
    ----------
    vals : :class:`pandas.DataFrame`
        Must contain 'ES(S)' and 'ES(S, pi)' columns.
    '''
    assert 'ES(S)' in vals.columns
    assert 'ES(S, pi)' in vals.columns
    vals = vals.copy()
    # Split scores by sign; positive and negative enrichments are
    # normalized against their own permutation distributions.
    mask = vals['ES(S)'] > 0
    pos_ess = vals['ES(S)'][mask]
    neg_ess = vals['ES(S)'][~mask]
    pos_pi = vals['ES(S, pi)'].apply(np.array).apply(lambda x: x[x > 0])
    neg_pi = vals['ES(S, pi)'].apply(np.array).apply(lambda x: x[x < 0])
    # Normalize by the mean magnitude of same-signed permutation scores.
    pos_mean = pos_pi.apply(np.mean).abs()
    neg_mean = neg_pi.apply(np.mean).abs()
    pos_nes = pos_ess / pos_mean
    neg_nes = neg_ess / neg_mean
    # assert (pos_nes.isnull() | neg_nes.isnull()).all()
    # assert ((~pos_nes.isnull()) | (~neg_nes.isnull())).all()
    pos_pi_nes = pos_pi / pos_mean
    neg_pi_nes = neg_pi / neg_mean
    # Each row has exactly one of pos_nes/neg_nes defined; fillna merges
    # them back into a single column.
    vals['NES(S)'] = pos_nes.fillna(neg_nes)
    vals['pos NES(S, pi)'] = pos_pi_nes
    vals['neg NES(S, pi)'] = neg_pi_nes
    pos_mat = (
        np.concatenate(pos_pi_nes.values)
        if pos_pi_nes.shape[0] > 0 else
        np.array([])
    )
    neg_mat = (
        np.concatenate(neg_pi_nes.values)
        if neg_pi_nes.shape[0] > 0 else
        np.array([])
    )
    plot.plot_nes_dist(
        vals['NES(S)'].values,
        np.concatenate([pos_mat, neg_mat]),
    )
    # Empirical distributions for the observed and permuted NES values.
    pos_pdf = PrPDF(pos_nes.dropna().values)
    neg_pdf = PrPDF(neg_nes.dropna().values)
    pos_pi_pdf = PrPDF(pos_mat)
    neg_pi_pdf = PrPDF(neg_mat)
    LOGGER.info('Generated NES(S) distributions')
    if vals.shape[0] > 0:
        # p-value: fraction of same-signed permutation scores at least as
        # extreme as the observed ES(S).
        vals['p-value'] = vals.apply(
            lambda x:
            _frac_true(
                _calc_p(x['ES(S)'], x['ES(S, pi)'])
            ),
            axis=1,
        )
        # q-value: null-vs-observed tail-mass ratio at the row's NES(S).
        vals['q-value'] = vals.apply(
            lambda x:
            _calc_q(
                x['NES(S)'],
                pos_pdf if x['NES(S)'] > 0 else neg_pdf,
                pos_pi_pdf if x['NES(S)'] > 0 else neg_pi_pdf,
            ),
            axis=1,
        )
    LOGGER.info('Calculated p, q values')
    return vals
def _frac_true(x):
return sum(x) / max([len(x), 1])
def get_gene_changes(psms):
    '''
    Collapse *psms* down to one mean correlation per gene / phosphosite ID.

    Duplicate IDs are merged by averaging their correlation values; the
    result is sorted by decreasing correlation and indexed by ID.

    Parameters
    ----------
    psms : :class:`pandas.DataFrame`
    '''
    LOGGER.info('Getting gene correlations ({} IDs)'.format(psms.shape[0]))
    changes = psms[['ID', 'Correlation']].copy()
    if changes.shape[0] > 0:
        changes = changes.groupby('ID', as_index=False).agg(
            {'Correlation': np.mean}
        )
    changes = changes.sort_values(by='Correlation', ascending=False)
    return changes.set_index(keys='ID')
def _calc_essdist(
    phen=None,
    psms=None,
    gene_sets=None,
    metric='spearman',
    **kwargs
):
    """Run one permutation and return its per-set ES(S) scores.

    Scrambles either the phenotype labels (correlation metrics) or the
    'Fold Change' column (rank metrics), then recomputes enrichment
    scores without p-value estimation.
    """
    assert metric in CORRELATION_METRICS
    # Correlation metrics: scramble sample-to-phenotype assignment.
    if phen is not None:
        phen = _shuffle(phen)
    # Rank metrics: scramble the fold changes themselves.
    if metric in ['fold', 'zscore']:
        psms = psms.copy()
        psms['Fold Change'] = _shuffle(psms['Fold Change'])
    vals = enrichment_scores(
        psms,
        gene_sets,
        phenotype=phen,
        metric=metric,
        recorrelate=True,
        pval=False,
        **kwargs
    )
    return vals['ES(S)']
def correlate_phenotype(psms, phenotype=None, metric='spearman'):
    '''
    Calculate the correlation values for each gene / phosphosite in a data set.

    Parameters
    ----------
    psms : :class:`pandas.DataFrame`
    phenotype : :class:`pandas.Series`, optional
        Required for the 'spearman'/'pearson'/'kendall' metrics.
    metric : str, optional
        The correlation function to use. See CORRELATION_METRICS for a full
        list of choices.

    Returns
    -------
    psms : :class:`pandas.DataFrame`
        A copy of *psms* with a 'Correlation' column added.
    '''
    assert metric in CORRELATION_METRICS
    psms = psms.copy()
    if metric in ['spearman', 'pearson', 'kendall']:
        LOGGER.info(
            'Calculating correlations using metric \'{}\' (samples: {})'
            .format(metric, list(phenotype.index))
        )
        # Correlate each peptide's quantification across samples against
        # the phenotype measurements.
        psms['Correlation'] = psms.apply(
            lambda row:
            phenotype.corr(
                pd.to_numeric(row[phenotype.index]),
                method=metric,
                min_periods=MIN_PERIODS,
            ),
            axis=1,
        )
    else:
        LOGGER.info(
            'Calculating ranks'
        )
        new = psms['Fold Change']
        # 'log2' always log-transforms; 'zscore' only does so when all
        # fold changes are positive (i.e. not already log-space).
        if (
            metric in ['log2'] or
            (metric in ['zscore'] and (new > 0).all())
        ):
            new = new.apply(np.log2)
        if metric in ['zscore']:
            new = (new - new.mean()) / new.std()
        psms['Correlation'] = new
    return psms
def calculate_es_s(gene_changes, gene_set, p=None, n_h=None, ess_method=None):
    '''
    Calculate the enrichment score for an individual gene set using a
    GSEA-style weighted running sum over the correlation-ranked gene list.

    Parameters
    ----------
    gene_changes : :class:`pandas.DataFrame`
        Output of `get_gene_changes`: correlations indexed by gene ID,
        sorted in descending order.
    gene_set : set of str
    p : float, optional
        Weighting exponent applied to correlation magnitudes.
    n_h : int, optional
        Hit count used for the miss penalty; defaults to the number of
        set members present in *gene_changes*.
    ess_method : str, optional
        One of {'integral', 'max_abs', 'max_min'}.

    Returns
    -------
    dict
        Keys: 'hits' (boolean mask), 'cumscore' (running sum),
        'ess' (scalar score), 'hit_list' (hit gene IDs in rank order).
    '''
    if p is None:
        p = DEFAULT_P
    if ess_method is None:
        ess_method = DEFAULT_ESS_METHOD
    assert ess_method in ESS_METHODS
    n = len(gene_changes)
    # Restrict the set to genes actually observed in the data.
    gene_set = set(
        gene
        for gene in gene_set
        if gene in gene_changes.index
    )
    hits = gene_changes.index.isin(gene_set)
    hit_list = gene_changes[hits].index.tolist()
    if n_h is None:
        n_h = len(gene_set)
    # Normalizer: total weighted correlation magnitude over all hits.
    n_r = gene_changes[hits]['Correlation'].apply(
        lambda x: abs(x) ** p
    ).sum()
    # Running sum: hits step up proportionally to their weighted
    # correlation; misses step down by a constant penalty.
    scores = [0] + [
        ((abs(val) ** p) / n_r)
        if hit else
        (-1 / (n - n_h))
        for hit, val in zip(hits, gene_changes['Correlation'])
    ]
    cumsum = np.cumsum(scores)
    # Reduce the curve to a single score (peak, range, or integral).
    ess = ESS_METHODS.get(ess_method)(cumsum)
    return {
        'hits': hits,
        'cumscore': cumsum,
        'ess': ess,
        'hit_list': hit_list,
    }
def calculate_es_s_ud(gene_changes, up_set, down_set, **kwargs):
    '''
    Calculate the enrichment score for a directional (up/down) gene set.

    The final score is the up-set score minus the down-set score, each
    computed by `calculate_es_s` with a shared hit count.

    Parameters
    ----------
    gene_changes : :class:`pandas.DataFrame`
    up_set : set of str
    down_set : set of str
    kwargs : dict, optional
        See extra arguments passed to calculate_es_s.

    Returns
    -------
    dict
    '''
    # Restrict both sets to genes actually observed in the data.
    up_set = set(
        gene
        for gene in up_set
        if gene in gene_changes.index
    )
    down_set = set(
        gene
        for gene in down_set
        if gene in gene_changes.index
    )
    vals = []
    for gene_set in [up_set, down_set]:
        vals.append(
            calculate_es_s(
                gene_changes,
                gene_set,
                # Share the miss penalty across both directions.
                n_h=len(up_set) + len(down_set),
                **kwargs
            )
        )
    up_hits = vals[0]['hits']
    down_hits = vals[1]['hits']
    hit_list = vals[0]['hit_list'] + vals[1]['hit_list']
    upcumsum = vals[0]['cumscore']
    downcumsum = vals[1]['cumscore']
    # Up enrichment minus down enrichment; empty sides contribute zero.
    ess = (
        vals[0]['ess'] if vals[0]['hits'].any() else 0
    ) - (
        vals[1]['ess'] if vals[1]['hits'].any() else 0
    )
    return {
        'hits': up_hits,
        'down_hits': down_hits,
        'cumscore': upcumsum,
        'down_cumscore': downcumsum,
        'ess': ess,
        'hit_list': hit_list,
    }
def _get_set_cols(cols):
for set_cols in [
['up_set', 'down_set'],
['set'],
]:
if any([i in cols for i in set_cols]):
return set_cols
def enrichment_scores(
    psms,
    gene_sets,
    pval=True,
    recorrelate=False,
    metric=None,
    phenotype=None,
    **kwargs
):
    '''
    Calculate enrichment scores for each gene set.

    p-values and q-values are calculated by scrambling the phenotypes assigned
    to each sample or scrambling peptides' fold changes, depending on the
    correlation metric used.

    Parameters
    ----------
    psms : :class:`pandas.DataFrame`
    gene_sets : :class:`pandas.DataFrame`
    pval : bool, optional
        Run the permutation test and estimate p/q values.
    recorrelate : bool, optional
        Recompute the 'Correlation' column before scoring.
    metric : str, optional
    phenotype : :class:`pandas.Series`, optional
    kwargs : dict, optional
        See extra arguments passed to `calculate_es_s` and `simulate_es_s_pi`.

    Returns
    -------
    df : :class:`pandas.DataFrame`
        One row per gene set, sorted by NES(S) (with pval) or ES(S).
    '''
    # Default metric: rank by fold change when no phenotype is available,
    # otherwise correlate against the phenotype.
    if metric is None:
        if phenotype is None:
            metric = 'fold'
        else:
            metric = 'spearman'
    assert metric in CORRELATION_METRICS
    if recorrelate:
        psms = correlate_phenotype(psms, phenotype=phenotype, metric=metric)
    gene_changes = get_gene_changes(psms)
    set_cols = _get_set_cols(gene_sets.columns)
    cols = [
        'name',
        'cumscore',
        'down_cumscore',
        'ES(S)',
        'hits',
        'down_hits',
        'hit_list',
        'n_hits',
    ] + set_cols
    ess_args = {
        i: kwargs[i]
        for i in ['p', 'ess_method']
        if i in kwargs
    }
    # Collect one Series per gene set and build the frame in one shot.
    # BUGFIX/modernization: DataFrame.append was deprecated and removed
    # in pandas >= 2.0; appending row-by-row was also quadratic.
    rows = []
    for set_id, row in gene_sets.iterrows():
        if set(set_cols) == set(['set']):
            es_vals = calculate_es_s(
                gene_changes,
                row['set'],
                **ess_args
            )
        else:
            es_vals = calculate_es_s_ud(
                gene_changes,
                row['up_set'],
                row['down_set'],
                **ess_args
            )
        rows.append(
            pd.Series(
                [
                    row['name'],
                    es_vals.get('cumscore', []),
                    es_vals.get('down_cumscore', []),
                    es_vals.get('ess', 0),
                    es_vals.get('hits', []),
                    es_vals.get('down_hits', []),
                    es_vals.get('hit_list', []),
                    len(es_vals.get('hit_list', [])),
                ] + [row[i] for i in set_cols],
                name=set_id,
                index=cols,
            )
        )
    vals = pd.DataFrame(rows, columns=cols)
    ess_pi_args = {
        i: kwargs[i]
        for i in ['p', 'p_iter', 'n_cpus', 'ess_method']
        if i in kwargs
    }
    if pval:
        vals = simulate_es_s_pi(
            vals, psms, gene_sets,
            phenotype=phenotype,
            metric=metric,
            **ess_pi_args
        )
        vals = estimate_pq(vals)
        vals = vals.sort_values('NES(S)', ascending=False)
    else:
        vals = vals.sort_values('ES(S)', ascending=False)
    return vals
def filter_gene_sets(gene_sets, psms, min_hits=10):
    '''
    Keep only gene sets with at least *min_hits* members present in *psms*.

    Sets covering every observed gene are also dropped, since they carry
    no enrichment information.

    Parameters
    ----------
    gene_sets : :class:`pandas.DataFrame`
    psms : :class:`pandas.DataFrame`
    min_hits : int, optional

    Returns
    -------
    df : :class:`pandas.DataFrame`
    '''
    LOGGER.info('Filtering gene sets')
    n_before = gene_sets.shape[0]
    observed = set(psms['ID'])
    set_cols = _get_set_cols(gene_sets.columns)

    def _members(row):
        # Union of genes across the (one or two) set columns.
        return set(
            gene
            for col in set_cols
            for gene in row[col]
        )

    combined = gene_sets.apply(_members, axis=1)
    keep = combined.apply(
        lambda members:
        len(members) < len(observed) and
        len(observed.intersection(members)) >= min_hits
    )
    filtered = gene_sets[keep]
    LOGGER.info(
        'Filtered {} gene sets down to {} with ≥ {} genes present'
        .format(n_before, filtered.shape[0], min_hits)
    )
    return filtered
def filter_vals(
    vals,
    min_hits=0,
    min_abs_score=0,
    max_pval=1,
    max_qval=1,
):
    '''
    Apply hit-count, |ES(S)|, p-value, and q-value cutoffs to *vals*.

    The p/q cutoffs are only enforced when those columns exist, i.e. when
    the permutation test was actually run.

    Parameters
    ----------
    vals : :class:`pandas.DataFrame`
    min_hits : int, optional
    min_abs_score : float, optional
    max_pval : float, optional
    max_qval : float, optional

    Returns
    -------
    df : :class:`pandas.DataFrame`
    '''
    n_before = len(vals)

    if vals.shape[0] > 0:
        def _passes(row):
            if row['n_hits'] < min_hits:
                return False
            if abs(row['ES(S)']) < min_abs_score:
                return False
            if 'p-value' in row.index and row['p-value'] > max_pval:
                return False
            if 'q-value' in row.index and row['q-value'] > max_qval:
                return False
            return True

        vals = vals[vals.apply(_passes, axis=1)]

    LOGGER.info(
        'Filtered {} gene sets down to {} after cutoffs'
        .format(n_before, len(vals))
    )
    return vals
| 2.25 | 2 |
crypto.py | manu9812/MongoDB | 0 | 12769031 | <filename>crypto.py
import pymongo
from flask import Flask, request, jsonify
from werkzeug.exceptions import BadRequest
from agent import main as agent
app = Flask(__name__)
@app.route("/")
def index():
"""
:return: Endpoints Cryptongo API
"""
# jsonify: Convierte diccionarios de python al formato json.
return jsonify(
{
'name': 'Cryprongo API',
'index': request.host_url,
'endpoint_1': request.host_url + 'top-rank-20',
'endpoint_2': request.host_url + 'tickers'
}
)
def get_db_connection(uri):
    """
    Create the database connection.
    MongoClient connects to localhost by default.
    :param uri: Connection URI.
    :return: The database to use.
    """
    client = pymongo.MongoClient(uri)
    return client.cryptongo
# Module-level connection shared by all request handlers.
db_connection = get_db_connection('mongodb://crypto-mongodb-dev:27017/')
@app.route('/tickers', methods=['GET'])
def get_documents():
    """
    Fetch every document from the database collection.
    :return: A list of the documents matching the search criteria.
    """
    params = {}
    # request carries the URL request and its query parameters (if any).
    name = request.args.get('name', '')  # Empty string when absent.
    limit = int(request.args.get('limit', 0))
    if name:
        params.update({'name': name})  # Add the value to the filter dict.
    # Exclude the _id and ticker_hash fields from the projection.
    cursor = db_connection.tickers.find(params, {'_id': 0, 'ticker_hash': 0}).limit(limit)
    return jsonify(list(cursor))
@app.route("/top-rank-20", methods=['GET'])
def get_rank_top20():
"""
Obtiene los documentos que tienen un ranking menor o igual a 20.
:return: Una lista de los documentos según el criterio de búsqueda.
"""
params = {}
name = request.args.get('name', '')
limit = int(request.args.get('limit', 0))
if name:
params.update({'name': name})
params.update({'rank': {'$lte': 20}})
cursor = db_connection.tickers.find(params, {'_id': 0, 'ticker_hash': 0}).limit(limit)
return jsonify(list(cursor))
@app.route('/tickers', methods=['DELETE'])
def remove_currency():
    """
    Delete one or more documents from the collection by currency name.
    :return: The number of documents deleted.
    """
    params = {}
    name = request.args.get('name', '')
    if name:
        params.update({'name': name})
        number = db_connection.tickers.delete_many(params).deleted_count
        if number > 0:
            message = 'Documentos eliminados'
            return jsonify(message=message, number=number), 200  # Ok
        else:
            error = 'No se encontraron documentos'
            return jsonify(error=error), 404  # Not Found
    else:
        error = 'No se envío el parámetro name'
        return jsonify(error=error), 400  # Bad Request
@app.route('/tickers', methods=['POST'])
def add_currency():
    """
    Store a new ticker document posted as JSON in the request body.
    :return: A success message, or a 400 error when the document already
        exists or no JSON body was sent.
    """
    try:
        data_request = request.get_json()
        if agent.save_ticker(db_connection, data_request):
            message = 'Documento almacenado exitosamente'
            return jsonify(message=message), 200
        else:
            error = 'El documento ya existe'
            return jsonify(error=error), 400
    except BadRequest:
        # get_json() raises BadRequest on a missing/invalid JSON body.
        error = 'No se envío información en el body'
        return jsonify(error=error), 400
| 2.6875 | 3 |
"""
Modified From https://github.com/OpenNMT/OpenNMT-tf/blob/r1/examples/library/minimal_transformer_training.py
MIT License
Copyright (c) 2017-present The OpenNMT Authors.
This example demonstrates how to train a standard Transformer model using
OpenNMT-tf as a library in about 200 lines of code. While relatively short,
this example contains some advanced concepts such as dataset bucketing and
prefetching, token-based batching, gradients accumulation, beam search, etc.
Currently, the beam search part is not easily customizable. This is expected to
be improved for TensorFlow 2.0 which is eager first.
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Use opennmt-tf-1.25.1
import argparse
import copy
from datetime import datetime
import numpy as np
import os
import sys
import tensorflow as tf
import opennmt as onmt
from opennmt import constants
from opennmt.utils import misc
dir_path = os.path.dirname(os.path.realpath(__file__))
sys.path.append(dir_path + "/../../..")
from examples.tensorflow.decoding.utils.ft_decoding import ft_decoding
from examples.tensorflow.decoding.utils.bleu_score import bleu_score
from examples.tensorflow.decoder.utils.decoding import tf_sampling_decoding
from examples.tensorflow.decoder.utils.decoding import tf_beamsearch_decoding
from examples.tensorflow.decoder.utils.common import DecodingArgumentNew
from examples.tensorflow.decoder.utils.common import TransformerArgument
from examples.tensorflow.decoder.utils.common import DecodingSamplingArgument
from examples.tensorflow.decoder.utils.common import DecodingBeamsearchArgument
from examples.tensorflow.encoder.utils.encoder import ft_encoder_opennmt
from examples.tensorflow.encoder.utils.encoder import tf_encoder_opennmt
# Hyper-parameters of the "base" Transformer configuration shared by the
# encoder and decoder below.
NUM_HEADS = 8
NUM_LAYERS = 6
HIDDEN_UNITS = 512
SIZE_PER_HEAD = 64
FFN_INNER_DIM = 2048
# Module-level OpenNMT self-attention encoder/decoder used by translate().
encoder = onmt.encoders.SelfAttentionEncoder(
    num_layers=NUM_LAYERS,
    num_units=HIDDEN_UNITS,
    num_heads=NUM_HEADS,
    ffn_inner_dim=FFN_INNER_DIM,
    dropout=0.1,
    attention_dropout=0.1,
    relu_dropout=0.1)
decoder = onmt.decoders.SelfAttentionDecoder(
    num_layers=NUM_LAYERS,
    num_units=HIDDEN_UNITS,
    num_heads=NUM_HEADS,
    ffn_inner_dim=FFN_INNER_DIM,
    dropout=0.1,
    attention_dropout=0.1,
    relu_dropout=0.1)
def translate(args_dict):
batch_size = args_dict['batch_size']
beam_size = args_dict['beam_width']
max_seq_len = args_dict['max_seq_len']
model_dir = args_dict["model_dir"]
source_file = args_dict["source"]
tgt_file = args_dict["target"]
time_args = args_dict["test_time"]
beam_search_diversity_rate = args_dict['beam_search_diversity_rate']
sampling_topk = args_dict['sampling_topk']
sampling_topp = args_dict['sampling_topp']
tf_datatype = tf.float32
max_ite = args_dict['max_iteration']
if args_dict['data_type'] == "fp16":
tf_datatype = tf.float16
print("\n=============== Argument ===============")
for key in args_dict:
print("{}: {}".format(key, args_dict[key]))
print("========================================")
# Define the "base" Transformer model.
source_inputter = onmt.inputters.WordEmbedder("source_vocabulary", embedding_size=512, dtype=tf_datatype)
target_inputter = onmt.inputters.WordEmbedder("target_vocabulary", embedding_size=512, dtype=tf_datatype)
inputter = onmt.inputters.ExampleInputter(source_inputter, target_inputter)
inputter.initialize({
"source_vocabulary": args_dict["source_vocabulary"],
"target_vocabulary": args_dict["target_vocabulary"]
})
mode = tf.estimator.ModeKeys.PREDICT
np.random.seed(1)
tf.set_random_seed(1)
# Create the inference dataset.
dataset = inputter.make_inference_dataset(source_file, batch_size)
iterator = dataset.make_initializable_iterator()
source = iterator.get_next()
encoder_args = TransformerArgument(beam_width=1,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
remove_padding=True,
allow_gemm_test=False)
# Encode the source.
with tf.variable_scope("transformer/encoder"):
source_embedding = source_inputter.make_inputs(source)
source_embedding = tf.cast(source_embedding, tf_datatype)
# Using onmt fp16 for encoder.encode leads to significant accuracy drop
# So, we rewrite the encoder
# memory, _, _ = encoder.encode(source_embedding, source["length"], mode=mode)
memory = tf_encoder_opennmt(source_embedding, encoder_args, source["length"])
encoder_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
encoder_variables_dict = {}
for v in encoder_vars:
encoder_variables_dict[v.name] = tf.cast(v, tf_datatype)
ft_encoder_result = ft_encoder_opennmt(inputs=source_embedding,
encoder_args=encoder_args,
encoder_vars_dict=encoder_variables_dict,
sequence_length=source["length"])
# Generate the target.
with tf.variable_scope("transformer/decoder", reuse=tf.AUTO_REUSE):
target_inputter.build()
batch_size = tf.shape(memory)[0]
start_tokens = tf.fill([batch_size], constants.START_OF_SENTENCE_ID)
end_token = constants.END_OF_SENTENCE_ID
target_embedding = tf.cast(target_inputter.embedding, tf_datatype)
target_ids, _, target_length, _ = decoder.dynamic_decode_and_search(
target_embedding,
start_tokens,
end_token,
vocab_size=target_inputter.vocabulary_size,
beam_width=beam_size,
memory=memory,
memory_sequence_length=source["length"],
maximum_iterations=max_seq_len)
target_vocab_rev = target_inputter.vocabulary_lookup_reverse()
target_tokens = target_vocab_rev.lookup(tf.cast(target_ids, tf.int64))
decoder_args = TransformerArgument(beam_width=beam_size,
head_num=NUM_HEADS,
size_per_head=SIZE_PER_HEAD,
inter_size=NUM_HEADS*SIZE_PER_HEAD*4,
num_layer=NUM_LAYERS,
dtype=tf_datatype,
kernel_init_range=0.00,
bias_init_range=0.00)
decoder_args_2 = copy.deepcopy(decoder_args) # for beam search
decoder_args_2.__dict__ = copy.deepcopy(decoder_args.__dict__)
decoder_args_2.beam_width = 1 # for sampling
ft_decoder_beamsearch_args = DecodingBeamsearchArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args,
beam_search_diversity_rate)
ft_decoder_sampling_args = DecodingSamplingArgument(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
decoder_args_2,
sampling_topk,
sampling_topp)
decoding_beamsearch_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
beam_search_diversity_rate,
0,
0.0,
decoder_args)
decoding_sampling_args = DecodingArgumentNew(target_inputter.vocabulary_size,
constants.START_OF_SENTENCE_ID,
constants.END_OF_SENTENCE_ID,
max_seq_len,
0.0,
sampling_topk,
sampling_topp,
decoder_args_2)
all_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES)
ft_target_ids, ft_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_beamsearch_args)
ft_target_tokens = target_vocab_rev.lookup(tf.cast(ft_target_ids, tf.int64))
ft_sampling_target_ids, ft_sampling_target_length, _, _, _ = ft_decoding(ft_encoder_result,
source["length"],
target_embedding,
all_vars,
decoding_sampling_args)
ft_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_sampling_target_ids, tf.int64))
# ### TF Sampling Decoding ###
tf_sampling_target_ids, tf_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=0)
# tf_sampling_target_tokens: [batch_size, seq_len]
tf_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(tf_sampling_target_ids, tf.int64))
# ### end of TF BeamSearch Decoding ###
### OP BeamSearch Decoder ###
ft_decoder_beamsearch_target_ids, ft_decoder_beamsearch_target_length, _, _, _ = tf_beamsearch_decoding(memory,
source["length"],
target_embedding,
ft_decoder_beamsearch_args,
decoder_type=1)
# ft_decoder_beamsearch_target_tokens: [batch_size, beam_width, seq_len]
ft_decoder_beamsearch_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_beamsearch_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
### OP Sampling Decoder ###
ft_decoder_sampling_target_ids, ft_decoder_sampling_target_length = tf_sampling_decoding(memory,
source["length"],
target_embedding,
ft_decoder_sampling_args,
decoder_type=1)
ft_decoder_sampling_target_tokens = target_vocab_rev.lookup(tf.cast(ft_decoder_sampling_target_ids, tf.int64))
### end of OP BeamSearch Decoder ###
class TranslationResult(object):
def __init__(self, token_op, length_op, name):
self.token_op = token_op
self.length_op = length_op
self.name = name
self.file_name = name + ".txt"
self.token_list = []
self.length_list = []
self.batch_num = 0
self.execution_time = 0.0 # seconds
self.sentence_num = 0
self.bleu_score = None
translation_result_list = []
if time_args != "":
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling-for-warmup"))
if time_args.find("0") != -1:
translation_result_list.append(TranslationResult(
target_tokens, target_length, "tf-decoding-beamsearch"))
if time_args.find("1") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_beamsearch_target_tokens, ft_decoder_beamsearch_target_length, "ft-decoder-beamsearch"))
if time_args.find("2") != -1:
translation_result_list.append(TranslationResult(
ft_target_tokens, ft_target_length, "ft-decoding-beamsearch"))
if time_args.find("3") != -1:
translation_result_list.append(TranslationResult(
tf_sampling_target_tokens, tf_sampling_target_length, "tf-decoding-sampling"))
if time_args.find("4") != -1:
translation_result_list.append(TranslationResult(
ft_decoder_sampling_target_tokens, ft_decoder_sampling_target_length, "ft-decoder-sampling"))
if time_args.find("5") != -1:
translation_result_list.append(TranslationResult(
ft_sampling_target_tokens, ft_sampling_target_length, "ft-decoding-sampling"))
# Iterates on the dataset.
float_checkpoint_path = tf.train.latest_checkpoint(model_dir)
half_checkpoint_path = tf.train.latest_checkpoint(model_dir + "_fp16")
float_var_list = []
half_var_list = []
for var in tf.global_variables():
if var.dtype.base_dtype == tf.float32:
float_var_list.append(var)
elif var.dtype.base_dtype == tf.float16:
half_var_list.append(var)
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
for i in range(len(translation_result_list)):
with tf.Session(config=config) as sess:
if(len(float_var_list) > 0):
float_saver = tf.train.Saver(float_var_list)
float_saver.restore(sess, float_checkpoint_path)
if(len(half_var_list) > 0):
half_saver = tf.train.Saver(half_var_list)
half_saver.restore(sess, half_checkpoint_path)
sess.run(tf.tables_initializer())
sess.run(iterator.initializer)
t1 = datetime.now()
while True:
try:
batch_tokens, batch_length = sess.run([translation_result_list[i].token_op,
translation_result_list[i].length_op])
for tokens, length in zip(batch_tokens, batch_length):
# misc.print_bytes(b" ".join(tokens[0][:length[0] - 1]))
if translation_result_list[i].name.find("beamsearch") != -1:
translation_result_list[i].token_list.append(
b" ".join(tokens[0][:length[0] - 1]).decode("UTF-8"))
else:
translation_result_list[i].token_list.append(b" ".join(tokens[:length - 1]).decode("UTF-8"))
translation_result_list[i].batch_num += 1
if translation_result_list[i].name == "tf-decoding-sampling-for-warmup" and translation_result_list[i].batch_num > 20:
break
if translation_result_list[i].batch_num >= max_ite:
break
except tf.errors.OutOfRangeError:
break
t2 = datetime.now()
time_sum = (t2 - t1).total_seconds()
translation_result_list[i].execution_time = time_sum
with open(translation_result_list[i].file_name, "w") as file_b:
for s in translation_result_list[i].token_list:
file_b.write(s)
file_b.write("\n")
ref_file_path = "./.ref_file.txt"
os.system("head -n %d %s > %s" % (len(translation_result_list[i].token_list), tgt_file, ref_file_path))
translation_result_list[i].bleu_score = bleu_score(translation_result_list[i].file_name, ref_file_path)
os.system("rm {}".format(ref_file_path))
for t in translation_result_list:
if t.name == "tf-decoding-sampling-for-warmup":
continue
print("[INFO] {} translates {} batches taking {:.2f} sec to translate {} tokens, BLEU score: {:.2f}, {:.0f} tokens/sec.".format(
t.name, t.batch_num, t.execution_time, t.bleu_score.sys_len, t.bleu_score.score, t.bleu_score.sys_len / t.execution_time))
return translation_result_list
def main():
    """Parse command-line options for the translation benchmark and run it.

    All parsed options are forwarded to ``translate`` as a plain dict.
    """
    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-batch', '--batch_size', type=int, default=1, metavar='NUMBER',
                        help='batch size (default: 1)')
    parser.add_argument('-beam', '--beam_width', type=int, default=4, metavar='NUMBER',
                        help='beam width (default: 4)')
    parser.add_argument('-s', '--max_seq_len', type=int, default=200, metavar='NUMBER',
                        help='max sequence length (default: 200)')
    parser.add_argument("--source", default="../examples/tensorflow/decoding/utils/translation/test.en",
                        help="Path to the source file.")
    parser.add_argument("--target", default="../examples/tensorflow/decoding/utils/translation/test.de",
                        help="Path to the target file.")
    parser.add_argument("--source_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
                        help="Path to the source vocabulary.")
    parser.add_argument("--target_vocabulary", default="../examples/tensorflow/decoding/utils/translation/wmtende.vocab",
                        help="Path to the target vocabulary.")
    parser.add_argument("--model_dir", default="../translation/ckpt",
                        help="Directory where checkpoint are written.")
    parser.add_argument('-time', '--test_time', type=str, default='', metavar='STRING',
                        help='''
                            Test the time of which one (default: '' (not test anyone) );
                            '': not test anyone
                            '0': test tf_decoding_beamsearch
                            '1': test op_decoder_beamsearch
                            '2': test op_decoding_beamsearch
                            '3': test tf_decoding_sampling
                            '4': test op_decoder_sampling
                            '5': test op_decoding_sampling
                            'e.g., if you want to test op_decoder_beamsearch and op_decoding_sampling,
                            then you need to use -time '15' ''')
    # Typo fixes below: "deviersity" -> "diversity", "beams earch" -> "beam search".
    parser.add_argument('-diversity_rate', '--beam_search_diversity_rate', type=float, default=0.0, metavar='NUMBER',
                        help='diversity rate of beam search. default is 0. When diversity rate = 0, it is equivalent to the naive beam search.')
    parser.add_argument('-topk', '--sampling_topk', type=int, default=1, metavar='NUMBER',
                        help='Candidate (k) value of top k sampling in decoding. Default is 1.')
    parser.add_argument('-topp', '--sampling_topp', type=float, default=0.0, metavar='NUMBER',
                        help='Probability (p) value of top p sampling in decoding. Default is 0.0. ')
    parser.add_argument('-d', '--data_type', type=str, default="fp32", metavar='STRING',
                        help='data type (default: fp32)', choices=['fp32', 'fp16'])
    # Typo fix: "iteraiton" -> "iteration".
    parser.add_argument('-max_ite', '--max_iteration', type=int, default=100000, metavar='NUMBER',
                        help='Maximum iteration for translation, default is 100000 (as large as possible to run all test set).')
    args = parser.parse_args()
    translate(vars(args))
# example script
# python ../examples/tensorflow/decoding/translate_example.py --source ../examples/tensorflow/decoding/utils/translation/test.en --target ../examples/tensorflow/decoding/utils/translation/test.de --source_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --target_vocabulary ../examples/tensorflow/decoding/utils/translation/wmtende.vocab --model_dir ../translation/ckpt/ -time 02
# Entry point guard: run the CLI only when executed as a script.
if __name__ == "__main__":
    main()
| 2.453125 | 2 |
fides/constants.py | dweindl/fides | 0 | 12769033 | <reponame>dweindl/fides
"""
Constants
-----------
This module provides a central place to define native python enums and
constants that are used in multiple other modules
"""
import enum
import numpy as np
class Options(str, enum.Enum):
    """
    Defines all the fields that can be specified in Options to
    :py:class:`Optimizer`
    """
    MAXITER = 'maxiter' #: maximum number of allowed iterations
    MAXTIME = 'maxtime' #: maximum amount of walltime in seconds
    FATOL = 'fatol' #: absolute tolerance for convergence based on fval
    FRTOL = 'frtol' #: relative tolerance for convergence based on fval
    XTOL = 'xtol' #: tolerance for convergence based on x
    GATOL = 'gatol' #: absolute tolerance for convergence based on grad
    GRTOL = 'grtol' #: relative tolerance for convergence based on grad
    SUBSPACE_DIM = 'subspace_solver' #: trust region subproblem subspace
    STEPBACK_STRAT = 'stepback_strategy' #: method to use for stepback
    THETA_MAX = 'theta_max' #: maximal fraction of step that would hit bounds
    DELTA_INIT = 'delta_init' #: initial trust region radius
    MU = 'mu' #: acceptance threshold for trust region ratio
    ETA = 'eta' #: trust region increase threshold for trust region ratio
    GAMMA1 = 'gamma1' #: factor by which trust region radius will be decreased
    GAMMA2 = 'gamma2' #: factor by which trust region radius will be increased
    REFINE_STEPBACK = 'refine_stepback' #: whether stepbacks are refined via optimization
    SCALED_GRADIENT = 'scaled_gradient' #: whether scaled gradient should be added to the set of possible stepback proposals
class SubSpaceDim(str, enum.Enum):
    r"""
    Defines the possible choices of subspace dimension in which the
    subproblem will be solved.

    Used as value for :py:attr:`Options.SUBSPACE_DIM`.
    """
    TWO = '2D' #: Two dimensional Newton/Gradient subspace
    FULL = 'full' #: Full :math:`\mathbb{R}^n`
    STEIHAUG = 'scg' #: CG subspace via Steihaug's method
class StepBackStrategy(str, enum.Enum):
    """
    Defines the possible choices of search refinement if proposed step
    reaches optimization boundary

    Used as value for :py:attr:`Options.STEPBACK_STRAT`.
    """
    SINGLE_REFLECT = 'reflect_single' #: single reflection at boundary
    REFLECT = 'reflect' #: recursive reflections at boundary
    TRUNCATE = 'truncate' #: truncate step at boundary and re-solve restricted subproblem
    MIXED = 'mixed' #: mix reflections and truncations
#: Default value for every :py:class:`Options` field, applied when the caller
#: does not override it. Constants marked [NocedalWright2006] follow
#: Nocedal & Wright, "Numerical Optimization" (2006).
DEFAULT_OPTIONS = {
    Options.MAXITER: 1e3,
    Options.MAXTIME: np.inf,
    Options.FATOL: 1e-8,
    Options.FRTOL: 1e-8,
    Options.XTOL: 0,
    Options.GATOL: 1e-6,
    Options.GRTOL: 0,
    Options.SUBSPACE_DIM: SubSpaceDim.FULL,
    Options.STEPBACK_STRAT: StepBackStrategy.REFLECT,
    Options.THETA_MAX: 0.95,
    Options.DELTA_INIT: 1.0,
    Options.MU: 0.25,  # [NocedalWright2006]
    Options.ETA: 0.75,  # [NocedalWright2006]
    Options.GAMMA1: 1/4,  # [NocedalWright2006]
    Options.GAMMA2: 2,  # [NocedalWright2006]
    Options.REFINE_STEPBACK: False,
    Options.SCALED_GRADIENT: False,
}
class ExitFlag(int, enum.Enum):
    """
    Defines possible exitflag values for the optimizer to indicate why
    optimization exited. Negative values indicate errors while positive
    values indicate convergence.
    """
    DID_NOT_RUN = 0 #: Optimizer did not run
    MAXITER = -1 #: Reached maximum number of allowed iterations
    MAXTIME = -2 #: Expected to reach maximum allowed time in next iteration
    NOT_FINITE = -3 #: Encountered non-finite fval/grad/hess
    EXCEEDED_BOUNDARY = -4 #: Exceeded specified boundaries
    DELTA_TOO_SMALL = -5 #: Trust Region Radius too small to proceed
    FTOL = 1 #: Converged according to fval difference
    XTOL = 2 #: Converged according to x difference
    GTOL = 3 #: Converged according to gradient norm
VariationalPrinciple/VariationalPrinciple.py | szhang-cis/Kuru_Mac | 0 | 12769034 | import numpy as np
from Kuru import QuadratureRule, FunctionSpace , Mesh
from Kuru.FiniteElements.LocalAssembly._KinematicMeasures_ import _KinematicMeasures_
from Kuru.VariationalPrinciple._GeometricStiffness_ import GeometricStiffnessIntegrand as GetGeomStiffness
from .DisplacementApproachIndices import FillGeometricB
#from ._MassIntegrand_ import __MassIntegrand__, __ConstantMassIntegrand__
__all__ = ["VariationalPrinciple"]
class VariationalPrinciple(object):
energy_dissipation = []
internal_energy = []
kinetic_energy = []
external_energy = []
power_dissipation = []
internal_power = []
kinetic_power = []
external_power = []
def __init__(self, mesh, variables_order=(1,0),
analysis_type='static', analysis_nature='nonlinear', fields='mechanics',
quadrature_rules=None, median=None, quadrature_type=None,
function_spaces=None, compute_post_quadrature=True):
self.variables_order = variables_order
self.nvar = None
self.ndim = mesh.points.shape[1]
if isinstance(self.variables_order,int):
self.variables_order = tuple(self.variables_order)
self.quadrature_rules = quadrature_rules
self.quadrature_type = quadrature_type
self.function_spaces = function_spaces
self.median = median
self.analysis_type = analysis_type
self.analysis_nature = analysis_nature
self.fields = fields
self.compute_post_quadrature = compute_post_quadrature
# GET NUMBER OF VARIABLES
self.GetNumberOfVariables()
def GetQuadratureOrder(self, C, element_type, quadrature_degree=None):
"""Finds quadrature degree/strength for a given polynomial order C=p-1 [where p is polynomial degree]"""
if quadrature_degree is None:
if element_type == "tri" or element_type == "tet":
norder = 2*C if C > 0 else 1
norder_post = 2*(C+1)
else:
norder = C+2
# ACTUAL
# norder_post = 2*(C+2)
# ALTHOUGH THIS INTEGRATES EXACTLY
norder_post = C+2
else:
norder = quadrature_degree
if element_type == "tri" or element_type == "tet":
norder_post = 2*quadrature_degree
else:
norder_post = quadrature_degree
return norder, norder_post
def GetQuadraturesAndFunctionSpaces(self, mesh, variables_order=(1,),
quadrature_rules=None, quadrature_type=None, function_spaces=None, compute_post_quadrature=True,
equally_spaced_bases=False, quadrature_degree=None):
""""The default function for computing quadrature rules and function spaces for equall order single
and multi-physics/fields problems"""
C = mesh.InferPolynomialDegree() - 1
mesh.InferBoundaryElementType()
if quadrature_rules == None and self.quadrature_rules == None:
# OPTION FOR QUADRATURE TECHNIQUE FOR TRIS AND TETS
optimal_quadrature = 3
if mesh.element_type == "quad" or mesh.element_type == "hex":
if quadrature_type == "wv":
optimal_quadrature = 4
norder, norder_post = self.GetQuadratureOrder(C, mesh.element_type, quadrature_degree=quadrature_degree)
# GET QUADRATURE
quadrature = QuadratureRule(optimal=optimal_quadrature, norder=norder, mesh_type=mesh.element_type)
if self.compute_post_quadrature:
# COMPUTE INTERPOLATION FUNCTIONS AT ALL INTEGRATION POINTS FOR POST-PROCESSING
post_quadrature = QuadratureRule(optimal=optimal_quadrature, norder=norder_post, mesh_type=mesh.element_type)
else:
post_quadrature = None
# BOUNDARY QUADRATURE
bquadrature = QuadratureRule(optimal=optimal_quadrature, norder=C+2, mesh_type=mesh.boundary_element_type)
self.quadrature_rules = (quadrature,post_quadrature,bquadrature)
else:
self.quadrature_rules = quadrature_rules
if function_spaces == None and self.function_spaces == None:
# CREATE FUNCTIONAL SPACES
function_space = FunctionSpace(mesh, self.quadrature_rules[0], p=C+1, equally_spaced=equally_spaced_bases)
if self.compute_post_quadrature:
post_function_space = FunctionSpace(mesh, self.quadrature_rules[1], p=C+1, equally_spaced=equally_spaced_bases)
else:
post_function_space = None
# CREATE BOUNDARY FUNCTIONAL SPACES
bfunction_space = FunctionSpace(mesh.CreateDummyLowerDimensionalMesh(),
self.quadrature_rules[2], p=C+1, equally_spaced=equally_spaced_bases)
self.function_spaces = (function_space,post_function_space,bfunction_space)
else:
self.function_spaces = function_spaces
local_size = self.function_spaces[0].Bases.shape[0]*self.nvar
self.local_rows = np.repeat(np.arange(0,local_size),local_size,axis=0)
self.local_columns = np.tile(np.arange(0,local_size),local_size)
self.local_size = local_size
# FOR MASS
local_size_m = self.function_spaces[0].Bases.shape[0]*self.ndim
self.local_rows_mass = np.repeat(np.arange(0,local_size_m),local_size_m,axis=0)
self.local_columns_mass = np.tile(np.arange(0,local_size_m),local_size_m)
self.local_size_m = local_size_m
def GetNumberOfVariables(self):
"""Returns (self.nvar) i.e. number of variables/unknowns per node, for the formulation.
Note that self.nvar does not take into account the unknowns which get condensated
"""
# nvar = 0
# for i in self.variables_order:
# # DO NOT COUNT VARIABLES THAT GET CONDENSED OUT
# if i!=0:
# if mesh.element_type == "tri":
# nvar += (i+1)*(i+2) // 2
# elif mesh.element_type == "tet":
# nvar += (i+1)*(i+2)*(i+3) // 6
# elif mesh.element_type == "quad":
# nvar += (i+1)**2
# elif mesh.element_type == "hex":
# nvar += (i+1)**3
# nvar = sum(self.variables_order)
if self.nvar == None:
self.nvar = self.ndim
return self.nvar
def FindIndices(self,A):
return self.local_rows, self.local_columns, A.ravel()
def GeometricStiffnessIntegrand(self, SpatialGradient, CauchyStressTensor):
"""Applies to displacement based, displacement potential based and all mixed
formulations that involve static condensation"""
ndim = self.ndim
nvar = self.nvar
B = np.zeros((nvar*SpatialGradient.shape[0],ndim*ndim))
S = np.zeros((ndim*ndim,ndim*ndim))
SpatialGradient = SpatialGradient.T.copy('c')
FillGeometricB(B,SpatialGradient,S,CauchyStressTensor,ndim,nvar)
BDB = np.dot(np.dot(B,S),B.T)
return BDB
def __GeometricStiffnessIntegrand__(self, SpatialGradient, CauchyStressTensor, detJ):
"""Applies to displacement based formulation"""
return GetGeomStiffness(np.ascontiguousarray(SpatialGradient),CauchyStressTensor, detJ, self.nvar)
def VolumetricStiffnessIntegrand(self, material, SpatialGradient, detJ, dV):
"""Computes the volumetric stiffness using Hu-Washizu on Mean Dilatation method"""
if material.has_low_level_dispatcher:
from ._VolumetricStiffness_ import _VolumetricStiffnessIntegrand_
stiffness, MeanVolume = _VolumetricStiffnessIntegrand_(material,
np.ascontiguousarray(SpatialGradient), np.ascontiguousarray(detJ),
np.ascontiguousarray(dV), self.nvar)
else:
MaterialVolume = np.sum(dV)
if material.has_state_variables and material.has_growth_remodeling:
dve = np.true_divide(detJ,material.StateVariables[:,material.id_growth])
CurrentElasticVolume = np.sum(dve)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageDeformationv = np.einsum('i,ijk,i->jk',material.StateVariables[:,material.id_density],SpatialGradient,dve)
AverageDeformationv = AverageDeformationv.flatten()
AverageDeformationu = np.einsum('ijk,i->jk',SpatialGradient,dve)
AverageDeformationu = AverageDeformationu.flatten()
stiffness = np.einsum('i,j->ij',AverageDeformationv,AverageDeformationu)
MeanVolume = (CurrentElasticVolume-MaterialVolume)/MaterialVolume
elif material.has_state_variables and not material.has_growth_remodeling:
CurrentElasticVolume = np.sum(detJ)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageDeformationv = np.einsum('i,ijk,i->jk',material.StateVariables[:,material.id_density],SpatialGradient,detJ)
AverageDeformationv = AverageDeformationv.flatten()
AverageDeformationu = np.einsum('ijk,i->jk',SpatialGradient,detJ)
AverageDeformationu = AverageDeformationu.flatten()
stiffness = np.einsum('i,j->ij',AverageDeformationv,AverageDeformationu)
MeanVolume = (CurrentElasticVolume-MaterialVolume)/MaterialVolume
elif not material.has_state_variables and not material.has_growth_remodeling:
CurrentVolume = np.sum(detJ)
# AVERAGE SPATIAL GRADIENT IN PHYSICAL ELEMENT [\frac{1}{v}\int\nabla(N)dv(nodeperelem x ndim)]
AverageSpatialGradient = np.einsum('ijk,i->jk',SpatialGradient,detJ)
AverageSpatialGradient = AverageSpatialGradient.flatten()
stiffness = np.einsum('i,j->ij',AverageSpatialGradient,AverageSpatialGradient)
MeanVolume = (CurrentVolume-MaterialVolume)/MaterialVolume
stiffness = np.true_divide(stiffness,MaterialVolume)
material.pressure = material.kappa*MeanVolume
stiffness *= material.kappa
return stiffness
| 2.28125 | 2 |
tests/application_tests.py | dhdemerson/azure-lightning-flask | 0 | 12769035 | """Tests for the azure_lightning_flask application"""
import unittest
from mock import patch
from azure.common import AzureMissingResourceHttpError
from azure_lightning_flask.application import create_app
from tests.test_config import TestConfig
class TestApplication(unittest.TestCase):
    """Tests for the azure_lightning_flask application.

    Each test patches ``TableService.get_entity`` so no real Azure Table
    storage is touched, drives the Flask test client, then asserts on the
    table lookups performed and on the HTTP response returned.
    """

    class ContentRowResponse(object): # pylint: disable=too-few-public-methods
        """Simulated content response from Azure Table"""
        content = '<html></html>'

    class ActiveRowResponse(object): # pylint: disable=too-few-public-methods
        """Simulated active row response indicating active revision"""
        content = '{0}:ActiveRevision'.format(TestConfig.APP_NAME)

    def setUp(self):
        # Fixed "active revision" pointer row and the content row it names.
        # (Attribute renamed from the original misspelled ``active_row_reponse``.)
        self.active_row_response = self.ActiveRowResponse()
        self.content_row_response = self.ContentRowResponse()
        # Consumed by mocks that must answer two sequential lookups:
        # first the ``current`` pointer row, then the revision row it names.
        self.default_revision_responses = iter([
            self.ActiveRowResponse(),
            self.ContentRowResponse()
        ])
        self.app = create_app(TestConfig)
        self.client = self.app.test_client()

    def _assert_revision_served(self, mock_get_entity, response, revision):
        """Assert exactly one lookup of *revision* happened and its content was served."""
        row_key = '{0}:{1}'.format(TestConfig.APP_NAME, revision)
        mock_get_entity.assert_called_once_with(
            TestConfig.AZURE_STORAGE_TABLE,
            TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
            row_key
        )
        self.assertEqual(response.data, self.content_row_response.content)
        self.assertEqual(response.status_code, 200)

    def _assert_active_revision_served(self, mock_get_entity, response):
        """Assert the active-revision flow ran: pointer lookup, content lookup, 200.

        This assertion block was previously duplicated verbatim across four
        tests; it is factored out here.
        """
        active_row_key = '{0}:current'.format(TestConfig.APP_NAME)
        mock_get_entity.assert_any_call(
            TestConfig.AZURE_STORAGE_TABLE,
            TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
            active_row_key
        )
        mock_get_entity.assert_any_call(
            TestConfig.AZURE_STORAGE_TABLE,
            TestConfig.AZURE_STORAGE_TABLE_PARTITION_KEY,
            self.active_row_response.content
        )
        self.assertEqual(mock_get_entity.call_count, 2)
        self.assertEqual(response.data, self.content_row_response.content)
        self.assertEqual(response.status_code, 200)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_root_valid_revision(self, mock_get_entity):
        """Test the application returns the revision specified"""
        mock_get_entity.return_value = self.content_row_response
        revision = 'TestRevision'
        url = '/?{0}={1}'.format(TestConfig.REVISION_PARAMETER, revision)
        response = self.client.get(url)
        self._assert_revision_served(mock_get_entity, response, revision)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_root_default_revision(self, mock_get_entity):
        """Test the application returns the active revision when no revision is specified"""
        mock_get_entity.side_effect = self.default_revision_responses
        response = self.client.get('/')
        self._assert_active_revision_served(mock_get_entity, response)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_root_invalid_revision(self, mock_get_entity):
        """Test the application returns a 404 response when a specified revision can't be found"""
        mock_get_entity.side_effect = AzureMissingResourceHttpError("Not Found", 404)
        url = '/?{0}=InvalidRevsion'.format(TestConfig.REVISION_PARAMETER)
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_root_empty_revision(self, mock_get_entity):
        """Test that an empty/blank but specified revision returns the active revision"""
        mock_get_entity.side_effect = self.default_revision_responses
        url = '/?{0}='.format(TestConfig.REVISION_PARAMETER)
        response = self.client.get(url)
        self._assert_active_revision_served(mock_get_entity, response)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_nonroot_default_revision(self, mock_get_entity):
        """Test the application handles and responds correctly to arbitrary paths"""
        mock_get_entity.side_effect = self.default_revision_responses
        response = self.client.get('/directory/much/deep/wow')
        self._assert_active_revision_served(mock_get_entity, response)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_revision_with_additional_parameters(self, mock_get_entity): # pylint: disable=C0103
        """Test the application returns a requested revision even among other query parameters"""
        mock_get_entity.return_value = self.content_row_response
        revision = 'TestRevision'
        url = '/?index_key=123&{0}={1}&revision=456'.format(
            TestConfig.REVISION_PARAMETER,
            revision
        )
        response = self.client.get(url)
        self._assert_revision_served(mock_get_entity, response, revision)

    @patch('azure_lightning_flask.helpers.TableService.get_entity')
    def test_default_with_parameters(self, mock_get_entity):
        """Test that the application ignores query parameters that are not requesting a revision"""
        mock_get_entity.side_effect = self.default_revision_responses
        response = self.client.get('/?index_key=123&&revision=456')
        self._assert_active_revision_served(mock_get_entity, response)
| 2.40625 | 2 |
year1/python/week3/q9_minus.py | OthmanEmpire/university | 1 | 12769036 | ### This program subtracts ###
def minus(a, b):
    """Print the result of subtracting b from a."""
    print(a - b)
# Interactive entry point: read two whitespace-separated integers and
# print their difference via minus().
print("Welcome to subtracting your life version 1.0.0.0.0.0.0.1!")
raw_first, raw_second = input("Please input the digits of your life that you wish to subtract: ").split()
minus(int(raw_first), int(raw_second))
| 3.9375 | 4 |
sendHeartbeat.py | lvoleing/rabbitMQ | 3 | 12769037 | from flask import Flask, request
from flask_apscheduler import APScheduler
import json
import socket
import datetime
import mqManagementClass
class Config(object):
    """Flask-APScheduler configuration object loaded via app.config.from_object."""
    # Scheduled job list consumed by flask_apscheduler.
    # NOTE(review): the job id "heratbeat" is misspelled ("heartbeat") —
    # harmless at runtime, but confirm nothing references it before fixing.
    JOBS = [
        {
            'id': 'heratbeat',
            'func': '__main__:sendHeartbeat', # function to execute (module:callable)
            'args': None,
            'trigger': 'interval',
            'seconds': 10, # interval between runs, in seconds
        }
    ]
# Create the Flask application and load the scheduler job configuration.
app = Flask(__name__)
app.config.from_object(Config())
@app.route("/sendHeartbeat", methods=["POST"])
def sendHeartbeat():
    """Build a heartbeat message for the fixed node and publish it via MQ.

    Returns a JSON string of the form ``{"success": <delivery result>}``.
    Also invoked periodically by the APScheduler job configured in Config.
    """
    host_name = socket.gethostname()
    now = datetime.datetime.now()
    stamp = now.strftime("%Y%m%d%H%M%S%f")
    # Payload sent to the node; "destination" doubles as the target node IP.
    heartbeat = {
        "destination": "192.168.4.16",
        "timestamp": stamp,
        "user": host_name,
        "cmd": None
    }
    node_ip = heartbeat["destination"]
    mq_master = mqManagementClass.mqManagement()
    delivered = mq_master.sentCmdToNode(node_ip, heartbeat)
    print(delivered)
    return json.dumps({"success": delivered})
if __name__ == '__main__':
    # Start the background scheduler (fires sendHeartbeat on the interval
    # configured in Config.JOBS), then block in the Flask development server.
    scheduler=APScheduler()
    scheduler.init_app(app)
    scheduler.start()
    app.run(debug=False)
| 2.375 | 2 |
tests/unit/test_jobs_storage.py | neuro-inc/platform-api | 0 | 12769038 | <filename>tests/unit/test_jobs_storage.py
from datetime import timedelta
from typing import Any
import pytest
from platform_api.orchestrator.job import (
JobRecord,
JobRequest,
JobStatus,
current_datetime_factory,
)
from platform_api.orchestrator.job_request import Container, ContainerResources
from platform_api.orchestrator.jobs_storage import (
InMemoryJobsStorage,
JobFilter,
JobStorageJobFoundError,
)
class TestInMemoryJobsStorage:
    """Tests for the async :class:`InMemoryJobsStorage` API."""
    async def test_get_all_jobs_empty(self) -> None:
        """A freshly created storage contains no jobs."""
        jobs_storage = InMemoryJobsStorage()
        jobs = await jobs_storage.get_all_jobs()
        assert not jobs
    def _create_job_request(self, is_gpu_job: bool = False) -> JobRequest:
        """Build a minimal job request; optionally with one nvidia-tesla-k80 GPU."""
        return JobRequest.create(
            Container(
                image="testimage",
                resources=ContainerResources(
                    cpu=1, memory_mb=128, gpu=1, gpu_model_id="nvidia-tesla-k80"
                )
                if is_gpu_job
                else ContainerResources(cpu=1, memory_mb=128),
            )
        )
    def _create_job(
        self, cluster_name: str = "test-cluster", **kwargs: Any
    ) -> JobRecord:
        """Create a CPU-only job record on *cluster_name*; kwargs forwarded to JobRecord.create."""
        return JobRecord.create(
            request=self._create_job_request(), cluster_name=cluster_name, **kwargs
        )
    def _create_finished_job(
        self,
        run_time: timedelta,
        is_gpu_job: bool = False,
        job_status: JobStatus = JobStatus.SUCCEEDED,
        cluster_name: str = "test-cluster",
        **kwargs: Any,
    ) -> JobRecord:
        """Create a job that ran for *run_time* and ended in *job_status*.

        NOTE(review): not referenced by any test in this class — possibly
        used elsewhere or left over; confirm before removing.
        """
        job = JobRecord.create(
            request=self._create_job_request(is_gpu_job=is_gpu_job),
            cluster_name=cluster_name,
            **kwargs,
        )
        current_time = current_datetime_factory()
        # Back-date the RUNNING transition so the job appears to have run
        # for exactly `run_time` before finishing "now".
        job.set_status(
            JobStatus.RUNNING, current_datetime_factory=lambda: current_time - run_time
        )
        job.set_status(job_status, current_datetime_factory=lambda: current_time)
        return job
    async def test_set_get_job(self) -> None:
        """set_job/get_job round-trips and the listing queries filter by status."""
        jobs_storage = InMemoryJobsStorage()
        # One job per status of interest.
        pending_job = self._create_job()
        await jobs_storage.set_job(pending_job)
        running_job = self._create_job(status=JobStatus.RUNNING, materialized=True)
        await jobs_storage.set_job(running_job)
        succeeded_job = self._create_job(status=JobStatus.SUCCEEDED, materialized=True)
        await jobs_storage.set_job(succeeded_job)
        # Round-trip a single job.
        job = await jobs_storage.get_job(pending_job.id)
        assert job.id == pending_job.id
        assert job.request == pending_job.request
        # Unfiltered listing returns everything.
        jobs = await jobs_storage.get_all_jobs()
        assert {job.id for job in jobs} == {
            pending_job.id,
            running_job.id,
            succeeded_job.id,
        }
        # Status-filtered listing.
        job_filter = JobFilter(statuses={JobStatus.PENDING, JobStatus.RUNNING})
        jobs = await jobs_storage.get_all_jobs(job_filter)
        assert {job.id for job in jobs} == {running_job.id, pending_job.id}
        jobs = await jobs_storage.get_running_jobs()
        assert {job.id for job in jobs} == {running_job.id}
        jobs = await jobs_storage.get_unfinished_jobs()
        assert {job.id for job in jobs} == {pending_job.id, running_job.id}
        jobs = await jobs_storage.get_jobs_for_deletion()
        assert {job.id for job in jobs} == {succeeded_job.id}
    async def test_try_create_job(self) -> None:
        """try_create_job stores a new job and rejects a duplicate with JobStorageJobFoundError."""
        jobs_storage = InMemoryJobsStorage()
        job = self._create_job(name="job-name")
        async with jobs_storage.try_create_job(job):
            pass
        retrieved_job = await jobs_storage.get_job(job.id)
        assert retrieved_job.id == job.id
        # Creating the same job again must fail.
        with pytest.raises(JobStorageJobFoundError):
            async with jobs_storage.try_create_job(job):
                pass
class TestJobFilter:
def _create_job_request(self) -> JobRequest:
return JobRequest.create(
Container(
image="testimage", resources=ContainerResources(cpu=1, memory_mb=128)
)
)
def _create_job(
self, cluster_name: str = "test-cluster", **kwargs: Any
) -> JobRecord:
return JobRecord.create(
request=self._create_job_request(), cluster_name=cluster_name, **kwargs
)
def test_check_empty_filter(self) -> None:
job = self._create_job(owner="testuser")
assert JobFilter().check(job)
def test_check_statuses(self) -> None:
job = self._create_job(owner="testuser", status=JobStatus.PENDING)
assert not JobFilter(statuses={JobStatus.RUNNING}).check(job)
def test_check_tags_job_zero_filter_zero(self) -> None:
job = self._create_job(owner="testuser", status=JobStatus.PENDING)
filt = JobFilter()
assert filt.check(job)
def test_check_tags_job_all_fileter_all(self) -> None:
job = self._create_job(
owner="testuser", status=JobStatus.PENDING, tags=["t1", "t2", "t3"]
)
filt = JobFilter(tags={"t1", "t2", "t3"})
assert filt.check(job)
def test_check_tags_job_zero_filter_all(self) -> None:
job = self._create_job(owner="testuser", status=JobStatus.PENDING)
filt = JobFilter(tags={"t1", "t2", "t3"})
assert not filt.check(job)
def test_check_tags_job_all_filter_zero(self) -> None:
job = self._create_job(
owner="testuser", status=JobStatus.PENDING, tags=["t1", "t2", "t3"]
)
filt = JobFilter()
assert filt.check(job)
def test_check_tags_job_less_filter_more(self) -> None:
job = self._create_job(owner="testuser", status=JobStatus.PENDING, tags=["t1"])
filt = JobFilter(tags={"t1", "t2", "t3"})
assert not filt.check(job)
def test_check_tags_job_more_filter_less(self) -> None:
job = self._create_job(
owner="testuser", status=JobStatus.PENDING, tags=["t1", "t2", "t3"]
)
filt = JobFilter(tags={"t1"})
assert filt.check(job)
def test_check_tags_intersect(self) -> None:
job = self._create_job(
owner="testuser", status=JobStatus.PENDING, tags=["t1", "t2"]
)
filt = JobFilter(tags={"t2", "t3"})
assert not filt.check(job)
def test_check_tags_disjoint(self) -> None:
job = self._create_job(
owner="testuser", status=JobStatus.PENDING, tags=["t1", "t2"]
)
filt = JobFilter(tags={"t3", "t4"})
assert not filt.check(job)
def test_check_owners(self) -> None:
job = self._create_job(owner="testuser")
assert not JobFilter(owners={"anotheruser"}).check(job)
def test_check_name(self) -> None:
job = self._create_job(owner="testuser", name="testname")
assert not JobFilter(name="anothername").check(job)
def test_check_cluster_names(self) -> None:
job = JobRecord.create(
request=self._create_job_request(),
owner="testuser",
cluster_name="my-cluster",
)
assert not JobFilter(clusters={"test-cluster": {}}).check(job)
assert JobFilter(clusters={"my-cluster": {}}).check(job)
def test_check_ids(self) -> None:
job = self._create_job(owner="testuser", name="testname")
job2 = self._create_job(owner="testuser")
assert JobFilter(ids={job.id}).check(job)
assert JobFilter(ids={job2.id}).check(job2)
assert not JobFilter(ids={job.id}).check(job2)
assert not JobFilter(ids={job2.id}).check(job)
assert JobFilter(ids={job.id, job2.id}).check(job)
assert JobFilter(ids={job.id, job2.id}).check(job2)
def test_check_ids_status(self) -> None:
job = self._create_job(
owner="testuser", name="testname", status=JobStatus.PENDING
)
assert JobFilter(ids={job.id}, statuses={JobStatus.PENDING}).check(job)
assert not JobFilter(ids={job.id}, statuses={JobStatus.RUNNING}).check(job)
def test_check_all(self) -> None:
job = self._create_job(
status=JobStatus.PENDING, owner="testuser", name="testname"
)
assert JobFilter(
statuses={JobStatus.PENDING},
owners={"testuser"},
name="testname",
clusters={"test-cluster": {}},
).check(job)
def test_check_clusters_and_owners(self) -> None:
filter = JobFilter(
clusters={
"cluster1": {None: {"user2": set()}},
"cluster2": {None: {"user1": set()}},
},
owners={"user1", "user2"},
)
found = []
for cluster_name in ("cluster1", "cluster2", "cluster3"):
for owner in ("user1", "user2", "user3"):
job = self._create_job(cluster_name=cluster_name, owner=owner)
if filter.check(job):
found.append((cluster_name, owner))
assert found == [("cluster1", "user2"), ("cluster2", "user1")]
def test_check_clusters_and_owners2(self) -> None:
filter = JobFilter(
clusters={"cluster1": {}, "cluster2": {None: {"user2": set()}}},
owners={"user1", "user2"},
)
found = []
for cluster_name in ("cluster1", "cluster2", "cluster3"):
for owner in ("user1", "user2", "user3"):
job = self._create_job(cluster_name=cluster_name, owner=owner)
if filter.check(job):
found.append((cluster_name, owner))
assert found == [
("cluster1", "user1"),
("cluster1", "user2"),
("cluster2", "user2"),
]
def test_check_clusters_and_owners3(self) -> None:
filter = JobFilter(
clusters={"cluster1": {}, "cluster2": {None: {"user2": set()}}}
)
found = []
for cluster_name in ("cluster1", "cluster2", "cluster3"):
for owner in ("user1", "user2", "user3"):
job = self._create_job(cluster_name=cluster_name, owner=owner)
if filter.check(job):
found.append((cluster_name, owner))
assert found == [
("cluster1", "user1"),
("cluster1", "user2"),
("cluster1", "user3"),
("cluster2", "user2"),
]
def test_check_clusters_and_owners4(self) -> None:
filter = JobFilter(
clusters={
"cluster1": {None: {"user1": set()}},
"cluster2": {None: {"user1": set(), "user2": set()}},
"cluster3": {None: {"user1": set(), "user3": set()}},
},
owners={"user1", "user2", "user3"},
)
found = []
for cluster_name in ("cluster1", "cluster2", "cluster3", "cluster4"):
for owner in ("user1", "user2", "user3", "user4"):
job = self._create_job(cluster_name=cluster_name, owner=owner)
if filter.check(job):
found.append((cluster_name, owner))
assert found == [
("cluster1", "user1"),
("cluster2", "user1"),
("cluster2", "user2"),
("cluster3", "user1"),
("cluster3", "user3"),
]
def test_check_owners_and_names(self) -> None:
filter = JobFilter(
clusters={"test-cluster": {None: {"user1": {"name1"}, "user2": {"name2"}}}},
owners={"user1", "user2"},
)
found = []
for owner in ("user1", "user2", "user3"):
for name in ("name1", "name2", "name3", None):
job = self._create_job(owner=owner, name=name)
if filter.check(job):
found.append((owner, name))
assert found == [("user1", "name1"), ("user2", "name2")]
def test_check_owners_and_names2(self) -> None:
filter = JobFilter(
clusters={"test-cluster": {None: {"user1": set(), "user2": {"name2"}}}},
owners={"user1", "user2"},
)
found = []
for owner in ("user1", "user2", "user3"):
for name in ("name1", "name2", None):
job = self._create_job(owner=owner, name=name)
if filter.check(job):
found.append((owner, name))
assert found == [
("user1", "name1"),
("user1", "name2"),
("user1", None),
("user2", "name2"),
]
| 2.15625 | 2 |
class XmlServiceTransport(object):
    """XMLSERVICE transport base class.

    Args:
        ctl (str): XMLSERVICE control options, see
            http://yips.idevcloud.com/wiki/index.php/XMLService/XMLSERVICEQuick#ctl
        ipc (str): An XMLSERVICE ipc key for stateful connections, see
            http://yips.idevcloud.com/wiki/index.php/XMLService/XMLSERVICEConnect
    """
    def __init__(self, ctl="*here *cdata", ipc="*na"):
        self.ipc = ipc
        self.ctl = ctl
        # Attributes to include in trace output. Each entry is either an
        # attribute name, or a (label, attribute name) pair when the trace
        # label should differ from the attribute name.
        self.trace_attrs = ["ipc", "ctl"]

    def trace_data(self):
        """Return a one-line summary of the traced attributes.

        Returns:
            str: concatenation of " label(value)" for each traced attribute.
        """
        # Build with join instead of repeated += (avoids quadratic growth
        # and is the idiomatic way to assemble a string from parts).
        parts = []
        for entry in self.trace_attrs:
            if isinstance(entry, tuple):
                label, attr = entry
            else:
                label = attr = entry
            parts.append(" {}({})".format(label, getattr(self, attr)))
        return "".join(parts)

    def call(self, tk):
        """Call XMLSERVICE with accumulated actions.

        Args:
            tk (iToolKit): An iToolkit object

        Returns:
            str: The XML returned from XMLSERVICE

        Raises:
            NotImplementedError: always; concrete transports must override.
        """
        raise NotImplementedError
| 2.53125 | 3 |
main.py | musebc/pyplotgui | 0 | 12769040 | <reponame>musebc/pyplotgui
"""The main window for the PyPlot GUI"""
import sys
import random
from PyQt5.QtWidgets import QApplication
from matplotlib.figure import Figure
from views.edit import PlotEdit
from views.plot import Plot
def create_a_sample_plot():
    """Build a matplotlib Figure with 25 random values drawn as a red line.

    Returns:
        matplotlib.figure.Figure: a 5x4-inch, 100-dpi figure titled
        "Unnamed Plot" containing a single red line plot.
    """
    figure = Figure(figsize=(5, 4), dpi=100)
    # Loop index was unused; "_" makes that explicit.
    data = [random.random() for _ in range(25)]
    subplot = figure.add_subplot(111)
    # The line handle returned by plot() was never used, so it is not kept.
    subplot.plot(data, 'r-')
    subplot.set_title("Unnamed Plot")
    return figure
if __name__ == '__main__':
    # Bootstrap the Qt application, show both windows, and enter the
    # event loop; exit with the loop's return code.
    app = QApplication(sys.argv)
    sample_figure = create_a_sample_plot()
    plot_window = Plot(sample_figure)
    edit_window = PlotEdit()
    plot_window.show()
    edit_window.show()
    sys.exit(app.exec_())
| 2.875 | 3 |
recursion/0206_reverse_linked_list.py | MartinMa28/Algorithms_review | 0 | 12769041 | <reponame>MartinMa28/Algorithms_review
class ListNode:
    """Singly linked list node: a value plus a pointer to the next node."""

    def __init__(self, x):
        self.val = x      # node payload
        self.next = None  # successor node, or None at the tail

    def __repr__(self):
        # Added for debuggability; does not change existing behavior.
        return 'ListNode({!r})'.format(self.val)
class Solution:
    """Reverse a singly linked list, iteratively or recursively (LC 206)."""

    def reverseList(self, head: "ListNode") -> "ListNode":
        """Reverse the list in place iteratively.

        Args:
            head: head of the list (may be None).

        Returns:
            The new head of the reversed list (None for an empty list).
        """
        if head is None or head.next is None:
            return head
        prev = head
        curr = head.next
        prev.next = None  # old head becomes the new tail
        while curr:
            nxt = curr.next
            curr.next = prev
            prev = curr
            curr = nxt
        return prev

    def reverseList_recursive(self, head: "ListNode") -> "ListNode":
        """Reverse the list recursively in O(n).

        Fixes two defects of the previous implementation:
        - an empty list (head is None) no longer raises AttributeError;
        - the reversed remainder's tail is linked directly via
          head.next.next, instead of re-walking the whole remainder on
          every call (was O(n^2) overall, now O(n)).
        """
        if head is None or head.next is None:
            return head
        new_head = self.reverseList_recursive(head.next)
        head.next.next = head  # old successor now points back at head
        head.next = None       # head becomes the new tail
        return new_head
if __name__ == "__main__":
    # Build the list 1 -> 2 -> 3 -> 4 and reverse it recursively.
    head = ListNode(1)
    tail = head
    for value in (2, 3, 4):
        tail.next = ListNode(value)
        tail = tail.next
    Solution().reverseList_recursive(head)
import setuptools

# The README doubles as the PyPI long description.
with open("README.md", "r") as readme_file:
    readme_text = readme_file.read()

setuptools.setup(
    name="weishaupt-wcm-com",
    version="0.0.10",
    author="<NAME>",
    author_email="<EMAIL>",
    description="Interfacing the Weishaupt WCM-COM module",
    long_description=readme_text,
    long_description_content_type="text/markdown",
    url="https://github.com/schmiegelt/Py-Weishaupt-WCM-COM",
    install_requires=["requests"],
    packages=setuptools.find_packages(),
    python_requires='>=3.6',
)
| 1.390625 | 1 |
Harris_Corner_Detection_And_SIFT/code/student_code.py | dasdristanta13/Computer-vision | 0 | 12769043 | <reponame>dasdristanta13/Computer-vision
import numpy as np
import cv2 # You must not use cv2.cornerHarris()
# You must not add any other library
### If you need additional helper methods, add those.
### Write details description of those
"""
Returns the harris corners, image derivative in X direction, and
image derivative in Y direction.
Args
- image: numpy nd-array of dim (m, n, c)
- window_size: The shaps of the windows for harris corner is (window_size, wind)
- alpha: used in calculating corner response function R
- threshold: For accepting any point as a corner, the R value must be
greater then threshold * maximum R value.
- nms_size = non maximum suppression window size is (nms_size, nms_size)
around the corner
Returns
- corners: the list of detected corners
- Ix: image derivative in X direction
- Iy: image derivative in Y direction
"""
def harris_corners(image, window_size=5, alpha=0.04, threshold=1e-2,
                   nms_size=11):
    """Detect Harris corners in a grayscale image.

    Returns a tuple (corners, Ix, Iy): the thresholded, NMS-filtered
    response map R, and the Sobel derivatives in x and y.
    """
    ### YOUR CODE HERE
    # Pre-smooth to suppress noise before differentiation.
    image = cv2.GaussianBlur(image,(9,9),3)
    # First-order Sobel derivatives (64-bit float to keep sign/precision).
    Ix = cv2.Sobel(image,cv2.CV_64F,1,0,ksize=5)
    Iy = cv2.Sobel(image,cv2.CV_64F,0,1,ksize=5)
    # Elements of the structure tensor M, smoothed over the window.
    Ixx = cv2.GaussianBlur(Ix*Ix,(window_size,window_size),5)
    Iyy = cv2.GaussianBlur(Iy*Iy,(window_size,window_size),5)
    Ixy = cv2.GaussianBlur(Ix*Iy,(window_size,window_size),5)
    # Harris response: R = det(M) - alpha * trace(M)^2, per pixel.
    detM=Ixx*Iyy-(Ixy)**2
    traceM=Ixx+Iyy
    R=detM-alpha*(traceM)**2
    # Zero out responses below a fraction of the global maximum.
    R_threshold=np.max(R)*threshold
    R[R<R_threshold]=0
    # Non-maximum suppression: within each nms_size window, keep only the
    # maximum. `temp` is a *view* into R, so zeroing temp modifies R in place.
    # NOTE(review): the loops stop nms_size short of the bottom/right edges,
    # so border responses are never suppressed — confirm this is intended.
    for i in range(len(R)-nms_size):
        for j in range(len(R[1,:])-nms_size):
            temp=R[i:i+nms_size, j:j+nms_size]
            temp[temp!=np.max(temp)]=0
    corners = R
    return corners, Ix, Iy
"""
Creates key points form harris corners and returns the list of keypoints.
You must use cv2.KeyPoint() method.
Args
- corners: list of Normalized corners.
- Ix: image derivative in X direction
- Iy: image derivative in Y direction
- threshold: only select corners whose R value is greater than threshold
Returns
- keypoints: list of cv2.KeyPoint
Notes:
You must use cv2.KeyPoint() method. You should also pass
angle of gradient at the corner. You can calculate this from Ix, and Iy
"""
def get_keypoints(corners, Ix, Iy, threshold):
    """Convert corner responses above `threshold` into cv2.KeyPoint objects.

    The keypoint angle is the gradient direction in degrees; the response
    value is stored as the keypoint's response.
    """
    ### YOUR CODE HERE
    keypoints=[]
    for i in range(corners.shape[0]):
        for j in range(corners.shape[1]):
            if (corners[i,j]>threshold):
                # KeyPoint takes (x, y) = (column, row), hence (j, i).
                # NOTE(review): np.arctan(Iy/Ix) divides by zero when
                # Ix[i,j] == 0 and loses the quadrant; np.arctan2(Iy, Ix)
                # would be safer — confirm before changing, as it alters
                # the reported angles.
                keypoints.append(cv2.KeyPoint(j,i,1,np.degrees(np.arctan(Iy[i,j]/Ix[i,j])),corners[i,j],0,-1))
                # print(np.degrees(np.arctan(Ix[i,j]/Iy[i,j])))
    return keypoints
def get_features(image, keypoints, feature_width, scales=None):
"""
To start with, you might want to simply use normalized patches as your
local feature. This is very simple to code and works OK. However, to get
full credit you will need to implement the more effective SIFT descriptor
(See Szeliski 4.1.2 or the original publications at
http://www.cs.ubc.ca/~lowe/keypoints/)
Your implementation does not need to exactly match the SIFT reference.
Here are the key properties your (baseline) descriptor should have:
(1) a 4x4 grid of cells, each feature_width/4. It is simply the
terminology used in the feature literature to describe the spatial
bins where gradient distributions will be described.
(2) each cell should have a histogram of the local distribution of
gradients in 8 orientations. Appending these histograms together will
give you 4x4 x 8 = 128 dimensions.
(3) Each feature should be normalized to unit length.
You do not need to perform the interpolation in which each gradient
measurement contributes to multiple orientation bins in multiple cells
As described in Szeliski, a single gradient measurement creates a
weighted contribution to the 4 nearest cells and the 2 nearest
orientation bins within each cell, for 8 total contributions. This type
of interpolation probably will help, though.
You do not have to explicitly compute the gradient orientation at each
pixel (although you are free to do so). You can instead filter with
oriented filters (e.g. a filter that responds to edges with a specific
orientation). All of your SIFT-like feature can be constructed entirely
from filtering fairly quickly in this way.
You do not need to do the normalize -> threshold -> normalize again
operation as detailed in Szeliski and the SIFT paper. It can help, though.
Another simple trick which can help is to raise each element of the final
feature vector to some power that is less than one.
Args:
- image: A numpy array of shape (m,n) or (m,n,c). can be grayscale or color, your choice
- x: A numpy array of shape (k,), the x-coordinates of interest points
- y: A numpy array of shape (k,), the y-coordinates of interest points
- feature_width: integer representing the local feature width in pixels.
You can assume that feature_width will be a multiple of 4 (i.e. every
cell of your local SIFT-like feature will have an integer width
and height). This is the initial window size we examine around
each keypoint.
- scales: Python list or tuple if you want to detect and describe features
at multiple scales
You may also detect and describe features at particular orientations.
Returns:
- fv: A numpy array of shape (k, feat_dim) representing a feature vector.
"feat_dim" is the feature_dimensionality (e.g. 128 for standard SIFT).
These are the computed features.
"""
assert image.ndim == 2, 'Image must be grayscale'
#############################################################################
# TODO: YOUR CODE HERE #
# If you choose to implement rotation invariance, enabling it should not #
# decrease your matching accuracy. #
#############################################################################
| 3.390625 | 3 |
def Cel():
    # Prompt for a Celsius reading and print its Fahrenheit equivalent.
    celsius = float(input("Enter the temperature in Celsius "))
    converted: float = celsius * 9 / 5 + 32
    print("Value in Fahrenheit ", converted)
def Far():
    # Prompt for a Fahrenheit reading and print its Celsius equivalent.
    fahrenheit = float(input("Enter the temperature in Fahrenheit "))
    converted: float = (fahrenheit - 32) * 5 / 9
    print(" Value in Celsius", converted)
def condition():
    """Repeatedly re-run main() while the user enters 1.

    Improvements over the original:
    - iterative loop instead of unbounded recursion (no RecursionError
      after many rounds);
    - non-numeric input exits cleanly instead of raising ValueError.
    """
    while True:
        try:
            cont = int(input(" Enter 1 to continue or else to exit "))
        except ValueError:
            return  # non-numeric input counts as "anything else": exit
        if cont != 1:
            return
        main()
def main():
    """Show the conversion menu and dispatch to the chosen converter."""
    print(" To convert Temperatures")
    try:
        choice = int(input("Enter 1 to convert Celsius to Fahrenheit 0r Enter 2 for vice versa "))
    except ValueError:
        # Robustness fix: non-numeric input used to raise ValueError;
        # treat it like any other invalid choice instead.
        print(' Wrong choice ')
        return
    if choice == 1:
        Cel()
    elif choice == 2:
        Far()
    else:
        print(' Wrong choice ')
# Script entry guard: run the menu only when executed directly, not on import.
if __name__ == "__main__":
    main()
    condition()
| 4.09375 | 4 |
cp2/parser.py | thomas-franceschi/theory-project-pied-piper | 0 | 12769045 | #CP2
#<NAME>
#<NAME>
#import sys
class baseParser:
    """Recursive-descent parser for a small regular-expression grammar.

    Grammar (informal): regexp -> union; union -> concat ('|' concat)?;
    concat -> unary*; unary -> primary '*'?; primary -> '(' union ')' |
    '@' (empty set) | any other single symbol.

    Parsing state is held on the instance: `counter` is the index of the
    next unread character of `expression`, and `M` accumulates the most
    recently built sub-result. The parse methods both mutate `self.M`
    and return it; callers rely on the return value.
    """
    def __init__(self, expression, length, counter):
        self.expression = expression #String being parsed
        self.length = length #Max range for token
        self.counter = counter #Token
        self.M = '' #Output
    def parseRegexp(self):
        """Parse the whole expression; returns the result, or 1 on error
        (i.e. if unconsumed input remains)."""
        self.M = self.parseUnion()
        if self.counter == self.length:
            return self.M
        else:
            print('regex error, count: ' + str(self.counter) + ' length: ' + str(self.length))
            return 1 #ERROR
    def parseUnion(self):
        """Parse `concat ('|' concat)?`.

        NOTE(review): only a single '|' is consumed here, so inputs like
        'a|b|c' leave the second '|' unread and parseRegexp reports an
        error — confirm whether chained unions should be supported.
        """
        self.M = self.parseConcat()
        if self.counter == self.length:
            return self.M
        if self.expression[self.counter] == '|': #If next token is |
            self.counter = self.counter + 1 #'read' |
            self.M = self.union(self.M, self.parseConcat())
        return self.M
    def parseConcat(self):
        """Parse a (possibly empty) sequence of unary terms; an empty
        sequence yields epsilon."""
        if self.counter == self.length:
            return self.epsilon()
        if self.expression[self.counter] == '|' or self.expression[self.counter] == ')':
            return self.epsilon()
        else:
            self.M = self.parseUnary()
            while self.counter < self.length and self.expression[self.counter] != '|' and self.expression[self.counter] != ')':
                self.M = self.concat(self.M, self.parseUnary())
            return self.M
    def parseUnary(self):
        """Parse a primary term with an optional trailing '*' (Kleene star)."""
        self.M = self.parsePrimary()
        if self.counter == self.length:
            return self.M
        if self.expression[self.counter] == '*':
            self.counter = self.counter + 1; #read *
            return self.star(self.M)
        else:
            return self.M
    def parsePrimary(self):
        """Parse a parenthesized union, '@' (empty set), or a literal symbol.

        NOTE(review): after a '(' group, the closing ')' is assumed rather
        than checked, and an unexpected token returns the int 1 as an
        error marker — confirm this is the intended error handling.
        """
        if self.expression[self.counter] == '(':
            self.counter = self.counter + 1 #read (
            self.M = self.parseUnion()
            self.counter = self.counter + 1 #read )
            return self.M
        elif self.expression[self.counter] == '@':
            self.counter = self.counter + 1 #read @
            return self.emptyset()
        elif self.expression[self.counter] != '(' and self.expression[self.counter] != ')' and self.expression[self.counter] != '*' and self.expression[self.counter] != '|':
            a = self.expression[self.counter]
            self.counter = self.counter + 1
            return self.symbol(a)
        else:
            print('parse primary error')
            return 1 #Error
    #Placeholders
    # The constructors below just render the AST as nested call strings.
    def emptyset(self):
        return 'emptyset()'
    def epsilon(self):
        return 'epsilon()'
    def symbol(self, a):
        return 'symbol(' + str(a) + ')'
    def union(self, M1, M2):
        return 'union(' + str(M1) + ',' + str(M2) + ')'
    def concat(self, M1,M2):
        return 'concat(' + str(M1) + ',' + str(M2) + ')'
    def star(self, M):
        return 'star(' + M + ')'
if __name__ == "__main__":
    # Bug fix: the sample expression was commented out, leaving
    # `expression` undefined and crashing with NameError on launch.
    expression = '(ab|a)*'
    length = len(expression)
    counter = 0
    parser = baseParser(expression, length, counter)
    M = parser.parseRegexp()
    print(M)
python/rapids_triton/triton/client.py | divyegala/rapids-triton | 1 | 12769046 | <reponame>divyegala/rapids-triton
# Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tritonclient.http as triton_http
import tritonclient.grpc as triton_grpc
# Default Triton Inference Server ports, keyed by wire protocol.
STANDARD_PORTS = {
    'http': 8000,
    'grpc': 8001
}
def get_triton_client(
        protocol="grpc",
        host='localhost',
        port=None,
        concurrency=4):
    """Get Triton client instance of desired type

    When `port` is omitted, the protocol's standard port is used.
    `concurrency` only applies to the HTTP client.
    """
    if port is None:
        port = STANDARD_PORTS[protocol]
    url = f'{host}:{port}'
    if protocol == 'grpc':
        return triton_grpc.InferenceServerClient(
            url=url,
            verbose=False
        )
    if protocol == 'http':
        return triton_http.InferenceServerClient(
            url=url,
            verbose=False,
            concurrency=concurrency
        )
    raise RuntimeError('Bad protocol: "{}"'.format(protocol))
| 2.171875 | 2 |
home/migrations/0011_auto_20200217_1647.py | IATI/new-website | 4 | 12769047 | # Generated by Django 2.2.9 on 2020-02-17 16:47
from django.db import migrations, models
import django.db.models.deletion
import modelcluster.fields
class Migration(migrations.Migration):
dependencies = [
('wagtailimages', '0001_squashed_0021'),
('wagtailcore', '0041_group_collection_permissions_verbose_name_plural'),
('home', '0010_homepage_testimonial'),
]
operations = [
migrations.AddField(
model_name='homepage',
name='about_iati_description',
field=models.TextField(default='', help_text='Description for the about IATI section'),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_en',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_es',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_fr',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_description_pt',
field=models.TextField(help_text='Description for the about IATI section', null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label',
field=models.CharField(default='', help_text='Link label for the about IATI section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_en',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_es',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_fr',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_link_label_pt',
field=models.CharField(help_text='Link label for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_page',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailcore.Page'),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title',
field=models.CharField(default='', help_text='Title for the about IATI section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_en',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_es',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_fr',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_title_pt',
field=models.CharField(help_text='Title for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video',
field=models.URLField(default='', help_text='Video embed URL for the about IATI section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_en',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_es',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_fr',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='about_iati_video_pt',
field=models.URLField(help_text='Video embed URL for the about IATI section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description',
field=models.CharField(default='', help_text='Description for the activities statistics section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='activities_description_en',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description_es',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description_fr',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='activities_description_pt',
field=models.CharField(help_text='Description for the activities statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title',
field=models.CharField(default='', help_text='Title for the getting started section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_en',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_es',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_fr',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='getting_started_title_pt',
field=models.CharField(help_text='Title for the getting started section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_en',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_es',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_fr',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='header_video_pt',
field=models.URLField(blank=True, help_text='Optional: video embed URL for page header', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section'),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_en',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_es',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_fr',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_description_pt',
field=models.TextField(blank=True, help_text='Optional: description for the IATI in action section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title',
field=models.CharField(default='', help_text='Title for the IATI in action section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_en',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_es',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_fr',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_in_action_title_pt',
field=models.CharField(help_text='Title for the IATI in action section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title',
field=models.CharField(default='', help_text='Title for the IATI tools section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section'),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_en',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_es',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_fr',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_description_pt',
field=models.TextField(blank=True, help_text='Optional: description for the IATI tools section', null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_en',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_es',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_fr',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='iati_tools_title_pt',
field=models.CharField(help_text='Title for the IATI tools section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label',
field=models.CharField(default='', help_text='Label for the view all news button', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_en',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_es',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_fr',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_link_label_pt',
field=models.CharField(help_text='Label for the view all news button', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title',
field=models.CharField(default='', help_text='Title for the latest new section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_en',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_es',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_fr',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_title_pt',
field=models.CharField(help_text='Title for the latest new section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title',
field=models.CharField(default='', help_text='Title for the latest news Twitter section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_en',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_es',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_fr',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='latest_news_tweets_title_pt',
field=models.CharField(help_text='Title for the latest news Twitter section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description',
field=models.CharField(default='', help_text='Description for the organisations statistics section', max_length=255),
preserve_default=False,
),
migrations.AddField(
model_name='homepage',
name='organisations_description_en',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description_es',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description_fr',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.AddField(
model_name='homepage',
name='organisations_description_pt',
field=models.CharField(help_text='Description for the organisations statistics section', max_length=255, null=True),
),
migrations.CreateModel(
name='IATIToolsItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_tools_items', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='IATIInActionItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('title', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255)),
('title_en', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_fr', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_es', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_pt', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('description', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255)),
('description_en', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_fr', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_es', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_pt', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_in_action_items', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='IATIInActionFeaturedItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('title', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255)),
('title_en', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_fr', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_es', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('title_pt', models.CharField(blank=True, help_text='Optional: title for the item. Defaults to the selected page title if left blank', max_length=255, null=True)),
('description', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255)),
('description_en', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_fr', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_es', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('description_pt', models.CharField(blank=True, help_text='Optional: description for the item. Defaults to the selected page excerpt if left blank', max_length=255, null=True)),
('quote', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255)),
('quote_en', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quote_fr', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quote_es', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quote_pt', models.CharField(blank=True, help_text='Optional: quote for the item', max_length=255, null=True)),
('quotee', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255)),
('quotee_en', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('quotee_fr', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('quotee_es', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('quotee_pt', models.CharField(blank=True, help_text='Optional: the source of the quote', max_length=255, null=True)),
('image', models.ForeignKey(blank=True, help_text='Optional: image for the item. Defaults to the selected page image if left blank', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='iati_in_action_featured_item', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='GettingStartedItems',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('title', models.CharField(help_text='Title for the item', max_length=255)),
('title_en', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('title_fr', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('title_es', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('title_pt', models.CharField(help_text='Title for the item', max_length=255, null=True)),
('description', models.CharField(help_text='Description for the item', max_length=255)),
('description_en', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('description_fr', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('description_es', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('description_pt', models.CharField(help_text='Description for the item', max_length=255, null=True)),
('link_label', models.CharField(help_text='Link label for the item', max_length=255)),
('link_label_en', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('link_label_fr', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('link_label_es', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('link_label_pt', models.CharField(help_text='Link label for the item', max_length=255, null=True)),
('image', models.ForeignKey(help_text='Image for the item', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='+', to='wagtailimages.Image')),
('item', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='getting_started_items', to='home.HomePage')),
('page', models.ForeignKey(help_text='Page link for the item', null=True, on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailcore.Page')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
]
| 1.585938 | 2 |
StoppingTargetAcceptanceCalculator.py | HCasler/SSC-Acceptance | 0 | 12769048 | #! usr/bin/env python
#import calcGeomAcceptancePointSource as ptSrc
from PointSourceAcceptanceCalculator import PointSourceAcceptanceCalculator
from CollimatorGeometry import CollimatorGeometry
import math
from scipy import integrate
class StoppingTargetAcceptanceCalculator:
    """Geometric acceptance of the SSC collimator holes for the stopping
    target: a stack of annular foils, each treated as a uniform volume source.

    The acceptance for one hole is the ratio of the solid-angle-weighted
    area admitted through that hole (numerator) to the full spherical area
    seen from the source (denominator), each integrated over every foil.
    """

    def __init__(self, collimatorGeometry=None):
        """Set up foil geometry and the point-source acceptance calculator.

        :param collimatorGeometry: optional CollimatorGeometry; when omitted,
            a default straight-hole geometry is constructed.
        """
        # default values from 26586 (stopping target CRR, June 2019)
        self.numFoils = 37
        self.foilSpacing = 2.22  # cm! Everything else I've been doing is in cm
        self.r_in = 2.1          # foil inner radius (cm)
        self.r_out = 7.5         # foil outer radius (cm)
        self.foilThick = 0.01    # foil thickness (cm)
        # TODO: default position!
        if collimatorGeometry is None:
            self.geometry = CollimatorGeometry()
            self.geometry.useStraightHoleGeometry()
        else:
            self.geometry = collimatorGeometry
        self.ptSrc = PointSourceAcceptanceCalculator(self.geometry)

    def numeratorLeft(self, z, y, x):
        """Integrand: non-normalized acceptance of the left hole from (x, y, z)."""
        return self.ptSrc.getNonNormalizedAcceptance([x, y, z], "left")

    def numeratorRight(self, z, y, x):
        """Integrand: non-normalized acceptance of the right hole from (x, y, z)."""
        return self.ptSrc.getNonNormalizedAcceptance([x, y, z], "right")

    def denominatorLeft(self, z, y, x):
        """Integrand: full spherical area from (x, y, z) to the left hole's back plane."""
        return self.ptSrc.getFullSphericalArea([x, y, z], self.geometry.leftHoleCenterBackCoords)

    def denominatorRight(self, z, y, x):
        """Integrand: full spherical area from (x, y, z) to the right hole's back plane."""
        return self.ptSrc.getFullSphericalArea([x, y, z], self.geometry.rightHoleCenterBackCoords)

    @staticmethod
    def errPropDivision(num, numErr, denom, denomErr):
        """Propagate integration errors through the quotient num / denom.

        NOTE(review): the 0.5 factor on the denominator term differs from
        the standard quotient error-propagation formula
        (num * denomErr / denom**2) -- confirm the factor is intentional.
        """
        resErr1 = numErr / denom
        resErr2 = -0.5 * num * denomErr / denom**2
        totalErr = math.sqrt(resErr1**2 + resErr2**2)
        return totalErr

    @staticmethod
    def errPropSum(errsList):
        """Propagate errors through a sum: quadrature sum of the entries."""
        sqrSum = 0.0
        for err in errsList:
            sqrSum += err**2
        return math.sqrt(sqrSum)

    def _integrateOverFoil(self, integrand, centerPos):
        """Integrate ``integrand(z, y, x)`` over one annular foil volume.

        The annulus is computed as the integral over the full outer disc
        minus the integral over the inner disc; each disc integral runs
        through the foil thickness in z. Returns (value, propagated error).
        (This helper replaces two near-identical copies of the bounds code
        that previously lived in singleFoilNumerator/singleFoilDenominator.)
        """
        x0 = centerPos[0]
        y0 = centerPos[1]
        z0 = centerPos[2]

        def zLow(x, y):
            return z0 - self.foilThick / 2

        def zHigh(x, y):
            return z0 + self.foilThick / 2

        def discIntegral(radius):
            # Triple integral of the integrand over a disc of the given
            # radius centered at (x0, y0), through the foil thickness.
            def yLow(x):
                return y0 - math.sqrt(radius**2 - (x - x0)**2)

            def yHigh(x):
                return y0 + math.sqrt(radius**2 - (x - x0)**2)

            return integrate.tplquad(integrand, x0 - radius, x0 + radius,
                                     yLow, yHigh, zLow, zHigh)

        unSubbed = discIntegral(self.r_out)
        center = discIntegral(self.r_in)
        accept = unSubbed[0] - center[0]
        err = self.errPropSum([unSubbed[1], center[1]])
        return (accept, err)

    def singleFoilNumerator(self, centerPos, leftOrRight):
        """Acceptance numerator for one foil centered at centerPos.

        Returns (value, error), or (None, None) for an unrecognized hole
        name. (Fixed: hole selection previously used ``is`` identity
        comparison against string literals, which relies on interning; it
        now uses ``==``.)
        """
        if leftOrRight == "left":
            return self._integrateOverFoil(self.numeratorLeft, centerPos)
        elif leftOrRight == "right":
            return self._integrateOverFoil(self.numeratorRight, centerPos)
        return (None, None)

    def singleFoilDenominator(self, centerPos, leftOrRight):
        """Acceptance denominator for one foil centered at centerPos.

        Returns (value, error), or (None, None) for an unrecognized hole name.
        """
        if leftOrRight == "left":
            return self._integrateOverFoil(self.denominatorLeft, centerPos)
        elif leftOrRight == "right":
            return self._integrateOverFoil(self.denominatorRight, centerPos)
        return (None, None)

    def getGeometricAcceptance(self, sourceCenterPos, leftOrRight):
        """Total geometric acceptance of one hole for the whole target.

        Foils are assumed evenly spaced along the z axis and centered on
        ``sourceCenterPos``. Numerator and denominator integrals are summed
        over all foils, then divided with error propagation.

        :param sourceCenterPos: [x, y, z] center of the foil stack.
        :param leftOrRight: which collimator hole, "left" or "right".
        :returns: (acceptance, propagated error)
        :raises RuntimeError: if leftOrRight is not "left" or "right".
        """
        if leftOrRight not in ("left", "right"):
            raise RuntimeError("argument leftOrRight must be either \"left\" or \"right\"")
        zLow = sourceCenterPos[2] - (self.numFoils - 1) * self.foilSpacing / 2.0
        # currently assuming stopping target is aligned with "my" z axis
        positions = []
        for i in range(0, self.numFoils):
            z = zLow + i * self.foilSpacing
            positions.append([sourceCenterPos[0], sourceCenterPos[1], z])
        numSum = 0.0
        numErrs = []
        denomSum = 0.0
        denomErrs = []
        for position in positions:
            accept1, err1 = self.singleFoilNumerator(position, leftOrRight)
            accept2, err2 = self.singleFoilDenominator(position, leftOrRight)
            numSum += accept1
            denomSum += accept2
            numErrs.append(err1)
            denomErrs.append(err2)
        numErr = self.errPropSum(numErrs)
        denomErr = self.errPropSum(denomErrs)
        accept = numSum / denomSum
        err = self.errPropDivision(numSum, numErr, denomSum, denomErr)
        return accept, err
if __name__ == "__main__":
    # Smoke test: run the acceptance calculation for both collimator holes
    # at the nominal stopping-target position and print the results.
    # Fixes: removed a dead assignment (centerPos was set to a hard-coded
    # [0, 0, -2400] and immediately overwritten) and converted Python-2-only
    # print statements to the print() function.
    calc = StoppingTargetAcceptanceCalculator()
    print("num foils:", calc.numFoils)
    centerPos = CollimatorGeometry.stoppingTargetCenterPosition
    print("getting left...")
    leftResult = calc.getGeometricAcceptance(centerPos, "left")
    print("left hole result:", leftResult)
    print("getting right...")
    rightResult = calc.getGeometricAcceptance(centerPos, "right")
    print("right result:", rightResult)
fairest/models/Rule.py | houfu/fairest | 0 | 12769049 | <reponame>houfu/fairest
# Copyright (c) 2021. <NAME>
#
# This software is licensed under the The MIT License.
# You should have received a copy of the license terms with the software.
# Otherwise, you can find the text here: https://opensource.org/licenses/MIT
#
#
# This software is licensed under the The MIT License.
# You should have received a copy of the license terms with the software.
# Otherwise, you can find the text here: https://opensource.org/licenses/MIT
#
from abc import abstractmethod
from enum import Enum, auto
from typing import Optional, Union, List, Type
from fairest.models import Request, DocumentModel, Report, DocumentSection
class RuleType(Enum):
    """The stage at which a rule runs: building the document model,
    inspecting the whole document, or inspecting a single section."""

    # Explicit values match what auto() assigns, starting from 1.
    DOCUMENT_MODEL = 1
    DOCUMENT = 2
    SECTION = 3
class RuleDescription:
    """Informational summary of a rule, for use with :meth:`BaseRule.describe`.

    Attributes:
        title: The name of the rule.
        author: The person/organisation which wrote the rule.
        contact: A method to contact the author.
        description: A general description of what the rule does.
    """

    def __init__(self, title, author="", contact="", description="A Fairest Rule"):
        self.title = title
        self.author = author
        self.contact = contact
        self.description = description
class RuleProperty:
    """Describes one configurable property of a rule, for use with
    :meth:`BaseRule.describe_properties`.

    Attributes:
        property_name: Name of the property in the code.
        description: What the property does.
        property_default: Description of the property's default value.
        friendly_name: A user-friendly name; falls back to ``property_name``.
    """

    def __init__(self, property_name: str, description="", property_default=None, friendly_name=""):
        self.property_name = property_name
        self.description = description
        self.property_default = property_default
        # An empty friendly name falls back to the code-level property name.
        self.friendly_name = friendly_name or property_name
class BaseRule:
    """Common base for all Fairest rules.

    A rule carries a dictionary of properties (set by the user or server)
    that customise its behaviour; per-rule overrides can also arrive via
    ``request.options`` keyed by the rule's name.

    NOTE(review): this class does not inherit :class:`abc.ABC`, so the
    ``@abstractmethod`` markers below are not enforced at instantiation --
    confirm whether that is intentional.
    """

    def __init__(self, properties=None, request: Optional[Request] = None):
        """
        :param properties: Optional initial property values for this rule.
        :param request: Optional request whose ``options`` entry for this
            rule's name (if present) overrides the initial properties.
        """
        # Copy the caller's dict so later .update() merges never mutate
        # caller-owned state (the original stored the dict by reference).
        self._properties = dict(properties) if properties is not None else {}
        if request is not None:
            if self.get_rule_name() in request.options:
                self._properties.update(request.options[self.get_rule_name()])

    @property
    def properties(self) -> dict:
        """
        Getter for properties of the Rule.
        They are custom configurations set by the user or the server to adjust the behaviour of a rule.
        """
        return self._properties

    @properties.setter
    def properties(self, properties: dict):
        """
        Setter for properties of the Rule.
        They are custom configurations set by the user or the server to adjust the behaviour of a rule.
        The Rule's properties are merged or updated with properties parameter.

        :param properties: A dictionary to update or merge with the Rule's properties.
        :return: None
        """
        self._properties.update(properties)

    @classmethod
    def get_rule_name(cls):
        """
        Returns the name of the Rule. Uses the name of the class as a default.
        Override this function to provide a different name (recommended).
        """
        return cls.__name__

    @classmethod
    @abstractmethod
    def get_rule_type(cls) -> RuleType:
        """Return the :class:`RuleType` this rule belongs to."""
        ...

    @classmethod
    def describe(cls) -> RuleDescription:
        """Returns a RuleDescription of a Rule. Override this function to customise the description (recommended)."""
        return RuleDescription(cls.__name__)

    @classmethod
    def describe_properties(cls) -> List[RuleProperty]:
        """
        Returns a description of the properties which are available to be set.
        This is used for helping the user to set properties.

        :return: A list of :class:`RuleProperty` entries; empty by default.
        """
        return []

    def __repr__(self):
        return self.get_rule_name()
class BaseDocumentModelRule(BaseRule):
    """Base class for rules that build a :class:`DocumentModel` from a raw
    document.

    (The redundant ``__init__`` that only forwarded identical arguments to
    ``super().__init__`` has been removed; the inherited one is identical.)
    """

    @abstractmethod
    def run_document_model_rule(self, request: Request) -> Optional[DocumentModel]:
        """Build and return a document model for the request, or ``None``."""
        ...

    @abstractmethod
    def check_document(self, document: Union[str, bytes], current: Optional[DocumentModel]) -> bool:
        """Return whether this rule should handle the given raw document."""
        ...

    @classmethod
    def get_rule_type(cls) -> RuleType:
        return RuleType.DOCUMENT_MODEL
class BaseDocumentRule(BaseRule):
    """Base class for rules that inspect a whole document model.

    (The redundant ``__init__`` that only forwarded identical arguments to
    ``super().__init__`` has been removed; the inherited one is identical.)
    """

    @abstractmethod
    def run_document_rule(self, request: Request, model: DocumentModel) -> Optional[Union[Report, List[Report]]]:
        """Run the rule over the document model; return report(s) or ``None``."""
        ...

    @classmethod
    def get_rule_type(cls) -> RuleType:
        return RuleType.DOCUMENT
class BaseSectionRule(BaseRule):
    """Base class for rules that inspect a single document section.

    (The redundant ``__init__`` that only forwarded identical arguments to
    ``super().__init__`` has been removed; the inherited one is identical.)
    """

    @abstractmethod
    def run_section_rule(self, request: Request, model: DocumentModel, section: DocumentSection) -> Optional[
        Union[Report, List[Report]]]:
        """Run the rule over one section; return report(s) or ``None``."""
        ...

    @classmethod
    def get_rule_type(cls) -> RuleType:
        return RuleType.SECTION
# Type aliases: names for the rule *classes* themselves (``Type[...]``),
# so callers can annotate values that are rule classes rather than rule
# instances.
RuleClass = Type[BaseRule]
DocumentModelRuleClass = Type[BaseDocumentModelRule]
DocumentRuleClass = Type[BaseDocumentRule]
SectionRuleClass = Type[BaseSectionRule]
| 2.453125 | 2 |
Week 1/PS1_Problem3.py | NagiLam/MIT-6.00.1x_2018 | 0 | 12769050 | """ Problem Set 1 - Problem 3
Assume s is a string of lower case characters.
Write a program that prints the longest substring of s in which the letters occur in alphabetical order. For example, if s = 'azcbobobegghakl', then your program should print
Longest substring in alphabetical order is: beggh
In the case of ties, print the first substring. For example, if s = 'abcbcd', then your program should print
Longest substring in alphabetical order is: abc"""
# s = 'abcbcd'
maxLength = 0
current = s[0]
longest = s[0]
for i in range(len(s)-1):
if s[i + 1] >= s[i]:
current += s[i + 1]
print(current)
if len(current) > maxLength:
maxLength = len(current)
longest = current
else:
current = s[i+1]
i += 1
print ("Longest substring in alphabetical order is: " + longest)
| 4.25 | 4 |