max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
pineboolib/plugins/sql/flqpsql2.py | deavid/pineboo | 2 | 12768251 | from pineboolib import logging
from pineboolib.application.utils.check_dependencies import check_dependencies
from sqlalchemy import create_engine # type: ignore
from PyQt5.Qt import qWarning # type: ignore
from PyQt5.QtWidgets import QMessageBox, QWidget # type: ignore
from pineboolib.plugins.sql.flqpsql import FLQPSQL
from typing import Any, SupportsInt, Union, cast
logger = logging.getLogger(__name__)
class FLQPSQL2(FLQPSQL):
def __init__(self):
super().__init__()
self.name_ = "FLQPSQL2"
self.alias_ = "PostgreSQL"
self.mobile_ = True
self.pure_python_ = True
def useThreads(self):
return False
def useTimer(self):
return True
def safe_load(self):
return check_dependencies({"pg8000": "pg8000", "sqlalchemy": "sqlAlchemy"}, False)
def connect(
self, db_name, db_host, db_port: Union[bytes, str, SupportsInt], db_userName, db_password
) -> Any:
self._dbname = db_name
check_dependencies({"pg8000": "pg8000", "sqlalchemy": "sqlAlchemy"})
import pg8000 # type: ignore
import traceback
# conninfostr = "dbname=%s host=%s port=%s user=%s password=%s connect_timeout=5"
# % (db_name, db_host, db_port, db_userName, db_password)
try:
self.conn_ = pg8000.connect(
user=db_userName,
host=db_host,
port=int(db_port),
database=db_name,
password=db_password,
timeout=5,
)
self.engine_ = create_engine(
"postgresql+pg8000://%s:%s@%s:%s/%s"
% (db_userName, db_password, db_host, db_port, db_name)
)
except Exception as e:
from pineboolib.application import project
if project._DGI and not project.DGI.localDesktop():
if repr(traceback.format_exc()).find("the database system is starting up") > -1:
raise
return False
if project._splash:
project._splash.hide()
if repr(traceback.format_exc()).find("does not exist") > -1:
ret = QMessageBox.warning(
QWidget(),
"Pineboo",
"La base de datos %s no existe.\n¿Desea crearla?" % db_name,
cast(QMessageBox, QMessageBox.Ok | QMessageBox.No),
)
if ret == QMessageBox.No:
return False
else:
try:
tmpConn = pg8000.connect(
user="postgres",
host=db_host,
port=int(db_port),
password=db_password,
timeout=5,
)
tmpConn.autocommit = True
cursor = tmpConn.cursor()
try:
cursor.execute("CREATE DATABASE %s" % db_name)
except Exception:
print("ERROR: FLPSQL.connect", traceback.format_exc())
cursor.execute("ROLLBACK")
cursor.close()
return False
cursor.close()
return self.connect(db_name, db_host, db_port, db_userName, db_password)
except Exception:
qWarning(traceback.format_exc())
QMessageBox.information(
QWidget(),
"Pineboo",
"ERROR: No se ha podido crear la Base de Datos %s" % db_name,
QMessageBox.Ok,
)
print("ERROR: No se ha podido crear la Base de Datos %s" % db_name)
return False
else:
QMessageBox.information(
QWidget(), "Pineboo", "Error de conexión\n%s" % str(e), QMessageBox.Ok
)
return False
# self.conn_.autocommit = True # We may need to set this to False
# for transactions to work properly
# self.conn_.set_isolation_level(
# pg8000.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
self.conn_.autocommit = True
if self.conn_:
self.open_ = True
try:
cursor = self.conn_.cursor()
cursor.execute("SET CLIENT_ENCODING TO 'UTF8'")
except Exception:
qWarning(traceback.format_exc())
return self.conn_
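# A minimal usage sketch (hypothetical database name and credentials; assumes a
# reachable PostgreSQL server with pg8000 and SQLAlchemy installed):
#
#   driver = FLQPSQL2()
#   if driver.safe_load():
#       conn = driver.connect("mydb", "localhost", 5432, "myuser", "mypassword")
#       if conn:
#           cursor = conn.cursor()
#           cursor.execute("SELECT version()")
#           print(cursor.fetchone())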
| 2.125 | 2 |
resources/libraries/python/honeycomb/HcAPIKwBridgeDomain.py | preym17/csit | 0 | 12768252 |
# Copyright (c) 2018 Cisco and/or its affiliates.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Keywords to manipulate bridge domain configuration using Honeycomb REST API.
The keywords make it possible to put and get configuration data and to get
operational data.
"""
from resources.libraries.python.HTTPRequest import HTTPCodes
from resources.libraries.python.honeycomb.HoneycombSetup import HoneycombError
from resources.libraries.python.honeycomb.HoneycombUtil \
import DataRepresentation
from resources.libraries.python.honeycomb.HoneycombUtil \
import HoneycombUtil as HcUtil
class BridgeDomainKeywords(object):
"""Keywords to manipulate bridge domain configuration.
Implements keywords which get configuration and operational data about
bridge domains and put the bridge domains' parameters using Honeycomb REST
API.
"""
PARAMS = ("flood", "forward", "learn", "unknown-unicast-flood",
"arp-termination")
def __init__(self):
pass
@staticmethod
def _configure_bd(node, bd_name, data,
data_representation=DataRepresentation.JSON):
"""Send bridge domain configuration data and check the response.
:param node: Honeycomb node.
:param bd_name: The name of bridge domain.
:param data: Configuration data to be sent in PUT request.
:param data_representation: How the data is represented.
:type node: dict
:type bd_name: str
:type data: dict
:type data_representation: DataRepresentation
:returns: Content of response.
:rtype: bytearray
:raises HoneycombError: If the status code in response on PUT is not
200 = OK.
"""
status_code, resp = HcUtil.\
put_honeycomb_data(node, "config_bridge_domain", data,
data_representation=data_representation)
if status_code not in (HTTPCodes.OK, HTTPCodes.ACCEPTED):
raise HoneycombError(
"The configuration of bridge domain '{0}' was not successful. "
"Status code: {1}.".format(bd_name, status_code))
return resp
@staticmethod
def _set_bd_properties(node, bd_name, path, new_value=None):
"""Set bridge domain properties.
This method reads bridge domain configuration data, creates, changes or
removes the requested data and puts it back to Honeycomb.
:param node: Honeycomb node.
:param bd_name: The name of bridge domain.
:param path: Path to data we want to change, create or remove.
:param new_value: The new value to be set. If None, the item will be
removed.
:type node: dict
:type bd_name: str
:type path: tuple
:type new_value: str, dict or list
:returns: Content of response.
:rtype: bytearray
:raises HoneycombError: If it is not possible to get or set the data.
"""
status_code, resp = HcUtil.\
get_honeycomb_data(node, "config_bridge_domain")
if status_code not in (HTTPCodes.OK, HTTPCodes.ACCEPTED):
raise HoneycombError(
"Not possible to get configuration information about the "
"bridge domains. Status code: {0}.".format(status_code))
if new_value:
new_data = HcUtil.set_item_value(resp, path, new_value)
else:
new_data = HcUtil.remove_item(resp, path)
return BridgeDomainKeywords._configure_bd(node, bd_name, new_data)
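# Illustrative sketch (hypothetical node and values): a path is a tuple of
# JSON keys, where a (list-key, attribute, value) triple selects one list
# item. Setting the "flood" flag of bridge domain "bd1" would look like:
#
#   path = ("bridge-domains", ("bridge-domain", "name", "bd1"), "flood")
#   BridgeDomainKeywords._set_bd_properties(node, "bd1", path, "True")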
@staticmethod
def _create_bd_structure(bd_name, **kwargs):
"""Create the bridge domain data structure as it is expected by
Honeycomb REST API.
:param bd_name: Bridge domain name.
:param kwargs: Parameters and their values. The accepted parameters are
defined in BridgeDomainKeywords.PARAMS.
:type bd_name: str
:type kwargs: dict
:returns: Bridge domain data structure.
:rtype: dict
"""
bd_structure = {"name": bd_name}
for param, value in kwargs.items():
if param not in BridgeDomainKeywords.PARAMS:
raise HoneycombError("The parameter {0} is invalid.".
format(param))
bd_structure[param] = str(value)
return bd_structure
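# For example, a hypothetical call _create_bd_structure("bd1", flood="True",
# forward="True") returns:
#
#   {"name": "bd1", "flood": "True", "forward": "True"}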
@staticmethod
def get_all_bds_cfg_data(node):
"""Get configuration data about all bridge domains from Honeycomb.
:param node: Honeycomb node.
:type node: dict
:returns: Configuration data about all bridge domains from Honeycomb.
:rtype: list
:raises HoneycombError: If it is not possible to get configuration data.
"""
status_code, resp = HcUtil.\
get_honeycomb_data(node, "config_bridge_domain")
if status_code != HTTPCodes.OK:
raise HoneycombError(
"Not possible to get configuration information about the "
"bridge domains. Status code: {0}.".format(status_code))
try:
return resp["bridge-domains"]["bridge-domain"]
except (KeyError, TypeError):
return []
@staticmethod
def get_bd_cfg_data(node, bd_name):
"""Get configuration data about the given bridge domain from Honeycomb.
:param node: Honeycomb node.
:param bd_name: The name of bridge domain.
:type node: dict
:type bd_name: str
:returns: Configuration data about the given bridge domain from
Honeycomb.
:rtype: dict
"""
intfs = BridgeDomainKeywords.get_all_bds_cfg_data(node)
for intf in intfs:
if intf["name"] == bd_name:
return intf
return {}
@staticmethod
def get_all_bds_oper_data(node):
"""Get operational data about all bridge domains from Honeycomb.
:param node: Honeycomb node.
:type node: dict
:returns: Operational data about all bridge domains from Honeycomb.
:rtype: list
:raises HoneycombError: If it is not possible to get operational data.
"""
status_code, resp = HcUtil.\
get_honeycomb_data(node, "oper_bridge_domains")
if status_code != HTTPCodes.OK:
raise HoneycombError(
"Not possible to get operational information about the "
"bridge domains. Status code: {0}.".format(status_code))
try:
return resp["bridge-domains-state"]["bridge-domain"]
except (KeyError, TypeError):
return []
@staticmethod
def get_bd_oper_data(node, bd_name):
"""Get operational data about the given bridge domain from Honeycomb.
:param node: Honeycomb node.
:param bd_name: The name of bridge domain.
:type node: dict
:type bd_name: str
:returns: Operational data about the given bridge domain from Honeycomb.
:rtype: dict
"""
intfs = BridgeDomainKeywords.get_all_bds_oper_data(node)
for intf in intfs:
if intf["name"] == bd_name:
return intf
return {}
@staticmethod
def add_first_bd(node, bd_name, **kwargs):
"""Add the first bridge domain.
If there are any other bridge domains configured, they will be removed.
:param node: Honeycomb node.
:param bd_name: Bridge domain name.
:param kwargs: Parameters and their values. The accepted parameters are
defined in BridgeDomainKeywords.PARAMS
:type node: dict
:type bd_name: str
:type kwargs: dict
:returns: Bridge domain data structure.
:rtype: dict
"""
new_bd = BridgeDomainKeywords._create_bd_structure(bd_name, **kwargs)
bridge_domain = {"bridge-domains": {"bridge-domain": [new_bd, ]}}
return BridgeDomainKeywords._configure_bd(node, bd_name, bridge_domain)
@staticmethod
def add_bd(node, bd_name, **kwargs):
"""Add a bridge domain.
:param node: Honeycomb node.
:param bd_name: Bridge domain name.
:param kwargs: Parameters and their values. The accepted parameters are
defined in BridgeDomainKeywords.PARAMS
:type node: dict
:type bd_name: str
:type kwargs: dict
:returns: Bridge domain data structure.
:rtype: dict
"""
path = ("bridge-domains", "bridge-domain")
new_bd = BridgeDomainKeywords._create_bd_structure(bd_name, **kwargs)
bridge_domain = [new_bd, ]
return BridgeDomainKeywords._set_bd_properties(node, bd_name, path,
bridge_domain)
@staticmethod
def remove_all_bridge_domains(node):
"""Remove all bridge domains.
:param node: Honeycomb node.
:type node: dict
:returns: Content of response.
:rtype: bytearray
:raises HoneycombError: If it is not possible to remove all bridge
domains.
"""
data = {"bridge-domains": {"bridge-domain": []}}
status_code, resp = HcUtil.\
put_honeycomb_data(node, "config_bridge_domain", data)
if status_code not in (HTTPCodes.OK, HTTPCodes.ACCEPTED):
raise HoneycombError("Not possible to remove all bridge domains. "
"Status code: {0}.".format(status_code))
return resp
@staticmethod
def remove_bridge_domain(node, bd_name):
"""Remove a bridge domain.
:param node: Honeycomb node.
:param bd_name: The name of bridge domain to be removed.
:type node: dict
:type bd_name: str
:returns: Content of response.
:rtype: bytearray
:raises HoneycombError: If it is not possible to remove the bridge
domain.
"""
path = ("bridge-domains", ("bridge-domain", "name", bd_name))
status_code, resp = HcUtil.\
get_honeycomb_data(node, "config_bridge_domain")
if status_code != HTTPCodes.OK:
raise HoneycombError(
"Not possible to get configuration information about the "
"bridge domains. Status code: {0}.".format(status_code))
new_data = HcUtil.remove_item(resp, path)
status_code, resp = HcUtil.\
put_honeycomb_data(node, "config_bridge_domain", new_data)
if status_code != HTTPCodes.OK:
raise HoneycombError("Not possible to remove bridge domain {0}. "
"Status code: {1}.".
format(bd_name, status_code))
return resp
@staticmethod
def configure_bridge_domain(node, bd_name, param, value):
"""Configure a bridge domain.
:param node: Honeycomb node.
:param bd_name: Bridge domain name.
:param param: Parameter to set, change or remove. The accepted
parameters are defined in BridgeDomainKeywords.PARAMS
:param value: The new value to be set, change or remove. If None, the
item will be removed.
:type node: dict
:type bd_name: str
:type param: str
:type value: str
:returns: Content of response.
:rtype: bytearray
"""
if param not in BridgeDomainKeywords.PARAMS:
raise HoneycombError("The parameter {0} is invalid.".format(param))
path = ("bridge-domains", ("bridge-domain", "name", bd_name), param)
return BridgeDomainKeywords.\
_set_bd_properties(node, bd_name, path, value)
@staticmethod
def add_l2_fib_entry(node, bd_name, l2_fib_entry):
"""Add an L2 FIB entry to the bridge domain's list of L2 FIB entries.
:param node: Honeycomb node.
:param bd_name: Bridge domain's name.
:param l2_fib_entry: L2 FIB entry to be added to the L2 FIB table.
:type node: dict
:type bd_name: str
:type l2_fib_entry: dict
:returns: Content of response.
:rtype: bytearray
"""
path = ("bridge-domains",
("bridge-domain", "name", bd_name),
"l2-fib-table",
"l2-fib-entry")
new_l2_fib_entry = [l2_fib_entry, ]
return BridgeDomainKeywords._set_bd_properties(
node, bd_name, path, new_l2_fib_entry)
@staticmethod
def modify_l2_fib_entry(node, bd_name, mac, param, value):
"""Modify an existing L2 FIB entry in the bridge domain's L2 FIB table.
The L2 FIB entry is specified by MAC address.
:param node: Honeycomb node.
:param bd_name: Bridge domain's name.
:param mac: MAC address used as the key in L2 FIB data structure.
:param param: The parameter to be modified.
:param value: The new value of the parameter.
:type node: dict
:type bd_name: str
:type mac: str
:type param: str
:type value: str or int
:returns: Content of response.
:rtype: bytearray
"""
path = ("bridge-domains",
("bridge-domain", "name", bd_name),
"l2-fib-table",
("l2-fib-entry", "phys-address", mac),
param)
return BridgeDomainKeywords._set_bd_properties(
node, bd_name, path, value)
@staticmethod
def remove_l2_fib_entry(node, bd_name, mac):
"""Remove an L2 FIB entry from bridge domain's L2 FIB table. The
entry is specified by MAC address.
:param node: Honeycomb node.
:param bd_name: Bridge domain's name.
:param mac: MAC address used as the key in L2 FIB data structure.
:type node: dict
:type bd_name: str
:type mac: str
:returns: Content of response.
:rtype: bytearray
:raises HoneycombError: If it is not possible to remove the specified
entry.
"""
path = ("bridge-domains",
("bridge-domain", "name", bd_name),
"l2-fib-table",
("l2-fib-entry", "phys-address", str(mac)))
status_code, resp = HcUtil.\
get_honeycomb_data(node, "config_bridge_domain")
if status_code != HTTPCodes.OK:
raise HoneycombError("Not possible to get configuration information"
" about the L2 FIB entry {0} from bridge "
"domain {1}. Status code: {2}.".
format(mac, bd_name, status_code))
new_data = HcUtil.remove_item(resp, path)
status_code, resp = HcUtil.\
put_honeycomb_data(node, "config_bridge_domain", new_data)
if status_code != HTTPCodes.OK:
raise HoneycombError("Not possible to remove L2 FIB entry {0} from "
"bridge domain {1}. Status code: {2}.".
format(mac, bd_name, status_code))
return resp
@staticmethod
def remove_all_l2_fib_entries(node, bd_name):
"""Remove all entries from the bridge domain's L2 FIB table.
:param node: Honeycomb node.
:param bd_name: Bridge domain's name.
:type node: dict
:type bd_name: str
:returns: Content of response.
:rtype: bytearray
"""
path = ("bridge-domains",
("bridge-domain", "name", bd_name),
"l2-fib-table")
return BridgeDomainKeywords._set_bd_properties(
node, bd_name, path, None)
@staticmethod
def get_all_l2_fib_entries(node, bd_name):
"""Retrieves all entries from the bridge domain's L2 FIB table.
:param node: Honeycomb node.
:param bd_name: Bridge domain's name.
:type node: dict
:type bd_name: str
:returns: Bridge domain's L2 FIB table or empty list if the table does
not exist or it is empty.
:rtype: list
"""
bd_data = BridgeDomainKeywords.get_bd_oper_data(node, bd_name)
try:
return bd_data["l2-fib-table"]["l2-fib-entry"]
except KeyError:
return []
@staticmethod
def get_l2_fib_entry(node, bd_name, mac):
"""Retrieves an entry from bridge domain's L2 FIB table. The entry is
specified by MAC address.
:param node: Honeycomb node.
:param bd_name: Bridge domain's name.
:param mac: MAC address used as the key in L2 FIB data structure.
:type node: dict
:type bd_name: str
:type mac: str
:returns: The requested entry from bridge domain's L2 FIB table or empty
dictionary if it does not exist in the L2 FIB table.
:rtype: dict
"""
l2_fib = BridgeDomainKeywords.get_all_l2_fib_entries(node, bd_name)
for entry in l2_fib:
if entry["phys-address"] == mac:
return entry
return {}
| 2.15625 | 2 |
lib/matplotlib/tests/test_gridspec.py | jbbrokaw/matplotlib | 113 | 12768253 | import matplotlib.gridspec as gridspec
from nose.tools import assert_equal
def test_equal():
gs = gridspec.GridSpec(2, 1)
assert_equal(gs[0, 0], gs[0, 0])
assert_equal(gs[:, 0], gs[:, 0])
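# For reference, a sketch of the same checks without the nose dependency
# (assumes a plain-assert test runner such as pytest is used instead):
def test_equal_plain_assert():
    gs = gridspec.GridSpec(2, 1)
    assert gs[0, 0] == gs[0, 0]
    assert gs[:, 0] == gs[:, 0]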
| 2.125 | 2 |
WiiBin.py | idaholab/WiiBin | 10 | 12768254 | #Copyright 2020 Battelle Energy Alliance, LLC, ALL RIGHTS RESERVED.
#Buffered File with 0x00's
#Offset measured from center of chunk
#Adjustable slide %
#Added .Net Bytecode
#Added startup Notes
#Additional Error checking
#Added DotNet Bytecode
#Added Compiler Detection
#Added Percent Compressed or Encrypted
from tkinter import *
from tkinter import filedialog
from tkinter import messagebox
import shutil
import os
import sys
import subprocess
import math
import zipfile
programPath = '.' + os.sep
selectedFilename = ''
def clearDirectories():
if os.path.exists(programPath + 'Input'):
shutil.rmtree(programPath + 'Input')
if os.path.exists(programPath + 'Output'):
shutil.rmtree(programPath + 'Output')
if os.path.exists(programPath + 'OutputTemp'):
shutil.rmtree(programPath + 'OutputTemp')
os.mkdir(programPath + 'Input')
os.mkdir(programPath + 'Output')
os.mkdir(programPath + 'OutputTemp')
def getEndianess():
inputResults = open(programPath + 'Output' + os.sep + 'Input.csv','r')
contents = inputResults.read()
inputResults.close()
#Read last Entry (Big Endian Status bit)
contents = contents[-2:-1]
if '0' in contents:
contents = 'Big'
else:
contents = 'Little'
return contents
def goButtonArch():
#Clear resultsbox
resultsBox.delete(1.0, END)
#Check if Arch or Compiler Radio Button Selected
if radioValue.get() == 0:
resultsBox.insert(END, 'Radio Button Not Selected')
return None
#Clear Current PickleFiles
modelPath = programPath + 'PickledSKLearnModels'
models = os.listdir(modelPath)
for model in models:
if model.endswith(".sav"):
os.remove(os.path.join(modelPath, model))
#Unzip Selected PickledModels
if radioValue.get() == 1:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Architectures.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
if radioValue.get() == 2:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Compilers.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
if radioValue.get() == 3:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Bytecode.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
if radioValue.get() == 4:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Architectures_and_Bytecode.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
#Copy file to Input Dir
global selectedFilename
try:
shutil.copy(selectedFilename, programPath + 'Input')
except:
resultsBox.insert(END, 'File not selected. Select a file and try again.')
return None
# os.system() does not raise when an interpreter is missing, so fall back
# to 'python' when the 'python3' invocation returns a non-zero exit code
histogram_cmd = 'GenerateByteHistogram.py a ' + programPath + 'Input' + os.sep + ' ' + programPath + 'Output' + os.sep + ' 0 100000000000000000000 ' + entropyEntryText.get() + ' ' + blocksizeEntryText.get() + ' 0'
if os.system('python3 ' + histogram_cmd) != 0:
os.system('python ' + histogram_cmd)
results = open(programPath + 'Output' + os.sep + 'Results.txt','r')
contents = results.read()
header = 'Type\tProbability\t\tAlgorithm\n------------------------------------------\n'
resultsBox.insert(END,header + contents)
#Set Endianness GUI Box
endianLabelText.set(getEndianess())
clearDirectories()
def goButtonData():
#Clear resultsbox
resultsBox.delete(1.0, END)
#Check if Arch or Compiler Radio Button Selected
if radioValue.get() == 0:
resultsBox.insert(END, 'Radio Button Not Selected')
return None
#Clear Current PickleFiles
modelPath = programPath + 'PickledSKLearnModels'
models = os.listdir(modelPath)
for model in models:
if model.endswith(".sav"):
os.remove(os.path.join(modelPath, model))
#Unzip Selected PickledModels
if radioValue.get() == 1:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Architectures.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
if radioValue.get() == 2:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Compilers.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
if radioValue.get() == 3:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Bytecode.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
if radioValue.get() == 4:
zip = zipfile.ZipFile(programPath + 'PickledSKLearnModels' + os.sep + 'Architectures_and_Bytecode.zip')
zip.extractall(programPath + 'PickledSKLearnModels')
#Copy file to Input Dir
global selectedFilename
try:
shutil.copy(selectedFilename, programPath + 'Input')
except:
resultsBox.insert(END, 'File not selected. Select a file and try again.')
return None
#ZeroBuffer
zero=[0]*int((int(chunkEntryText.get())/2)-1)
zeroBuffer=bytearray(zero)
#Pad Original File with Leading and Trailing Zeros
old = open('Input' + os.sep + selectedFilename.split(os.sep)[-1], 'rb')
new = open('Input' + os.sep + 'temp__' + selectedFilename.split(os.sep)[-1], 'wb')
new.write(zeroBuffer)
new.write(old.read())
new.write(zeroBuffer)
old.close()
new.close()
os.remove('Input' + os.sep + selectedFilename.split(os.sep)[-1])
os.rename('Input' + os.sep + 'temp__' + selectedFilename.split(os.sep)[-1], 'Input' + os.sep + selectedFilename.split(os.sep)[-1])
#Split file via rolling window; os.system() does not raise when an
#interpreter is missing, so fall back to 'python' on a non-zero exit code
extractor_cmd = 'RollingWindowExtractor.py ' + programPath + 'Input' + os.sep + ' ' + programPath + 'OutputTemp' + os.sep + ' ' + chunkEntryText.get() + ' ' + slideEntryText.get()
histogram_cmd = 'GenerateByteHistogram.py d ' + programPath + 'OutputTemp' + os.sep + ' ' + programPath + 'Output' + os.sep + ' 0 100000000000000000000 ' + entropyEntryText.get() + ' ' + blocksizeEntryText.get() + ' ' + votesEntryText.get()
if os.system('python3 ' + extractor_cmd) != 0:
os.system('python ' + extractor_cmd)
if os.system('python3 ' + histogram_cmd) != 0:
os.system('python ' + histogram_cmd)
results = open(programPath + 'Output' + os.sep + 'Results.txt','r')
contents = results.readlines()
#Convert to list
finalList = []
for i in range(0,len(contents)):
finalList.append(str(contents[i].rstrip('\n')))
#Generate final String
finalString = 'File\tByte Offset\t\tAgreement\n'\
+ '-------------------------------------------------------\n'
for i in range(0,len(finalList)):
finalString = finalString + str(int(finalList[i].split(',')[0])+1) + '\t' + str(math.trunc((int(finalList[i].split(',')[0]))*(int(chunkEntryText.get())*(int(slideEntryText.get())/100)))).zfill(7) + '\t\t' + str(finalList[i].split(',')[1]) + '\n'
if len(finalList) == 0:
finalString = 'No Data Offset Matches Found'
resultsBox.insert(END,finalString)
#Set Endianness GUI Box
endianLabelText.set('<????>')
clearDirectories()
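# Worked example (hypothetical settings): with Chunk Size = 10000 and
# Slide = 50%, successive windows start every 10000 * 0.5 = 5000 bytes, so a
# reported chunk index i maps to byte offset i * 5000 in the padded file.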
def entropyPercentage(passedFile):
upperLimit = entropyEntryText.get().split(':')[0]
fileSize = os.path.getsize(passedFile)
entropy = subprocess.check_output(['binwalk', '-E', '-v', '--block=' + str(blocksizeEntryText.get()), '--nplot', passedFile]).split()
entropy = entropy[entropy.index(b'ENTROPY')+2:len(entropy)]
del entropy[1::3]
#Loop through Binwalk elements to decode them
for index in range(0,len(entropy)):
entropy[index] = entropy[index].decode()
entropy.append(str(fileSize))
byteTotal = 0
for index in range(1,len(entropy),2):
if float(entropy[index]) > float(upperLimit):
byteTotal = byteTotal + (float(entropy[index+1])-float(entropy[index-1]))
percentage = str(round((byteTotal / fileSize) * 100, 2)) + '%'
return percentage
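# Note on the parsing above: it assumes binwalk's tabular '-E -v' output, where
# each row after the "DECIMAL  HEXADECIMAL  ENTROPY" header looks roughly like
# (values hypothetical):
#
#   0        0x0      0.391323
#   512      0x200    0.991323
#
# 'del entropy[1::3]' drops the hexadecimal column, leaving alternating
# offset/entropy pairs; a span counts toward the compressed/encrypted total
# when its entropy exceeds the configured upper limit.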
def fileSelectButton():
global selectedFilename
fileSelectedLabelText.set('Loading Selected File...')
selectedFilename = filedialog.askopenfilename(initialdir = programPath,title = "Select Binary")
#Check for Zero size files
##############################################
while os.path.getsize(selectedFilename) <= 0:
print("Zero-byte file detected. Please select a different file.")
selectedFilename = filedialog.askopenfilename(initialdir = programPath,title = "Select Binary")
#Check Entropy Percentage
entropyPercent = entropyPercentage(selectedFilename)
percentLabelText.set(entropyPercent)
fileSelectedLabelText.set(selectedFilename)
entropyNumber = float(entropyPercent.split('%')[0])
if entropyNumber >= 66:
percentLabel.config(fg="red")
messagebox.showwarning("Entropy Warning", "Warning: Selected Binary is " + str(entropyPercent) + " Compressed or Encrypted. WiiBin Results Should Not Be Trusted.")
elif entropyNumber >= 33:
percentLabel.config(fg="orange")
messagebox.showwarning("Entropy Warning", "Warning: Selected Binary is " + str(entropyPercent) + " Compressed or Encrypted. WiiBin Results Might Not Be Reliable.")
elif entropyNumber >= 0:
percentLabel.config(fg="green")
def entropyLabelClicked(event):
resultsBox.delete(1.0, END)
resultsBox.insert(END,"The window of entropy that will be considered when generating byte histograms. Anything outside of that window will be ignored. Syntax=Max:Min")
def blocksizeLabelClicked(event):
resultsBox.delete(1.0, END)
resultsBox.insert(END,"The size of individual block (in bytes) considered during entropy analysis. Default=512")
def chunksizeLabelClicked(event):
resultsBox.delete(1.0, END)
resultsBox.insert(END,"The size of chunk in bytes that the data offset process will break the inputed file into. Default=10000")
def slidesizeLabelClicked(event):
resultsBox.delete(1.0, END)
resultsBox.insert(END,"The percent of the chunk size that the sliding window is slid Default=50")
def reqVotesLabelClicked(event):
resultsBox.delete(1.0, END)
resultsBox.insert(END,"Number of ML algorithms that must agree on an file for it to be reported as part of the data offset output. Default=5 (Simple Majority)")
def radioArchSelected():
entropyEntryText.set('0.9:0.1')
resultsBox.delete(1.0, END)
resultsBox.insert(END,"Mode Changed to Architecture")
def radioCompSelected():
entropyEntryText.set('1.0:0.0')
resultsBox.delete(1.0, END)
resultsBox.insert(END,"Mode Changed to Compiler")
def radioByteSelected():
entropyEntryText.set('0.9:0.1')
resultsBox.delete(1.0, END)
resultsBox.insert(END,"Mode Changed to Bytecode")
def radioArchByteSelected():
entropyEntryText.set('0.9:0.1')
resultsBox.delete(1.0, END)
resultsBox.insert(END,"Mode Changed to Architecture & Bytecode")
#############Initialization###################
root = Tk()
root.configure(width=40)
root.title("WiiBin")
root.geometry("655x390")
root.resizable(0, 0)
radioValue = IntVar()
buttonText = StringVar()
buttonText.set('Select File...')
fileSelectButton = Button(root, textvariable=buttonText, command=fileSelectButton)
fileSelectButton.place(x=10,y=10)
fileSelectedLabelText = StringVar()
fileSelectedLabel = Entry(root,textvariable=fileSelectedLabelText,bd=0,bg='#D9D9D9',width=62)
fileSelectedLabel.place(x=130,y=15)
fileSelectedLabelText.set('<File Path>')
entropyLabel = Label(root,text='Entropy Span:')
entropyLabel.place(x=10,y=45)
entropyLabel.bind("<Button>", entropyLabelClicked)
entropyEntryText = StringVar()
entropyEntry = Entry(root,width=7,textvariable=entropyEntryText)
entropyEntry.place(x=108,y=45)
entropyEntryText.set('0.9:0.1')
blocksizeLabel = Label(root,text='Block Size (b):')
blocksizeLabel.place(x=180,y=45)
blocksizeLabel.bind("<Button>", blocksizeLabelClicked)
blocksizeEntryText = StringVar()
blocksizeEntry = Entry(root,width=4,textvariable=blocksizeEntryText)
blocksizeEntry.place(x=279,y=45)
blocksizeEntryText.set('512')
chunkLabel = Label(root,text='Chunk Size (b):')
chunkLabel.place(x=330,y=45)
chunkLabel.bind("<Button>", chunksizeLabelClicked)
chunkEntryText = StringVar()
chunkEntry = Entry(root,width=6,textvariable=chunkEntryText)
chunkEntry.place(x=434,y=45)
chunkEntryText.set('10000')
slideLabel = Label(root,text='Slide (%):')
slideLabel.place(x=520,y=70)
slideLabel.bind("<Button>", slidesizeLabelClicked)
slideEntryText = StringVar()
slideEntry = Entry(root,width=3,textvariable=slideEntryText)
slideEntry.place(x=590,y=70)
slideEntryText.set('50')
votesLabel = Label(root,text='Req\'d Votes: of 8')
votesLabel.place(x=500,y=45)
votesLabel.bind("<Button>", reqVotesLabelClicked)
votesEntryText = StringVar()
votesEntry = Entry(root,width=2,textvariable=votesEntryText)
votesEntry.place(x=590,y=45)
votesEntryText.set('5')
goButtonArch = Button(root,text="Determine Type",command=goButtonArch)
goButtonArch.place(x=10,y=75)
goButtonData = Button(root,text="Determine Offsets",command=goButtonData)
goButtonData.place(x=145,y=75)
radioButtonArch = Radiobutton(root, text="Architecture", variable=radioValue, value=1, command=radioArchSelected)
radioButtonArch.place(x=292,y=68)
radioButtonByte = Radiobutton(root, text="Bytecode", variable=radioValue, value=3, command=radioByteSelected)
radioButtonByte.place(x=292,y=90)
radioButtonComp = Radiobutton(root, text="Compiler", variable=radioValue, value=2, command=radioCompSelected)
radioButtonComp.place(x=400,y=68)
radioButtonArchByte = Radiobutton(root, text="Arch&Byte", variable=radioValue, value=4, command=radioArchByteSelected)
radioButtonArchByte.place(x=400,y=90)
endianLabel = Label(root,text='Endianness:', justify=RIGHT)
endianLabel.place(x=500,y=93)
endianLabelText = StringVar()
endianLabel = Label(root,width=7,textvariable=endianLabelText)
endianLabel.place(x=580,y=93)
endianLabelText.set('<????>')
percentLabel = Label(root,text='Percent Compressed/Encrypted:', justify=RIGHT)
percentLabel.place(x=370,y=112)
percentLabelText = StringVar()
percentLabel = Label(root,width=7,textvariable=percentLabelText)
percentLabel.place(x=580,y=112)
percentLabelText.set('<????>')
resultsBox = Text(root, width=75, height=14, padx=5, pady=5, borderwidth=2, relief=RIDGE)
resultsBox.place(x=10, y= 130)
scrollb = Scrollbar(root, command=resultsBox.yview)
scrollb.place(x=630, y=350)
resultsBox['yscrollcommand'] = scrollb.set
resultsBox.insert(END,'Welcome to WiiBin 1.7.1\n----------------------\n\nNotes:\n\nThe smaller the Slide (%) and Chunk Size, the more accurate the detection and the longer the runtime.\n\nToo small a Chunk Size will make ML classification difficult and less accurate.\n\nThe minimum detectable code segment size is limited to half of the selected Chunk Size.\n\nTo detect smaller code segments the Chunk Size must be reduced.')
clearDirectories()
root.mainloop()
| 2.078125 | 2 |
examples/password_git.py | qualichat/questionary | 851 | 12768255 |
# -*- coding: utf-8 -*-
"""Example for a password question type.
Run example by typing `python -m examples.password_git` in your console."""
from pprint import pprint
import questionary
from examples import custom_style_dope
from questionary import Separator, Choice, prompt
def ask_pystyle(**kwargs):
# create the question object
question = questionary.password(
"Enter your git password", style=custom_style_dope, **kwargs
)
# prompt the user for an answer
return question.ask()
def ask_dictstyle(**kwargs):
questions = [
{"type": "password", "message": "Enter your git password", "name": "password"}
]
return prompt(questions, style=custom_style_dope, **kwargs)
if __name__ == "__main__":
pprint(ask_pystyle())
| 3.125 | 3 |
bin/bactopia-stats.py | bactopia/bactopia-wdl | 0 | 12768256 | #! /usr/bin/env python3
"""
usage: bactopia-stats [-h] STR STR
bactopia-stats - Output files to be used by Bactopia-WDL
positional arguments:
STR Directory where Bactopia outputs are.
STR Sample name used in Bactopia run
optional arguments:
-h, --help show this help message and exit
"""
import os
import json
import sys
PROGRAM = "bactopia-stats"
DESCRIPTION = 'Output files to be used by Bactopia-WDL'
def read_json(json_file):
""" Read input JSON file and return the dict. """
json_data = None
with open(json_file, 'rt') as json_fh:
json_data = json.load(json_fh)
return json_data
def write_output(file_name, output):
""" Write the output to a specific file. """
with open(file_name, 'wt') as file_fh:
if isinstance(output, float):
# Limit it two decimal places
file_fh.write(f'{output:.2f}\n')
elif isinstance(output, bool):
val = 'true' if output else 'false'
file_fh.write(f'{val}\n')
else:
file_fh.write(f'{output}\n')
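# For example (hypothetical values): write_output("QC_COVERAGE", 45.6789)
# writes "45.68\n", write_output("IS_PAIRED", True) writes "true\n", and any
# other value is written via str() formatting followed by a newline.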
if __name__ == '__main__':
import argparse as ap
import textwrap
parser = ap.ArgumentParser(
prog=PROGRAM,
conflict_handler='resolve',
description=(
f'{PROGRAM} - {DESCRIPTION}'
),
formatter_class=ap.RawDescriptionHelpFormatter
)
parser.add_argument('bactopia', metavar="STR", type=str,
help='Directory where Bactopia outputs are.')
parser.add_argument('sample_name', metavar="STR", type=str,
help='Sample name used in Bactopia run')
if len(sys.argv) == 1:
parser.print_help()
sys.exit(0)
args = parser.parse_args()
with open(f'{args.bactopia}/{args.sample_name}-genome-size.txt', 'rt') as file_fh:
write_output("GENOME_SIZE", file_fh.readline().rstrip())
# FASTQ Stats
is_paired = os.path.exists(f'{args.bactopia}/quality-control/{args.sample_name}_R1.fastq.gz')
write_output("IS_PAIRED", is_paired)
read_stats = {}
if (is_paired):
r1_raw_stats = read_json(f'{args.bactopia}/quality-control/summary/{args.sample_name}_R1-original.json')
r2_raw_stats = read_json(f'{args.bactopia}/quality-control/summary/{args.sample_name}_R2-original.json')
r1_qc_stats = read_json(f'{args.bactopia}/quality-control/summary/{args.sample_name}_R1-final.json')
r2_qc_stats = read_json(f'{args.bactopia}/quality-control/summary/{args.sample_name}_R2-final.json')
# Original Reads
write_output("RAW_TOTAL_BP", r1_raw_stats['qc_stats']['total_bp'] + r2_raw_stats['qc_stats']['total_bp'])
write_output("RAW_COVERAGE", r1_raw_stats['qc_stats']['coverage'] + r2_raw_stats['qc_stats']['coverage'])
write_output("RAW_READ_TOTAL", r1_raw_stats['qc_stats']['read_total'] + r2_raw_stats['qc_stats']['read_total'])
write_output("RAW_READ_MEAN", (r1_raw_stats['qc_stats']['read_mean'] + r2_raw_stats['qc_stats']['read_mean']) / 2.0)
write_output("RAW_QUAL_MEAN", (r1_raw_stats['qc_stats']['qual_mean'] + r2_raw_stats['qc_stats']['qual_mean']) / 2.0)
# After QC Reads
write_output("QC_TOTAL_BP", r1_qc_stats['qc_stats']['total_bp'] + r2_qc_stats['qc_stats']['total_bp'])
write_output("QC_COVERAGE", r1_qc_stats['qc_stats']['coverage'] + r2_qc_stats['qc_stats']['coverage'])
write_output("QC_READ_TOTAL", r1_qc_stats['qc_stats']['read_total'] + r2_qc_stats['qc_stats']['read_total'])
write_output("QC_READ_MEAN", (r1_qc_stats['qc_stats']['read_mean'] + r2_qc_stats['qc_stats']['read_mean']) / 2.0)
write_output("QC_QUAL_MEAN", (r1_qc_stats['qc_stats']['qual_mean'] + r2_qc_stats['qc_stats']['qual_mean']) / 2.0)
else:
se_raw_stats = read_json(f'{args.bactopia}/quality-control/summary/{args.sample_name}-original.json')
se_qc_stats = read_json(f'{args.bactopia}/quality-control/summary/{args.sample_name}-final.json')
# Original Reads
write_output("RAW_TOTAL_BP", se_raw_stats['qc_stats']['total_bp'])
write_output("RAW_COVERAGE", se_raw_stats['qc_stats']['coverage'])
write_output("RAW_READ_TOTAL", se_raw_stats['qc_stats']['read_total'])
write_output("RAW_READ_MEAN", se_raw_stats['qc_stats']['read_mean'])
write_output("RAW_QUAL_MEAN", se_raw_stats['qc_stats']['qual_mean'])
# After QC Reads
write_output("QC_TOTAL_BP", se_qc_stats['qc_stats']['total_bp'])
write_output("QC_COVERAGE", se_qc_stats['qc_stats']['coverage'])
write_output("QC_READ_TOTAL", se_qc_stats['qc_stats']['read_total'])
write_output("QC_READ_MEAN", se_qc_stats['qc_stats']['read_mean'])
write_output("QC_QUAL_MEAN", se_qc_stats['qc_stats']['qual_mean'])
# Assembly related stats
assembly_stats = read_json(f'{args.bactopia}/assembly/{args.sample_name}.json')
write_output("TOTAL_CONTIG", assembly_stats['total_contig'])
write_output("TOTAL_CONTIG_LENGTH", assembly_stats['total_contig_length'])
write_output("MAX_CONTIG_LENGTH", assembly_stats['max_contig_length'])
write_output("MEAN_CONTIG_LENGTH", assembly_stats['mean_contig_length'])
write_output("N50_CONTIG_LENGTH", assembly_stats['n50_contig_length'])
write_output("GC_PERCENT", float(assembly_stats['contig_percent_c']) + float(assembly_stats['contig_percent_g']))
| 3.046875 | 3 |
Stack/402. Remove K Digits.py | Into-Y0u/Github-Baby | 2 | 12768257 |
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
if len(num) <= k:
return "0"
st = [num[0]]
i = 1
while i < len(num):
while len(st) > 0 and int(st[-1]) > int(num[i]) and k > 0:
st.pop()
k-=1
st.append(num[i])
i+=1
while k > 0 :
st.pop()
k-=1
while len(st) > 0 and st[0] == "0" :
st.pop(0)
if len(st) == 0 :
return "0"
else :
return "".join(st)
# Pythonic variant of the same monotonic-stack approach
class Solution:
def removeKdigits(self, num: str, k: int) -> str:
stack = []
for n in num:
while k > 0 and stack and stack[-1] > n:
stack.pop()
k -= 1
stack.append(n)
ans = stack[:-k] if k else stack
return "".join(ans).lstrip('0') or "0"
| 3.109375 | 3 |
main.py | Kazumasan/ulauncher-sessions | 2 | 12768258 |
import os
import subprocess
import json
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from helpers import *
from ulauncher.api.client.EventListener import EventListener
from ulauncher.api.client.Extension import Extension
from ulauncher.api.shared.action.ExtensionCustomAction import ExtensionCustomAction
from ulauncher.api.shared.action.RenderResultListAction import RenderResultListAction
from ulauncher.api.shared.event import ItemEnterEvent, KeywordQueryEvent
from ulauncher.api.shared.item.ExtensionResultItem import ExtensionResultItem
class DemoExtension(Extension):
def __init__(self):
super(DemoExtension, self).__init__()
self.subscribe(KeywordQueryEvent, KeywordQueryEventListener())
self.subscribe(ItemEnterEvent, ItemEnterEventListener())
class KeywordQueryEventListener(EventListener):
def on_event(self, event, extension):
extensions_raw = extension.preferences["file_extensions"]
extension_depth = extension.preferences["extension_depth"]
workspaces_root = os.path.expanduser(extension.preferences["root"])
workspaces = scan_workspaces(
workspaces_root, int(extension_depth), formatExtension(extensions_raw)
)
print("----> Found: ", workspaces)
#filter workspaces by the (case-insensitive) query string
query = event.get_argument()
if query:
query = query.strip().lower()
filtered = [ws for ws in workspaces if query in ws.lower()]
else:
filtered = workspaces
entries = []
for fl in filtered:
action = checkFile(fl)
entries.append(
ExtensionResultItem(
icon=action["icon"],
name=formatName(fl),
description=fl,
on_enter=ExtensionCustomAction(
{
"open_cmd": action["command"],
"opt": [fl, action],
},
keep_app_open=True,
),
)
) # append Items here
return RenderResultListAction(entries)
class ItemEnterEventListener(EventListener):
def on_event(self, event, extension):
data = event.get_data()
cmd_path = data["open_cmd"]
opt = data["opt"]
if cmd_path == "notlinked":
return RenderResultListAction(notlinked(opt[0], opt[1]))
else:
execAction(cmd_path, opt)
if __name__ == "__main__":
DemoExtension().run()
| 1.976563 | 2 |
configlib/conftypes.py | ddorn/pyconfiglib | 1 | 12768259 |
import json
import click
from typing import TYPE_CHECKING
if TYPE_CHECKING:
import configlib
def is_valid(instance, type_):
if isinstance(type_, ConfigType):
return type_.is_valid(instance)
return isinstance(instance, type_)
class ConfigType(click.ParamType):
name = 'any'
def __repr__(self):
return '<ConfigType %s>' % self.name
def convert(self, value, param=None, ctx=None):
try:
return self.load(value)
except (IndexError, ValueError):
self.fail('%s is not a %s' % (value, self.name), param, ctx)
def load(self, value: str):
"""
Convert the string representation to the real data.
Raise anything if not correct format.
"""
return value
def is_valid(self, value):
"""Validate a real data."""
return True
def save(self, value):
"""Converts the real data back into a json valid data"""
return value
@property
def __name__(self):
return self.name
class SubConfigType(ConfigType):
name = 'SubConfig'
def __init__(self, sub_config_class):
self.sub_config_class = sub_config_class # type: type(configlib.SubConfig)
def load(self, value):
if isinstance(value, str):
try:
value = json.loads(value)
except json.JSONDecodeError:
raise ValueError('Not a valid json')
if isinstance(value, dict):
return self.sub_config_class(value)
raise ValueError
def save(self, value: 'configlib.SubConfig'):
return value.__get_json_dict__()
def is_valid(self, value):
return isinstance(value, self.sub_config_class)
class _ColorType(ConfigType):
name = 'color'
def load(self, value):
if len(value) not in (4, 7) or value[0] != '#':
raise ValueError
size = len(value) // 3
r, g, b = [value[1 + size * i: 1 + size * (i + 1)] for i in range(3)]
# the short form '#abc' expands by digit duplication ('#aabbcc'),
# so '#fff' yields 255 per channel rather than 240
return [int(c if size == 2 else c * 2, 16) for c in (r, g, b)]
def is_valid(self, value):
return isinstance(value, (tuple, list)) and \
len(value) == 3 and \
all(isinstance(c, int) and
0 <= c < 256
for c in value)
def save(self, value):
return '#{:02x}{:02x}{:02x}'.format(*value)
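# Round-trip sketch (hypothetical values): load('#ff8000') -> [255, 128, 0]
# and save([255, 128, 0]) -> '#ff8000'; the short form load('#fff') expands
# each digit, yielding [255, 255, 255].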
class _PathType(ConfigType):
name = 'path'
def is_valid(self, value):
return isinstance(value, str)
class Python(ConfigType):
"""
Represent a real python type that is converted from a string with eval().
:note: It can be maliciously used to inject code
"""
name = 'dict'
def __init__(self, type_: type):
"""
Represent a real python type that is converted from a string with eval().
:param type type_: The corresponding python type like dict, list or tuple...
"""
self.type = type_
self.name = type_.__name__
def is_valid(self, value):
return isinstance(value, self.type)
def save(self, value):
return value
def load(self, value: str):
if isinstance(value, str):
try:
value = eval(value)
except Exception:
pass
else:
# convert gently between similar types, for instance from JSON
# lists back to tuples, since tuples are stored as lists in JSON
try:
value = self.type(value)
except Exception:
pass
if not isinstance(value, self.type):
raise ValueError('Does not evaluate to a %s' % self.type.__name__)
return value
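# Example sketch: Python(dict).load("{'a': 1}") evaluates the string to
# {'a': 1}, while Python(tuple).load([1, 2]) gently converts the JSON list
# back into the tuple (1, 2).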
color = _ColorType()
path = _PathType()
| 2.984375 | 3 |
main.py | seunghaekim/achapters003-ggded | 0 | 12768260 | import BartlebyMachine.main as bartleby
import BartlebyMachine.book as book
bartleby = bartleby.Bartleby()
bartleby.addTableOfContent('toc.ggded.yaml')
bartleby.markdownToLatex()
bartleby.writeLatex()
| 1.421875 | 1 |
train.py | jimmyyhwu/spatial-action-maps | 21 | 12768261 | # Adapted from https://pytorch.org/tutorials/intermediate/reinforcement_q_learning.html
import argparse
import random
import sys
import time
from collections import namedtuple
from pathlib import Path
# Prevent numpy from using up all cpu
import os
os.environ['MKL_NUM_THREADS'] = '1' # pylint: disable=wrong-import-position
import torch
import torch.optim as optim
from torch.nn.functional import smooth_l1_loss
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
import utils
torch.backends.cudnn.benchmark = True
Transition = namedtuple('Transition', ('state', 'action', 'reward', 'ministeps', 'next_state'))
class ReplayBuffer:
def __init__(self, capacity):
self.capacity = capacity
self.buffer = []
self.position = 0
def push(self, *args):
if len(self.buffer) < self.capacity:
self.buffer.append(None)
self.buffer[self.position] = Transition(*args)
self.position = (self.position + 1) % self.capacity
def sample(self, batch_size):
transitions = random.sample(self.buffer, batch_size)
return Transition(*zip(*transitions))
def __len__(self):
return len(self.buffer)
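# Hypothetical smoke test (not part of the training loop): shows that the
# cyclic buffer overwrites its oldest transition once capacity is reached and
# that sample() returns a Transition of batched fields.
def _replay_buffer_demo():
    buffer = ReplayBuffer(capacity=2)
    for i in range(3):
        buffer.push('s%d' % i, i, float(i), 1.0, 's%d' % (i + 1))
    assert len(buffer) == 2  # the ('s0', ...) transition was overwritten
    return buffer.sample(2)  # Transition(state=(...), action=(...), ...)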
def train(cfg, policy_net, target_net, optimizer, batch, transform_func):
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
state_batch = torch.cat([transform_func(s) for s in batch.state]).to(device) # (32, 3, 96, 96)
action_batch = torch.tensor(batch.action, dtype=torch.long).to(device) # (32,)
reward_batch = torch.tensor(batch.reward, dtype=torch.float32).to(device) # (32,)
ministeps_batch = torch.tensor(batch.ministeps, dtype=torch.float32).to(device) # (32,)
non_final_next_states = torch.cat([transform_func(s) for s in batch.next_state if s is not None]).to(device, non_blocking=True) # (?32, 3, 96, 96)
output = policy_net(state_batch) # (32, 2, 96, 96)
state_action_values = output.view(cfg.batch_size, -1).gather(1, action_batch.unsqueeze(1)).squeeze(1) # (32,)
next_state_values = torch.zeros(cfg.batch_size, dtype=torch.float32, device=device) # (32,)
non_final_mask = torch.tensor(tuple(map(lambda s: s is not None, batch.next_state)), dtype=torch.bool, device=device) # (32,)
if cfg.use_double_dqn:
with torch.no_grad():
best_action = policy_net(non_final_next_states).view(non_final_next_states.size(0), -1).max(1)[1].view(non_final_next_states.size(0), 1) # (32?, 1)
next_state_values[non_final_mask] = target_net(non_final_next_states).view(non_final_next_states.size(0), -1).gather(1, best_action).view(-1) # (32?,)
else:
next_state_values[non_final_mask] = target_net(non_final_next_states).view(non_final_next_states.size(0), -1).max(1)[0].detach() # (32,)
expected_state_action_values = (reward_batch + torch.pow(cfg.discount_factor, ministeps_batch) * next_state_values) # (32,)
td_error = torch.abs(state_action_values - expected_state_action_values).detach() # (32,)
loss = smooth_l1_loss(state_action_values, expected_state_action_values)
optimizer.zero_grad()
loss.backward()
if cfg.grad_norm_clipping is not None:
torch.nn.utils.clip_grad_norm_(policy_net.parameters(), cfg.grad_norm_clipping)
optimizer.step()
train_info = {}
train_info['q_value_min'] = output.min().item()
train_info['q_value_max'] = output.max().item()
train_info['td_error'] = td_error.mean()
train_info['loss'] = loss
return train_info
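# The target computed above is the (double) DQN bootstrap
#   y = r + gamma^m * Q_target(s', argmax_a Q_policy(s', a))
# where m is the number of environment ministeps the action consumed; without
# use_double_dqn the max over actions is taken directly from the target net.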
def main(cfg):
# Set up logging and checkpointing
log_dir = Path(cfg.log_dir)
checkpoint_dir = Path(cfg.checkpoint_dir)
print('log_dir: {}'.format(log_dir))
print('checkpoint_dir: {}'.format(checkpoint_dir))
# Create environment
kwargs = {}
if sys.platform == 'darwin':
kwargs['use_gui'] = True
env = utils.get_env_from_cfg(cfg, **kwargs)
# Policy
policy = utils.get_policy_from_cfg(cfg, env.get_action_space(), train=True)
# Optimizer
optimizer = optim.SGD(policy.policy_net.parameters(), lr=cfg.learning_rate, momentum=0.9, weight_decay=cfg.weight_decay)
# Replay buffer
replay_buffer = ReplayBuffer(cfg.replay_buffer_size)
# Resume if applicable
start_timestep = 0
episode = 0
if cfg.checkpoint_path is not None:
checkpoint = torch.load(cfg.checkpoint_path)
start_timestep = checkpoint['timestep']
episode = checkpoint['episode']
optimizer.load_state_dict(checkpoint['optimizer'])
replay_buffer = checkpoint['replay_buffer']
print("=> loaded checkpoint '{}' (timestep {})".format(cfg.checkpoint_path, start_timestep))
# Target net
target_net = policy.build_network()
target_net.load_state_dict(policy.policy_net.state_dict())
target_net.eval()
# Logging
train_summary_writer = SummaryWriter(log_dir=str(log_dir / 'train'))
visualization_summary_writer = SummaryWriter(log_dir=str(log_dir / 'visualization'))
meters = Meters()
state = env.reset()
total_timesteps_with_warm_up = cfg.learning_starts + cfg.total_timesteps
for timestep in tqdm(range(start_timestep, total_timesteps_with_warm_up),
initial=start_timestep, total=total_timesteps_with_warm_up, file=sys.stdout):
start_time = time.time()
# Select an action
if cfg.exploration_timesteps > 0:
exploration_eps = 1 - min(max(timestep - cfg.learning_starts, 0) / cfg.exploration_timesteps, 1) * (1 - cfg.final_exploration)
else:
exploration_eps = cfg.final_exploration
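# e.g. with learning_starts=1000, exploration_timesteps=5000 and
# final_exploration=0.1 (hypothetical values), eps anneals linearly
# from 1.0 at timestep 1000 to 0.1 at timestep 6000, then stays there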
action, _ = policy.step(state, exploration_eps=exploration_eps)
# Step the simulation
next_state, reward, done, info = env.step(action)
ministeps = info['ministeps']
# Store in buffer
replay_buffer.push(state, action, reward, ministeps, next_state)
state = next_state
# Reset if episode ended
if done:
state = env.reset()
episode += 1
# Train network
if timestep >= cfg.learning_starts:
batch = replay_buffer.sample(cfg.batch_size)
train_info = train(cfg, policy.policy_net, target_net, optimizer, batch, policy.apply_transform)
# Update target network
if (timestep + 1) % cfg.target_update_freq == 0:
target_net.load_state_dict(policy.policy_net.state_dict())
step_time = time.time() - start_time
################################################################################
# Logging
# Meters
meters.update('step_time', step_time)
if timestep >= cfg.learning_starts:
for name, val in train_info.items():
meters.update(name, val)
if done:
for name in meters.get_names():
train_summary_writer.add_scalar(name, meters.avg(name), timestep + 1)
eta_seconds = meters.avg('step_time') * (total_timesteps_with_warm_up - timestep)
meters.reset()
train_summary_writer.add_scalar('episodes', episode, timestep + 1)
train_summary_writer.add_scalar('eta_hours', eta_seconds / 3600, timestep + 1)
for name in ['cumulative_cubes', 'cumulative_distance', 'cumulative_reward']:
train_summary_writer.add_scalar(name, info[name], timestep + 1)
# Visualize Q-network outputs
if timestep >= cfg.learning_starts and not cfg.use_steering_commands:
random_state = random.choice(replay_buffer.buffer).state
_, info = policy.step(random_state, debug=True)
output = info['output'].cpu().numpy()
visualization = utils.get_state_and_output_visualization(random_state, output).transpose((2, 0, 1))
visualization_summary_writer.add_image('output', visualization, timestep + 1)
################################################################################
# Checkpointing
if (timestep + 1) % cfg.checkpoint_freq == 0 or timestep + 1 == total_timesteps_with_warm_up:
# Save model
if not checkpoint_dir.exists():
checkpoint_dir.mkdir(parents=True, exist_ok=True)
model_name = 'model_{:08d}.pth.tar'.format(timestep + 1)
torch.save({
'timestep': timestep + 1,
'state_dict': policy.policy_net.state_dict(),
}, str(checkpoint_dir / model_name))
# Save checkpoint
checkpoint_name = 'checkpoint_{:08d}.pth.tar'.format(timestep + 1)
torch.save({
'timestep': timestep + 1,
'episode': episode,
'optimizer': optimizer.state_dict(),
'replay_buffer': replay_buffer,
}, str(checkpoint_dir / checkpoint_name))
# Save updated config file
cfg.model_path = str(checkpoint_dir / model_name)
cfg.checkpoint_path = str(checkpoint_dir / checkpoint_name)
utils.write_config(cfg, log_dir / 'config.yml')
# Remove old checkpoint
old_checkpoint_path = checkpoint_dir / 'checkpoint_{:08d}.pth.tar'.format((timestep + 1) - cfg.checkpoint_freq)
if old_checkpoint_path.exists():
old_checkpoint_path.unlink()
env.close()
# Create file to indicate training completed
(log_dir / 'success').touch()
class AverageMeter:
def __init__(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def reset(self):
self.val = 0
self.avg = 0
self.sum = 0
self.count = 0
def update(self, val, n=1):
self.val = val
self.sum += val * n
self.count += n
self.avg = self.sum / self.count
class Meters:
def __init__(self):
self.meters = {}
def get_names(self):
return self.meters.keys()
def reset(self):
for _, meter in self.meters.items():
meter.reset()
def update(self, name, val):
if name not in self.meters:
self.meters[name] = AverageMeter()
self.meters[name].update(val)
def avg(self, name):
return self.meters[name].avg
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('config_path')
config_path = parser.parse_args().config_path
config_path = utils.setup_run(config_path)
main(utils.read_config(config_path))
| 2.34375 | 2 |
newsfeed/settings/production.py | mccarrion/newsfeed-django | 2 | 12768262 | """
These are the production settings for this Django project.
"""
import logging
from .base import *
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.getenv('SECRET_KEY')
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = False
# SECURITY WARNING: If you deploy a Django app to production, make sure to set
# an appropriate host here.
# See https://docs.djangoproject.com/en/dev/ref/settings/
ALLOWED_HOSTS = ['*']
CORS_ORIGIN_ALLOW_ALL = True
# Define thumbnail dimensions
THUMBNAIL_CACHE_DIMENSIONS = True
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.postgresql',
'NAME': 'newsfeed',
'USER': os.getenv('DATABASE_USER'),
'PASSWORD': os.getenv('DATABASE_PASSWORD'),
'HOST': '127.0.0.1',
'PORT': '5432',
}
}
# Static file storage
STATIC_URL = 'http://storage.googleapis.com/newsfeed-django/static/'
MEDIA_URL = 'http://storage.googleapis.com/newsfeed-django/media/'
ADMIN_MEDIA_PREFIX = STATIC_URL + 'admin/'
| 1.679688 | 2 |
trypython/stdlib/dataclasses_/dataclasses02.py | devlights/try-python | 4 | 12768263 | """
Samples for the dataclasses module.
About specifying the frozen parameter.
REFERENCES:: http://bit.ly/2KTZynw
             http://bit.ly/2KJCnwk
             http://bit.ly/2KHeNA9
             http://bit.ly/2KFLGxc
"""
import dataclasses as dc
from trypython.common.commoncls import SampleBase
from trypython.common.commonfunc import pr
@dc.dataclass(frozen=True)
class Data1:
name: str
unit_price: float
quantity: int = 0
def total_cost(self) -> float:
return self.unit_price * self.quantity
class Sample(SampleBase):
def exec(self):
obj = Data1(name='test', unit_price=300.5)
try:
# --------------------------------------------------------
# A dataclass declared with frozen=True rejects attribute
# assignment: dataclasses.FrozenInstanceError is raised.
# --------------------------------------------------------
# noinspection PyDataclass
obj.quantity = 5
except dc.FrozenInstanceError as e:
pr('assignment to a frozen dataclass', e)
def go():
obj = Sample()
obj.exec()
| 3.3125 | 3 |
manage.py | ponyatov/laguna | 0 | 12768264 |
# powered by metaL: https://github.com/ponyatov/metaL/wiki/metaL-manifest
# \ <section:top>
import os
import sys
import config
# / <section:top>
# \ <section:mid>
def main():
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'proj.settings')
from django.core.management import execute_from_command_line
execute_from_command_line(sys.argv)
# / <section:mid>
# \ <section:bot>
if __name__ == '__main__':
main()
# / <section:bot> | 1.71875 | 2 |
Chapter09/filewrite1.py | LuisPereda/Learning_Python | 0 | 12768265 | file_input = open("motivation.txt",'w')
file_input.write("Never give up")
file_input.write("\nRise above hate")
file_input.write("\nNo body remember second place")
file_input.close() | 2.625 | 3 |
biosys/apps/main/api/urls.py | parksandwildlife/biosys | 2 | 12768266 |
from __future__ import absolute_import, unicode_literals, print_function, division
from django.conf.urls import url
from rest_framework import routers
from rest_framework.authtoken.views import obtain_auth_token
from djoser import views as djoser_views
from main.api import views as api_views
router = routers.DefaultRouter()
router.register(r'users?', api_views.UserViewSet, 'user')
router.register(r'programs?', api_views.ProgramViewSet, 'program')
router.register(r'projects?', api_views.ProjectViewSet, 'project')
router.register(r'sites?', api_views.SiteViewSet, 'site')
router.register(r'datasets?', api_views.DatasetViewSet, 'dataset')
router.register(r'records?', api_views.RecordViewSet, 'record')
router.register(r'media', api_views.MediaViewSet, 'media')
router.register(r'project-media', api_views.ProjectMediaViewSet, 'project-media')
router.register(r'dataset-media', api_views.DatasetMediaViewSet, 'dataset-media')
url_patterns = [
url(r'auth-token/', obtain_auth_token, name="auth-token"),
url(r'projects?/(?P<pk>\d+)/sites/?', api_views.ProjectSitesView.as_view(), name='project-sites'), # bulk sites
url(r'projects?/(?P<pk>\d+)/upload-sites/?', api_views.ProjectSitesUploadView.as_view(),
name='upload-sites'), # file upload for sites
url(r'datasets?/(?P<pk>\d+)/records/?', api_views.DatasetRecordsView.as_view(), name='dataset-records'),
# upload data files
url(r'datasets?/(?P<pk>\d+)/upload-records/?', api_views.DatasetUploadRecordsView.as_view(),
name='dataset-upload'),
url(r'statistics/?', api_views.StatisticsView.as_view(), name="statistics"),
url(r'whoami/?', api_views.WhoamiView.as_view(), name="whoami"),
url(r'species/?', api_views.SpeciesView.as_view(), name="species"),
url(r'logout/?', api_views.LogoutView.as_view(), name="logout"),
# utils
url(r'utils/geometry-to-data/dataset/(?P<pk>\d+)/?',
api_views.GeoConvertView.as_view(output='data'),
name="geometry-to-data"
),
url(r'utils/data-to-geometry/dataset/(?P<pk>\d+)/?',
api_views.GeoConvertView.as_view(output='geometry'),
name="data-to-geometry"
),
url(r'utils/infer-dataset/?', api_views.InferDatasetView.as_view(), name='infer-dataset'),
url(r'^password/?$', djoser_views.SetPasswordView.as_view(), name='set-password'),
url(
r'^password/reset/?$',
djoser_views.PasswordResetView.as_view(),
name='password-reset'
),
url(
r'^password/reset/confirm/?$',
djoser_views.PasswordResetConfirmView.as_view(),
name='password-reset-confirm'
),
]
app_name = 'api'
urls = router.urls + url_patterns
| 1.890625 | 2 |
setup.py | john-veillette/pymc-learn | 187 | 12768267 | <gh_stars>100-1000
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
# from builtins import *
from codecs import open
from os.path import realpath, dirname, join
from setuptools import setup, find_packages
import sys
import re
DISTNAME = 'pymc-learn'
DESCRIPTION = "Practical Probabilistic Machine Learning in Python"
AUTHOR = 'Pymc-Learn Team'
AUTHOR_EMAIL = '<EMAIL>'
URL = "https://github.com/pymc-learn/pymc-learn"
classifiers = [
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Operating System :: OS Independent'
]
PROJECT_ROOT = dirname(realpath(__file__))
with open(join(PROJECT_ROOT, 'README.rst'), encoding='utf-8') as r:
readme = r.read()
REQUIREMENTS_FILE = join(PROJECT_ROOT, 'requirements.txt')
with open(REQUIREMENTS_FILE) as f:
install_reqs = f.read().splitlines()
if sys.version_info < (3, 4):
install_reqs.append('enum34')
def get_version():
VERSIONFILE = join('pmlearn', '__init__.py')
lines = open(VERSIONFILE, 'rt').readlines()
version_regex = r"^__version__ = ['\"]([^'\"]*)['\"]"
for line in lines:
mo = re.search(version_regex, line, re.M)
if mo:
return mo.group(1)
raise RuntimeError('Unable to find version in %s.' % (VERSIONFILE,))
with open('AUTHORS.txt') as a:
# reSt-ify the authors list
authors = ''
for author in a.read().split('\n'):
authors += '| '+author+'\n'
with open('LICENSE') as license_file:
    license = license_file.read()
if __name__ == "__main__":
setup(
name=DISTNAME,
version=get_version(),
description=DESCRIPTION,
long_description=readme,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
license=license,
packages=find_packages(),
package_data={'docs': ['*']},
include_package_data=True,
zip_safe=False,
install_requires=install_reqs,
classifiers=classifiers
) | 1.890625 | 2 |
Tests/testData.py | juanluislm/GPy-ABCD | 0 | 12768268 | import numpy as np
import scipy.io as sio
from GPy_ABCD.Models.modelSearch import *
from testConsistency import save_one_run
if __name__ == '__main__':
# np.seterr(all='raise') # Raise exceptions instead of RuntimeWarnings. The exceptions can then be caught by the debugger
datasets = ['01-airline', '02-solar', '03-mauna', '04-wheat', '05-temperature', '06-internet', '07-call-centre', '08-radio', '09-gas-production', '10-sulphuric', '11-unemployment', '12-births', '13-wages']
    dataset_name = datasets[1 - 1]  # i.e. datasets[0], '01-airline'
data = sio.loadmat(f'./Data/{dataset_name}.mat')
# print(data.keys())
X = data['X']
Y = data['y']
sorted_models, tested_models, tested_k_exprs, expanded, not_expanded = explore_model_space(X, Y, start_kernels = standard_start_kernels, p_rules = production_rules_all,
restarts = 3, utility_function = 'BIC', rounds = 3, buffer = 2, dynamic_buffer = True, verbose = True, parallel = True)
for mod_depth in tested_models: print(', '.join([str(mod.kernel_expression) for mod in mod_depth]) + f'\n{len(mod_depth)}')
from matplotlib import pyplot as plt
for bm in sorted_models[:3]:
print(bm.kernel_expression)
print(bm.model.kern)
print(bm.model.log_likelihood())
print(bm.cached_utility_function)
bm.model.plot()
print(bm.interpret())
plt.show()
save_one_run(dataset_name, 'UNKNOWN', sorted_models, tested_models, tested_k_exprs)
| 1.914063 | 2 |
model/discriminator.py | azraelkuan/univnet | 83 | 12768269 | import torch
import torch.nn as nn
from .mpd import MultiPeriodDiscriminator
from .mrd import MultiResolutionDiscriminator
from omegaconf import OmegaConf
class Discriminator(nn.Module):
def __init__(self, hp):
super(Discriminator, self).__init__()
self.MRD = MultiResolutionDiscriminator(hp)
self.MPD = MultiPeriodDiscriminator(hp)
def forward(self, x):
return self.MRD(x), self.MPD(x)
if __name__ == '__main__':
hp = OmegaConf.load('../config/default.yaml')
model = Discriminator(hp)
x = torch.randn(3, 1, 16384)
print(x.shape)
mrd_output, mpd_output = model(x)
for features, score in mpd_output:
for feat in features:
print(feat.shape)
print(score.shape)
pytorch_total_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
print(pytorch_total_params)
| 2.3125 | 2 |
Python Fundamentals/Regular Expressions/Lab/Task01_02.py | IvanTodorovBG/SoftUni | 1 | 12768270 | import re
data = input()
pattern = r"\b([A-Z][a-z]+)\s([A-Z][a-z]+)\b"  # two capitalized words, e.g. a first and last name
matches = re.finditer(pattern, data)
for match in matches:
print(match.group(), end=" ")
| 3.921875 | 4 |
kindeditor/__version__.py | waketzheng/django-kindeditor | 4 | 12768271 | __title__ = "django-kindeditor"
__description__ = "Django admin KindEditor integration."
__url__ = "https://github.com/waketzheng/django-kindeditor"
__version__ = "0.3.0"
__author__ = "<NAME>"
__author_email__ = "<EMAIL>"
__license__ = "MIT"
__copyright__ = "Copyright 2019 Waket Zheng"
| 1.15625 | 1 |
sum/rankingTFIDF.py | zhangyuchen584/pushkin_gs | 0 | 12768272 | # -*- coding: utf-8 -*-
__author__ = "Yuchen"
__aim__ = 'rank top sentences in one topic'
__testCase__ = "../test/test_rankingTFIDF.py"
from sklearn.feature_extraction.text import CountVectorizer,TfidfTransformer
import sys
import argparse
import numpy as np
from termcolor import colored
from sklearn.metrics.pairwise import cosine_similarity
import operator
sys.path.append(r"../..")
from pushkin_gs.sum import tfidf_contentWords
class TFIDF(object):
def __init__(self, train_data, contentWords, topN, targCorpus):
"""
        :param train_data: produced in 'tfidf_contentWords.py'; the processed 'targData' dataset used to compute sentence tfidf scores
        :param contentWords: produced in 'tfidf_contentWords.py'; the content words extracted for each topic
        :param topN: number of top sentences used to summarize the document
        :param targCorpus: the original corpus from which the top N sentences are returned
"""
self.train_data = train_data
self.contentWords = contentWords
self.topN = topN
self.targCorpus = targCorpus
def SentRankTFIDF(self):
"""
:return: tfidfArray: [[0.12, 0.99, 0.24]
[0.4, 0.3, 0.4, 0.33, ..]...]
"""
"""#tfidf
#根据bag of words的原理计算corpus的词频矩阵,把每个句子(即矩阵的每一行)看做一个vector,计算每个vector(句子)在全部corpus中的tfidf值,每个句子的tfidf值是矩阵的每个行向量
"""
print ("func: SentRankTFIDF")
# convert corpus to term(word)_vectors
vectorizer = CountVectorizer()
# calculate appear times for each word
term_freq_matrix = vectorizer.fit_transform(self.train_data)
# get all terms(words) from corpus
termList = vectorizer.get_feature_names()
        # Convert the term-frequency matrix (term_freq_matrix) into TF-IDF
        # values for each sentence
transformer = TfidfTransformer()
tfidf = transformer.fit_transform(term_freq_matrix)
        # Data structure: tfidf[i][j] is the tf-idf weight of term j in
        # sentence i
tfidfArray = tfidf.toarray()
# print (tfidf.toarray())
"""#claculate sentence score
##only summing tfidf values where the words belong to contentWords##
根据上面求得的sentence tfidf矩阵(tfidfArray),加和求每一行(每个句子)的tfidf value,
不是全部相加,只是把代表content words的值加起来
Finally, 每个句子的tfidf分数除以整个文章tfidf总分数,即是该句子的ranking(sentRanking[i] = sentValueList[i]/docTfidfScore)
"""
# content words in each sentence
contWoEachSent = [[w for w in self.contentWords if w in sent.lower().split()]
for sent in self.train_data]
# content words index(termList) in each sentence
contWoIndex = [[[termList.index(w)] for w in self.contentWords if w in sent.lower().split()]
for sent in self.train_data]
print (' content words in each sentence',contWoEachSent,'\n','content words index in each sent',contWoIndex)
# calculate tfidf value for each sentence, return a score list for all sentence(sentValueList)
sentValueList = []
for i,index in enumerate(contWoIndex):
sentValue = sum(tfidfArray[i,index])
sentValueList.append(float(sentValue))
print (' sentValueList',sentValueList)
# sentence ranking #normalization
sentRanking = [value/max(sentValueList) for value in sentValueList]
sentRanking = np.array(sentRanking)
# print ("sentRanking",sentRanking[np.argsort(-sentRanking)])
        topNSent = [self.targCorpus[rank] for rank in np.argsort(-sentRanking)[:-1]]  # note: keeps all but the lowest-ranked sentence, not topN
topNProcess = [self.train_data[rank] for rank in np.argsort(-sentRanking)[:-1]]
dicTop = np.c_[sentRanking[np.argsort(-sentRanking)[:-1]],topNProcess,topNSent]
print (' sent with score',dicTop[:2])
print ("....")
print ('-'*200)
self.dicTop = dicTop
return dicTop
    # Calculate the cosine similarity between one sentence and a whole document
def calculateSimilarity(self, sentence, doc):
if doc == []:
return 0
vocab = {}
for word in sentence[:-1].split():
vocab[word] = 0
docInOneSentence = ''
for t in doc:
docInOneSentence += (t + ' ')
for word in t[:-1].split():
vocab[word] = 0
cv = CountVectorizer(vocabulary=vocab.keys())
docVector = cv.fit_transform([docInOneSentence])
sentenceVector = cv.fit_transform([sentence])
return cosine_similarity(docVector, sentenceVector)[0][0]
def MMR(self, dicTopSentence):
print("func: MMR")
        ## penalty factor: alpha trades relevance against redundancy
        ## score = alpha * sentence_score - (1 - alpha) * similarity(sentence, selected_so_far)
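        # MMR (Maximal Marginal Relevance): greedily pick the candidate with
        # the highest relevance score that is least similar to the sentences
        # already selected, keeping the summary relevant but non-redundant.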
n = 20 * len(self.targCorpus) / 100
alpha = 0.5
summarySet = []
temset = []
while n > 0:
mmr = {}
for sentence in dicTopSentence:
if not sentence[1] in temset:
# print (self.calculateSimilarity(sentence[1],summarySet))
mmr[sentence[1]] = alpha * float(sentence[0]) - (1 - alpha) * self.calculateSimilarity(sentence[1], temset)
selected = max(mmr.items(), key=operator.itemgetter(1))[0]
# print (selected)
temset.append(selected)
n -= 1
for temsents in temset:
summarySet.append(''.join([sent[2] for sent in self.dicTop if sent[1] == temsents]))
print ('\nTotal Sentences', colored(len(self.train_data),'red'))
print ('Top', colored(len(summarySet),'red') ,'sentences:')
for sent in enumerate(summarySet):
print (sent)
print ("**"*100)
return summarySet
def main():
"""
python rankingTFIDF.py --topic bmt_2.txt --contentWordNumber 100
    :predefined inputs:
    :--allData: X.txt file, which contains (target1 polarity1\tsent1\ntarget2 polarity2\tsent2\n )
    :--topic: bmt_0.txt, which contains (sent1 sent2 ... sentn)
"""
parser = argparse.ArgumentParser()
parser.add_argument('--topic', default='', help="target topic")
parser.add_argument('--contentWordNumber', default='', help="threshold for content Word Number")
parser.add_argument('--returnNSents', default='', help="top N sentences")
args = parser.parse_args()
targetTweets, targData, contentWords = tfidf_contentWords.main()
for key in targData:
trainData = targData[key].split(".")
# init corpus: finally return top N sentence from init corpus
for key in targetTweets:
initCorpus = targetTweets[key].split('\n')
instance = TFIDF(trainData, contentWords, args.returnNSents, initCorpus)
topSent = instance.SentRankTFIDF()
instance.MMR(topSent)
if __name__ == '__main__':
"""
python rankingTFIDF.py --topic bmt_2.txt --contentWordNumber 100 (--returnNSents 2)
"""
main() | 2.890625 | 3 |
telegram_payment_bot/member/members_payment_getter.py | ebellocchia/telegram_payment_bot | 3 | 12768273 | # Copyright (c) 2021 <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# Imports
#
from typing import Any, Dict, Optional
import pyrogram
from telegram_payment_bot.config.configurable_object import ConfigurableObject
from telegram_payment_bot.logger.logger import Logger
from telegram_payment_bot.misc.chat_members import ChatMembersList, ChatMembersGetter
from telegram_payment_bot.misc.helpers import MemberHelper
from telegram_payment_bot.misc.user import User
from telegram_payment_bot.payment.payments_loader_base import PaymentsLoaderBase
from telegram_payment_bot.payment.payments_loader_factory import PaymentsLoaderFactory
from telegram_payment_bot.payment.payments_data import SinglePayment, PaymentsData
#
# Classes
#
# Members payment getter class
class MembersPaymentGetter:
client: pyrogram.Client
config: ConfigurableObject
logger: Logger
payments_loader: PaymentsLoaderBase
payments_cache: Optional[PaymentsData]
single_payment_cache: Optional[Dict[str, Any]]
# Constructor
def __init__(self,
client: pyrogram.Client,
config: ConfigurableObject,
logger: Logger) -> None:
self.client = client
self.config = config
self.logger = logger
self.payments_loader = PaymentsLoaderFactory(config, logger).CreateLoader()
self.payments_cache = None
self.single_payment_cache = None
self.ReloadPayment()
# Reload payment
def ReloadPayment(self):
self.payments_cache = None
self.single_payment_cache = None
# Get all members with OK payment
def GetAllMembersWithOkPayment(self,
chat: pyrogram.types.Chat) -> ChatMembersList:
# Get all payments
payments = self.__GetAllPayments()
# Filter chat members
return ChatMembersGetter(self.client).FilterMembers(
chat,
lambda member: (
MemberHelper.IsValidMember(member) and
member.user.username is not None and
not payments.IsExpiredByUser(User.FromUserObject(self.config, member.user))
)
)
# Get all members with expired payment
def GetAllMembersWithExpiredPayment(self,
chat: pyrogram.types.Chat) -> ChatMembersList:
# Get all payments
payments = self.__GetAllPayments()
# For safety: if no data was loaded, no user is expired
if payments.Empty():
return ChatMembersList()
# Filter chat members
return ChatMembersGetter(self.client).FilterMembers(
chat,
lambda member: (
MemberHelper.IsValidMember(member) and
(member.user.username is None or
payments.IsExpiredByUser(User.FromUserObject(self.config, member.user)))
)
)
# Get all members with expiring payment
def GetAllMembersWithExpiringPayment(self,
chat: pyrogram.types.Chat,
days: int) -> ChatMembersList:
# Get all payments
payments = self.__GetAllPayments()
# For safety: if no data was loaded, no user is expired
if payments.Empty():
return ChatMembersList()
# Filter chat members
return ChatMembersGetter(self.client).FilterMembers(
chat,
lambda member: (
MemberHelper.IsValidMember(member) and
(member.user.username is None or
payments.IsExpiringInDaysByUser(User.FromUserObject(self.config, member.user), days))
)
)
# Get all emails with expired payment
def GetAllEmailsWithExpiredPayment(self) -> PaymentsData:
return self.__GetAllPayments().FilterExpired()
# Get all emails with expiring payment in the specified number of days
def GetAllEmailsWithExpiringPayment(self,
days: int) -> PaymentsData:
return self.__GetAllPayments().FilterExpiringInDays(days)
# Get if single member is expired
def IsSingleMemberExpired(self,
chat: pyrogram.types.Chat,
user: pyrogram.types.User) -> bool:
# If the user is not in the chat, consider payment as not expired
chat_members = ChatMembersGetter(self.client).GetSingle(chat, user)
if chat_members is None:
return False
# Get single payment
single_payment = self.__GetSinglePayment(user)
# If the user is not in the payment data, consider payment as expired
return single_payment.IsExpired() if single_payment is not None else True
# Get all payments
def __GetAllPayments(self) -> PaymentsData:
# Load only the first time
if self.payments_cache is None:
self.payments_cache = self.payments_loader.LoadAll()
return self.payments_cache
# Get single payment
def __GetSinglePayment(self,
user: pyrogram.types.User) -> Optional[SinglePayment]:
# Load only the first time
if self.single_payment_cache is None or self.single_payment_cache["user_id"] != user.id:
self.single_payment_cache = {
"payment": self.payments_loader.LoadSingleByUser(User.FromUserObject(self.config, user)),
"user_id": user.id,
}
return self.single_payment_cache["payment"]
| 1.609375 | 2 |
python/GafferTest/TestRunner.py | Tuftux/gaffer | 31 | 12768274 | ##########################################################################
#
# Copyright (c) 2019, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of <NAME> nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import functools
import json
import time
import collections
import Gaffer
# TestRunner capable of measuring performance of certain
# tests and failing them if they contain regressions compared
# to previous results.
class TestRunner( unittest.TextTestRunner ) :
def __init__( self, previousResultsFile = "" ) :
unittest.TextTestRunner.__init__(
self,
verbosity = 2,
)
self.__previousResultsFile = previousResultsFile
# Decorator used to annotate tests which measure performance.
class PerformanceTestMethod( object ) :
def __init__( self, repeat = 3, acceptableDifference = 0.01 ) :
self.__repeat = repeat
self.__acceptableDifference = acceptableDifference
# Called to return the decorated method.
def __call__( self, method ) :
@functools.wraps( method )
def wrapper( *args, **kw ) :
timings = []
for i in range( 0, self.__repeat ) :
Gaffer.ValuePlug.clearCache() # Put each iteration on an equal footing
TestRunner.PerformanceScope._total = None
t = time.time()
result = method( *args, **kw )
totalTime = time.time() - t
scopedTime = TestRunner.PerformanceScope._total
timings.append( scopedTime if scopedTime is not None else totalTime )
# Stash timings so they can be recovered
# by TestRunner.__Result.
args[0].timings = timings
# If previous timings are available, then
# compare against them and throw if a regression
# is detected.
previousTimings = getattr( args[0], "previousTimings" )
if previousTimings :
args[0].assertLessEqual( min( timings ), min( previousTimings ) + self.__acceptableDifference )
return result
wrapper.performanceTestMethod = True
return wrapper
# Context manager used to time only specific blocks
# within a PerformanceTestMethod.
class PerformanceScope( object ) :
# Protected to allow access by PerformanceTestMethod.
_total = None
def __enter__( self ) :
self.__startTime = time.time()
def __exit__( self, type, value, traceBack ) :
t = time.time() - self.__startTime
if TestRunner.PerformanceScope._total is not None :
TestRunner.PerformanceScope._total += t
else :
TestRunner.PerformanceScope._total = t
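	# Illustrative usage of the two classes above (sketch, not part of the
	# original file) :
	#
	#	class MyTest( unittest.TestCase ) :
	#
	#		@TestRunner.PerformanceTestMethod( repeat = 5 )
	#		def testPerformance( self ) :
	#
	#			with TestRunner.PerformanceScope() :
	#				doExpensiveThing() # hypothetical workload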
def run( self, test ) :
result = unittest.TextTestRunner.run( self, test )
result.writePerformance()
return result
# Adds a skip decorator to all non-performance-related tests.
@staticmethod
def filterPerformanceTests( test ) :
if isinstance( test, unittest.TestSuite ) :
for t in test :
TestRunner.filterPerformanceTests( t )
elif isinstance( test, unittest.TestCase ) :
testMethod = getattr( test, test._testMethodName )
if not getattr( testMethod, "performanceTestMethod", False ) :
setattr(
test, test._testMethodName,
unittest.skip( "Not a performance test" )( testMethod )
)
def _makeResult( self ) :
return self.__Result(
self.stream, self.descriptions, self.verbosity,
previousResultsFile = self.__previousResultsFile
)
class __Result( unittest.TextTestResult ) :
def __init__( self, stream, descriptions, verbosity, previousResultsFile ) :
unittest.TextTestResult.__init__( self, stream, descriptions, verbosity )
self.__results = collections.OrderedDict()
if previousResultsFile :
with open( previousResultsFile ) as f :
self.__previousResults = json.load( f )
else :
self.__previousResults = {}
self.__performanceImprovements = []
# Methods unique to __Result
def save( self, fileName ) :
with open( fileName, "w" ) as f :
json.dump( self.__results, f, indent = 4 )
def writePerformance( self ) :
if not len( self.__performanceImprovements ) :
return
self.stream.write( "{0}\n".format( self.separator2 ) )
self.stream.write( "{n} Performance Improvement{s} :\n\n".format(
n = len( self.__performanceImprovements ),
s = "s" if len( self.__performanceImprovements ) > 1 else ""
) )
for s in self.__performanceImprovements :
self.stream.write( "{}\n".format( s ) )
# Overrides for TextTestResult methods
def startTest( self, test ) :
previousResults = self.__previousResults.get( str( test ), {} )
test.previousTimings = previousResults.get( "timings", [] )
unittest.TextTestResult.startTest( self, test )
def addSuccess( self, test ) :
unittest.TextTestResult.addSuccess( self, test )
timings = getattr( test, "timings", None )
if timings and test.previousTimings :
new = min( timings )
old = min( test.previousTimings )
reduction = 100 * (old-new)/old
if reduction > 2 :
self.__performanceImprovements.append(
"- {test} : was {old:.2f}s now {new:.2f}s ({reduction:.0f}% reduction)".format(
test = str( test), old = old, new = new, reduction = reduction
)
)
self.__addResult( test, "success" )
def addError( self, test, error ) :
unittest.TextTestResult.addError( self, test, error )
self.__addResult( test, "error" )
def addFailure( self, test, error ) :
unittest.TextTestResult.addFailure( self, test, error )
self.__addResult( test, "failure" )
def wasSuccessful( self ) :
return unittest.TextTestResult.wasSuccessful( self )
# Private methods
def __addResult( self, test, result ) :
d = {
"result" : result
}
timings = getattr( test, "timings", None )
if timings :
d["timings"] = timings
self.__results[str(test)] = d
| 0.945313 | 1 |
mongoDB_management.py | jcamstan3370/MachineLearningPerovskites | 6 | 12768275 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jared
"""
import pandas as pd
import pymongo
import json
from os import listdir
from os.path import isfile, join
import multiprocessing as mp
import numpy as np
import dbConfig
from builder.dummyCrystalBuilder import processDummyCrystals
from ml.feature import getCompFeature
def import_content(db, filename, collection):
data = pd.read_csv(filename)
data = data.dropna()
data_json = json.loads(data.to_json(orient='records'))
db[collection].insert_many(data_json)
def update_database(db, folder, collection):
filepaths = [f for f in listdir(folder) if
(isfile(join(folder, f)) and f.endswith('.csv'))]
db[collection].delete_many({})
for filename in filepaths:
import_content(db, folder + filename, collection)
    print('Loading ' + str(db[collection].count_documents({})) +
' items from ' + collection + '...')
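    # Join rows that share a crystal_id: each document is merged with the
    # first matching document from a self-$lookup, and the merged set is
    # written out to a new '<collection>_aggregated' collection.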
db[collection].aggregate([
{
"$lookup": {
"from": collection,
"localField": "crystal_id",
"foreignField": "crystal_id",
"as" : "fromItems"
}
},
{
"$replaceRoot": { "newRoot": { "$mergeObjects":
[ { "$arrayElemAt": [ "$fromItems", 0 ] },
"$$ROOT" ] } }
},
{ "$project": { "fromItems": 0 } },
{ "$out": collection + "_aggregated" }
])
print('Done.')
def parallelize(df, numProcesses, func):
df_split = np.array_split(df, numProcesses)
pool = mp.Pool(processes=numProcesses)
results = pool.map(func, df_split)
pool.close()
pool.join()
results_df = pd.concat(results)
return results_df
def process_features(db, **kwargs):
df = pd.DataFrame(list(db['qw_outputs_aggregated'].find()))
if dbConfig.dummy == True:
df = processDummyCrystals(df)
print('Processing Features... ')
df = df.drop(df[df['nIterations'] >= 201].index).copy()
if kwargs['numProcesses'] > 1:
feature = parallelize(df, kwargs['numProcesses'], getCompFeature)
else:
feature = getCompFeature(df)
print('Len features', len(feature.columns))
if dbConfig.saveFeatures == True:
feature.to_csv(dbConfig.saveFeaturesPath +
dbConfig.saveFeaturesFile, index=False)
print('Done.')
def getDB():
client = pymongo.MongoClient(dbConfig.host, dbConfig.port)
return(client['perovskites'])
def main():
db = getDB()
update_database(db, dbConfig.crystalDBFolder, 'qw_outputs')
process_features(db, numProcesses = 4)
update_database(db, dbConfig.featureDBFolder, 'features')
if __name__ == "__main__":
main()
| 2.28125 | 2 |
ama_speech.py | UncleEngineer/AMAShop | 2 | 12768276 | #ama_speech.py
import speech_recognition as spr
from gtts import gTTS
from playsound import playsound
from googletrans import Translator
import googletrans
import time
def RecognizeAndSpeech(sound1='th',sound2='zh-cn'):
time.sleep(2)
print('Recognizing..')
#print(googletrans.LANGUAGES)
##### RECOGNITION ######
rec = spr.Recognizer()
with spr.Microphone() as speak:
audio = rec.listen(speak)
try:
result = rec.recognize_google(audio,language=sound1)
print('Stop...')
print(result)
    except Exception:
        print('Error: we could not recognize your speech')
result = 'ERROR'
##### TRANSLATOR ######
LAM = Translator()
word = LAM.translate(result,dest=sound2)
print('Meaning: ',word.text)
##### TEXT TO SPEECH ######
tts = gTTS(text=word.text, lang=sound2)
tts.save('result.mp3')
playsound('result.mp3')
RecognizeAndSpeech('th','zh-cn')
RecognizeAndSpeech('th','de')
| 3.1875 | 3 |
src/kgmk/sample/enum_sample.py | kagemeka/python | 0 | 12768277 | from enum import (
Enum,
auto,
unique,
IntEnum,
Flag,
IntFlag,
)
import enum
@unique
class Color(Enum):
RED = auto()
BLUE = auto()
YELLOW = "yellow"
CYAN = 3
GREEN = auto()
print(Color.RED.value)  # -> 1 (first auto() value)
print(Color.GREEN.value)  # -> 4: auto() continues from the last int value (3) | 3.65625 | 4 |
tests_functional/test_config_sync/test_config_sync.py | brailovskiy/grpc-test | 0 | 12768278 | import pytest
import allure
from hamcrest import *
from shared.data_generators import Generators
@allure.issue("SAN-71", "Drafts")
@pytest.mark.parametrize('d_user', ["2 users"], indirect=True)
class TestConfigSync:
""" Tests for synchronization, setting and getting configs
"""
@allure.title("Test for drafts")
@allure.testcase("XTE-11", "Test for drafts")
def test_drafts(self, d_user, update1):
"""
Test for drafts
"""
with allure.step("User1 send draft"):
updates = update1
msg = Generators.random_text_message()
d_user.send_draft(d_user.u1, d_user.outpeer1, draft_type='drafts_PRIVATE_', msg=msg)
params = d_user.get_parameters(d_user.u1)
print(params)
with allure.step("User1 gets draft, check draft body and chat id"):
for update in updates:
print(update)
if update.unboxed_update.HasField('updateParameterChanged'):
key = list(update.unboxed_update.updateParameterChanged.key.split('\n'))
id = key[1].split(' ')[1]
assert_that(int(id), equal_to(d_user.u2.user_info.user.id))
assert_that(update.unboxed_update.updateParameterChanged.value.value, equal_to(msg))
break
with allure.step("User1 delete draft"):
d_user.send_draft(d_user.u1, d_user.outpeer1, draft_type='drafts_PRIVATE_', msg=None)
with allure.step("User1 gets update with deletion of draft"):
for update in updates:
if update.unboxed_update.HasField('updateParameterChanged'):
                    assert_that(update.unboxed_update.updateParameterChanged.value.value, equal_to(''))
break
with allure.step("Draft of User1 should be empty"):
params = d_user.get_parameters(d_user.u1)
assert_that(getattr(params.parameters[0], 'key'), is_not(''))
assert_that(getattr(params.parameters[0], 'value'), equal_to(''))
| 1.945313 | 2 |
src/image_processor/bounding_box.py | salieri/2d-enhance | 0 | 12768279 |
class BoundingBox:
x: int
y: int
x2: int
y2: int
cx: int
cy: int
width: int
height: int
def __init__(self, x: int, y: int, width: int, height: int):
self.x = x
self.y = y
self.width = width
self.height = height
self.x2 = x + width - 1
self.y2 = y + height - 1
self.cx = round(x + width / 2)
self.cy = round(y + height / 2)
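
# A quick sanity check (illustrative; not in the original file):
if __name__ == '__main__':
    box = BoundingBox(10, 20, 100, 50)
    assert (box.x2, box.y2) == (109, 69)  # inclusive far corner
    assert (box.cx, box.cy) == (60, 45)   # rounded centre point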
| 3.515625 | 4 |
temperature_program.py | denislavdenov/python-book | 0 | 12768280 | <reponame>denislavdenov/python-book
#!/usr/bin/env python3
def convert_to_celsius(fahrenheit: float) -> float:
return (fahrenheit - 32.0) * 5.0 / 9.0
def above_freezing(celsius: float) -> bool:
return celsius > 0
fahrenheit = float(input('Enter the temperature in degrees Fahrenheit: '))
celsius = convert_to_celsius(fahrenheit)
print(celsius)
if above_freezing(celsius):
print('It is above freezing.')
else:
print('It is below freezing.') | 4.25 | 4 |
samples/sampleLib.py | big-main/vm-automation | 1 | 12768281 | import json
def checkForProcess(vmObject, processName):
vmObject.updateProcList()
if processName in ' '.join(vmObject.procList):
return True
else:
return False
def loadJsonFile(fileName):
try:
fileObject = open(fileName, 'r')
fileStr = fileObject.read()
fileObject.close()
except IOError as e:
print("UNABLE TO OPEN FILE: " + str(fileName) + '\n' + str(e))
return None
try:
fileDic = json.loads(fileStr)
except Exception as e:
print("UNABLE TO PARSE FILE: " + str(fileName) + '\n' + str(e))
return None
return fileDic
def makeVmList(vmServer, keywordArg, fileArg):
vmList = []
    if fileArg is not None:
vmFileObj = open(fileArg, 'r')
desiredVms = vmFileObj.read().splitlines()
vmFileObj.close()
vmServer.enumerateVms()
for vm in vmServer.vmList:
if vm.vmName in desiredVms:
vmList.append(vm)
    if keywordArg is not None:
vmServer.enumerateVms()
for vm in vmServer.vmList:
if keywordArg in vm.vmName:
vmList.append(vm)
return vmList
def waitForProcess(vmObject, procName, timeout = 600):
retVal = False
waitCount = 1
    if timeout > 0:
        waitCount = timeout // 5  # poll every 5 seconds; must be an int for range()
for i in range(waitCount):
vmObject.updateProcList()
if procName in ' '.join(vmObject.procList):
retVal = True
break
time.sleep(5)
return retVal
| 2.40625 | 2 |
exercisio23.py | bruno194/EXERCICIOS | 1 | 12768282 | <reponame>bruno194/EXERCICIOS
numero = str(input('digite um numero'))
print('unidade: {}'.format(numero[1]))
print('dezena: {}'.format(numero[2]))
print('centena: {} '.format(numero[3]))
print('unidade de milhar: {}'.format(numero[4]))
| 3.875 | 4 |
src/Statistics_TechIndicators/CalcTechIndictors.py | zmcx16/AxisTradeCult | 35 | 12768283 | <reponame>zmcx16/AxisTradeCult<gh_stars>10-100
import pandas
import numpy
def GetRollingMean(values, window):
return pandas.Series.rolling(values, window = window, center = False).mean()
def GetRollingStd(values, window):
return pandas.Series.rolling(values, window = window, center = False).std()
def GetRollingMax(values, window):
return pandas.Series.rolling(values, window = window, center = False).max()
def GetRollingMin(values, window):
return pandas.Series.rolling(values, window = window, center = False).min()
def GetEMA(values, window):
return values.ewm(span=window).mean()
def GetSMMA(values, window):
return values.ewm(ignore_na=False, alpha=1.0 / window, min_periods=0, adjust=True).mean()
def GetDMA(values, short_window=10, long_window=50):
return GetRollingMean(values, short_window)-GetRollingMean(values, long_window)
def GetRSV(close_values, high_values, low_values, period):
hight_max = pandas.Series.rolling(high_values, window = period, center = False).max()
low_min = pandas.Series.rolling(low_values, window = period, center = False).min()
return (close_values - low_min) / (hight_max - low_min) * 100
def GetRSI(values, window):
values_shift_1 = values.shift(1)
d = values - values_shift_1
p = (d + d.abs()) / 2
n = (-d + d.abs()) / 2
RS = GetEMA(p, window = window) / GetEMA(n, window = window)
return 100 - 100 / (1.0 + RS)
def GetMACD(values, fast_period, slow_period, signal_period):
DIF = GetEMA(values, fast_period) - GetEMA(values, slow_period)
DEM = GetEMA(DIF, signal_period)
OSC = DIF - DEM
return DIF, DEM, OSC
def GetWR(close_values, high_values, low_values, window):
hn = pandas.Series.rolling(high_values, window = window, center = False).max()
ln = pandas.Series.rolling(low_values, window = window, center = False).min()
return (close_values-hn)/(hn-ln)*100
def GetCCI(close_values, high_values, low_values, window):
TP = (high_values+low_values+close_values)/3
SMA = pandas.Series.rolling(TP, window = window, center = False).mean()
MD = TP.rolling(window=window, center=False).apply(lambda x: numpy.fabs(x - x.mean()).mean())
return (TP - SMA) / (.015 * MD)
def GetTR(close_values, high_values, low_values):
prev_close = close_values.shift(1)
prev_close.fillna(method='bfill', inplace=True)
c1 = high_values - low_values
c2 = numpy.abs(high_values - prev_close)
c3 = numpy.abs(low_values - prev_close)
return pandas.Series(numpy.max((c1, c2, c3), axis=0), index = close_values.index)
def GetATR(close_values, high_values, low_values, window):
TR = GetTR(close_values, high_values, low_values)
return GetSMMA(TR, window)
def GetDMI(close_values, high_values, low_values, window=14):
UpMove = high_values - high_values.shift(1)
DownMove = low_values.shift(1) - low_values
pDM = pandas.Series(numpy.zeros(shape=(close_values.count()),dtype=float), index=close_values.index)
nDM = pandas.Series(numpy.zeros(shape=(close_values.count()),dtype=float), index=close_values.index)
for i,v in close_values.items():
if UpMove[i] > DownMove[i] and UpMove[i] > 0:
pDM[i] = UpMove[i]
else:
pDM[i] = 0
if DownMove[i] > UpMove[i] and DownMove[i] > 0:
nDM[i] = DownMove[i]
else:
nDM[i] = 0
pDI = 100 * GetEMA(pDM, window) / GetATR(close_values, high_values, low_values, window)
nDI = 100 * GetEMA(nDM, window) / GetATR(close_values, high_values, low_values, window)
DX = 100 * numpy.fabs((pDI - nDI) / (pDI + nDI))
ADX = GetEMA(DX, window)
ADXR = GetEMA(ADX, window)
return pDI, nDI, ADX, ADXR
def GetTEMA(values, window):
single = GetEMA(values, window)
double = GetEMA(single, window)
triple = GetEMA(double, window)
return 3*single - 3*double + triple
def GetVR(close_values, volume_values, window):
prev_close = close_values.shift(1)
prev_close.fillna(method='bfill', inplace=True)
av = pandas.Series(numpy.where(close_values-prev_close>0, volume_values, 0), index=close_values.index)
avs = pandas.Series.rolling(av, window = window, center = False).sum()
bv = pandas.Series(numpy.where(close_values-prev_close<0, volume_values, 0), index=close_values.index)
bvs = pandas.Series.rolling(bv, window = window, center = False).sum()
cv = pandas.Series(numpy.where(close_values-prev_close==0, volume_values, 0), index=close_values.index)
cvs = pandas.Series.rolling(cv, window = window, center = False).sum()
return (avs + cvs / 2) / (bvs + cvs / 2) * 100
def GetRollingVar(values, window):
return values.rolling(window = window).var()
def GetLogReturn(values):
return numpy.log(values) - numpy.log(values.shift(1))
def GetDiff(values):
return values.diff()
def GetChange(values):
return values.pct_change() * 100
def GetBollingerBands(rm, rstd):
upper_band = rm + rstd * 2
lower_band = rm - rstd * 2
return upper_band, lower_band
def GetKDJ(close_values, high_values, low_values, period):
rsv = GetRSV(close_values, high_values, low_values, period)
k = pandas.Series(_calc_kd(rsv))
d = pandas.Series(_calc_kd(k))
j = 3 * d - 2 * k
return k, d, j
def _calc_kd(val, weight = 1 / 3.0):
"""
k[0]=50
for i in range(1,len(val)):
if pandas.isnull(val[i]):
k[i] = 50
else:
k[i] = 2/3.0*k[i-1]+1/3.0*val[i]
return k
"""
k = 50.0
for i in weight * val:
if pandas.isnull(i):
yield k
else:
k = (1 - weight) * k + i
yield k
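
# Minimal usage sketch (not in the original file; the sample values are made up):
if __name__ == '__main__':
    df = pandas.DataFrame({
        'close': [10.0, 10.5, 10.2, 10.8, 11.0, 10.9],
        'high': [10.6, 10.9, 10.7, 11.1, 11.3, 11.2],
        'low': [9.8, 10.1, 10.0, 10.5, 10.7, 10.6],
    })
    print(GetEMA(df['close'], window=3))
    k, d, j = GetKDJ(df['close'], df['high'], df['low'], period=3)
    print(pandas.DataFrame({'K': k, 'D': d, 'J': j}))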
| 2.234375 | 2 |
db_migrations/versions/7404cf411ca9_init.py | HBPSP8Repo/i2b2-setup | 0 | 12768284 | <filename>db_migrations/versions/7404cf411ca9_init.py
"""init
Revision ID: 7404cf411ca9
Create Date: 2017-02-16 11:22:22.388919
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '7404cf411ca9'
down_revision = None
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_table('code_lookup',
sa.Column('table_cd', sa.VARCHAR(length=100), nullable=False),
sa.Column('column_cd', sa.VARCHAR(length=100), nullable=False),
sa.Column('code_cd', sa.VARCHAR(length=50), nullable=False),
sa.Column('name_char', sa.VARCHAR(length=650), nullable=True),
sa.Column('lookup_blob', sa.TEXT(), nullable=True),
sa.Column('upload_date', sa.TIMESTAMP(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('table_cd', 'column_cd', 'code_cd')
)
op.create_index(op.f('ix_code_lookup_name_char'), 'code_lookup', ['name_char'], unique=False)
op.create_index(op.f('ix_code_lookup_upload_id'), 'code_lookup', ['upload_id'], unique=False)
op.create_table('concept_dimension',
sa.Column('concept_path', sa.VARCHAR(length=700), nullable=False),
sa.Column('concept_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('name_char', sa.VARCHAR(length=2000), nullable=True),
sa.Column('concept_blob', sa.TEXT(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('concept_path')
)
op.create_index(op.f('ix_concept_dimension_upload_id'), 'concept_dimension', ['upload_id'], unique=False)
op.create_table('encounter_mapping',
sa.Column('encounter_ide', sa.VARCHAR(length=200), nullable=False),
sa.Column('encounter_ide_source', sa.VARCHAR(length=50), nullable=False),
sa.Column('project_id', sa.VARCHAR(length=50), nullable=False),
sa.Column('encounter_num', sa.INTEGER(), nullable=False),
sa.Column('patient_ide', sa.VARCHAR(length=200), nullable=False),
sa.Column('patient_ide_source', sa.VARCHAR(length=50), nullable=False),
sa.Column('encounter_ide_status', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_date', sa.TIMESTAMP(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('encounter_ide', 'encounter_ide_source', 'project_id', 'patient_ide', 'patient_ide_source')
)
op.create_index('em_idx_encpath', 'encounter_mapping', ['encounter_ide', 'encounter_ide_source', 'patient_ide', 'patient_ide_source', 'encounter_num'], unique=False)
op.create_index(op.f('ix_encounter_mapping_encounter_num'), 'encounter_mapping', ['encounter_num'], unique=False)
op.create_index(op.f('ix_encounter_mapping_upload_id'), 'encounter_mapping', ['upload_id'], unique=False)
op.create_table('modifier_dimension',
sa.Column('modifier_path', sa.VARCHAR(length=700), nullable=False),
sa.Column('modifier_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('name_char', sa.VARCHAR(length=2000), nullable=True),
sa.Column('modifier_blob', sa.TEXT(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('modifier_path')
)
op.create_index(op.f('ix_modifier_dimension_upload_id'), 'modifier_dimension', ['upload_id'], unique=False)
op.create_table('observation_fact',
sa.Column('encounter_num', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('patient_num', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('concept_cd', sa.VARCHAR(length=50), nullable=False),
sa.Column('provider_id', sa.VARCHAR(length=50), nullable=False),
sa.Column('start_date', sa.TIMESTAMP(), nullable=False),
sa.Column('modifier_cd', sa.VARCHAR(length=100), server_default='@', nullable=False),
sa.Column('instance_num', sa.INTEGER(), server_default='1', nullable=False),
sa.Column('valtype_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('tval_char', sa.VARCHAR(length=255), nullable=True),
sa.Column('nval_num', sa.DECIMAL(precision=18, scale=5), nullable=True),
sa.Column('valueflag_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('quantity_num', sa.DECIMAL(precision=18, scale=5), nullable=True),
sa.Column('units_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('end_date', sa.TIMESTAMP(), nullable=True),
sa.Column('location_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('observation_blob', sa.TEXT(), nullable=True),
sa.Column('confidence_num', sa.DECIMAL(precision=18, scale=5), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.Column('text_search_index', sa.INTEGER(), nullable=False),
sa.PrimaryKeyConstraint('encounter_num', 'patient_num', 'concept_cd', 'provider_id', 'start_date', 'modifier_cd', 'instance_num'),
sa.UniqueConstraint('text_search_index')
)
op.create_index(op.f('ix_observation_fact_concept_cd'), 'observation_fact', ['concept_cd'], unique=False)
op.create_index(op.f('ix_observation_fact_modifier_cd'), 'observation_fact', ['modifier_cd'], unique=False)
op.create_index(op.f('ix_observation_fact_sourcesystem_cd'), 'observation_fact', ['sourcesystem_cd'], unique=False)
op.create_index(op.f('ix_observation_fact_upload_id'), 'observation_fact', ['upload_id'], unique=False)
op.create_index('of_idx_allobservation_fact', 'observation_fact', ['patient_num', 'encounter_num', 'concept_cd', 'start_date', 'provider_id', 'modifier_cd', 'instance_num', 'valtype_cd', 'tval_char', 'nval_num', 'valueflag_cd', 'quantity_num', 'units_cd', 'end_date', 'location_cd', 'confidence_num'], unique=False)
op.create_index('of_idx_encounter_patient', 'observation_fact', ['encounter_num', 'patient_num', 'instance_num'], unique=False)
op.create_index('of_idx_start_date', 'observation_fact', ['start_date', 'patient_num'], unique=False)
op.create_table('patient_dimension',
sa.Column('patient_num', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('vital_status_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('birth_date', sa.TIMESTAMP(), nullable=True),
sa.Column('death_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sex_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('age_in_years_num', sa.INTEGER(), nullable=True),
sa.Column('language_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('race_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('marital_status_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('religion_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('zip_cd', sa.VARCHAR(length=10), nullable=True),
sa.Column('statecityzip_path', sa.VARCHAR(length=700), nullable=True),
sa.Column('income_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('patient_blob', sa.TEXT(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('patient_num')
)
op.create_index(op.f('ix_patient_dimension_upload_id'), 'patient_dimension', ['upload_id'], unique=False)
op.create_index('pd_idx_allpatientdim', 'patient_dimension', ['patient_num', 'vital_status_cd', 'birth_date', 'death_date', 'sex_cd', 'age_in_years_num', 'language_cd', 'race_cd', 'marital_status_cd', 'income_cd', 'religion_cd', 'zip_cd'], unique=False)
op.create_index('pd_idx_dates', 'patient_dimension', ['patient_num', 'vital_status_cd', 'birth_date', 'death_date'], unique=False)
op.create_index('pd_idx_statecityzip', 'patient_dimension', ['statecityzip_path', 'patient_num'], unique=False)
op.create_table('patient_mapping',
sa.Column('patient_ide', sa.VARCHAR(length=200), nullable=False),
sa.Column('patient_ide_source', sa.VARCHAR(length=50), nullable=False),
sa.Column('patient_num', sa.INTEGER(), nullable=True),
sa.Column('patient_ide_status', sa.VARCHAR(length=50), nullable=True),
sa.Column('project_id', sa.VARCHAR(length=50), nullable=False),
sa.Column('upload_date', sa.TIMESTAMP(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('patient_ide', 'patient_ide_source', 'project_id'),
sa.UniqueConstraint('patient_num')
)
op.create_table('provider_dimension',
sa.Column('provider_id', sa.VARCHAR(length=50), nullable=False),
sa.Column('provider_path', sa.VARCHAR(length=700), nullable=False),
sa.Column('name_char', sa.VARCHAR(length=850), nullable=True),
sa.Column('provider_blob', sa.TEXT(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('provider_id', 'provider_path')
)
op.create_index(op.f('ix_provider_dimension_upload_id'), 'provider_dimension', ['upload_id'], unique=False)
op.create_index('pd_idx_name_char', 'provider_dimension', ['provider_id', 'name_char'], unique=False)
op.create_table('visit_dimension',
sa.Column('encounter_num', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('patient_num', sa.INTEGER(), autoincrement=False, nullable=False),
sa.Column('active_status_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('start_date', sa.TIMESTAMP(), nullable=True),
sa.Column('end_date', sa.TIMESTAMP(), nullable=True),
sa.Column('inout_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('location_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('location_path', sa.VARCHAR(length=900), nullable=True),
sa.Column('length_of_stay', sa.INTEGER(), nullable=True),
sa.Column('visit_blob', sa.TEXT(), nullable=True),
sa.Column('update_date', sa.TIMESTAMP(), nullable=True),
sa.Column('download_date', sa.TIMESTAMP(), nullable=True),
sa.Column('import_date', sa.TIMESTAMP(), nullable=True),
sa.Column('sourcesystem_cd', sa.VARCHAR(length=50), nullable=True),
sa.Column('upload_id', sa.INTEGER(), nullable=True),
sa.PrimaryKeyConstraint('encounter_num', 'patient_num')
)
op.create_index(op.f('ix_visit_dimension_upload_id'), 'visit_dimension', ['upload_id'], unique=False)
op.create_index('vd_idx_allvisitdim', 'visit_dimension', ['encounter_num', 'patient_num', 'inout_cd', 'location_cd', 'start_date', 'length_of_stay', 'end_date'], unique=False)
op.create_index('vd_idx_dates', 'visit_dimension', ['encounter_num', 'start_date', 'end_date'], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('vd_idx_dates', table_name='visit_dimension')
op.drop_index('vd_idx_allvisitdim', table_name='visit_dimension')
op.drop_index(op.f('ix_visit_dimension_upload_id'), table_name='visit_dimension')
op.drop_table('visit_dimension')
op.drop_index('pd_idx_name_char', table_name='provider_dimension')
op.drop_index(op.f('ix_provider_dimension_upload_id'), table_name='provider_dimension')
op.drop_table('provider_dimension')
op.drop_table('patient_mapping')
op.drop_index('pd_idx_statecityzip', table_name='patient_dimension')
op.drop_index('pd_idx_dates', table_name='patient_dimension')
op.drop_index('pd_idx_allpatientdim', table_name='patient_dimension')
op.drop_index(op.f('ix_patient_dimension_upload_id'), table_name='patient_dimension')
op.drop_table('patient_dimension')
op.drop_index('of_idx_start_date', table_name='observation_fact')
op.drop_index('of_idx_encounter_patient', table_name='observation_fact')
op.drop_index('of_idx_allobservation_fact', table_name='observation_fact')
op.drop_index(op.f('ix_observation_fact_upload_id'), table_name='observation_fact')
op.drop_index(op.f('ix_observation_fact_sourcesystem_cd'), table_name='observation_fact')
op.drop_index(op.f('ix_observation_fact_modifier_cd'), table_name='observation_fact')
op.drop_index(op.f('ix_observation_fact_concept_cd'), table_name='observation_fact')
op.drop_table('observation_fact')
op.drop_index(op.f('ix_modifier_dimension_upload_id'), table_name='modifier_dimension')
op.drop_table('modifier_dimension')
op.drop_index(op.f('ix_encounter_mapping_upload_id'), table_name='encounter_mapping')
op.drop_index(op.f('ix_encounter_mapping_encounter_num'), table_name='encounter_mapping')
op.drop_index('em_idx_encpath', table_name='encounter_mapping')
op.drop_table('encounter_mapping')
op.drop_index(op.f('ix_concept_dimension_upload_id'), table_name='concept_dimension')
op.drop_table('concept_dimension')
op.drop_index(op.f('ix_code_lookup_upload_id'), table_name='code_lookup')
op.drop_index(op.f('ix_code_lookup_name_char'), table_name='code_lookup')
op.drop_table('code_lookup')
# ### end Alembic commands ###
| 1.875 | 2 |
mysql_dbcon.py | uniaim-event-team/pullre-kun | 3 | 12768285 | import codecs
from typing import Optional
from sqlalchemy.engine import create_engine
from config import webapp_settings
from model import Session
codecs.register(
lambda name: codecs.lookup('utf8') if name == 'utf8mb4' else None)
class ConnectionPooling(object):
def __init__(self, **params):
self.engine = create_engine(webapp_settings['mysql_connection'], **params)
c = ConnectionPooling(max_overflow=50, pool_size=20, pool_recycle=3600, **webapp_settings.get('mysql_extra_param', {}))
class Connection:
execution_options = None
def __init__(self, execution_options=None):
self.execution_options = execution_options
self.s: Optional[Session] = None
def __enter__(self):
"""
create connection object
:return:
"""
self.engine = c.engine
Session.configure(bind=self.engine)
self.s = Session()
if self.execution_options:
self.s.connection(execution_options=self.execution_options)
return self
def __exit__(self, exc_type, exc_val, exc_tb):
"""
remove session
"""
Session.remove()
def upsert_from_form(self, model_class, form):
args_dict = {}
for column in model_class.__table__.columns:
if hasattr(form, column.name) and getattr(form, column.name).data:
# TODO Noneや''の時に更新されない
args_dict[column.name] = getattr(form, column.name).data
if form.id.data:
# update (if form has id)
id_ = form.id.data
self.s.query(model_class).filter(model_class.id == id_).update(
args_dict, synchronize_session=False)
else:
# create
new_model = model_class(**args_dict)
self.s.add(new_model)
self.s.flush()
self.s.refresh(new_model)
id_ = new_model.id
self.s.commit()
return id_
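
    # Illustrative usage (assumes a Flask-WTF-style form with an `id` field;
    # not part of the original file):
    #
    #   with Connection() as cn:
    #       new_or_updated_id = cn.upsert_from_form(User, form)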
| 2.40625 | 2 |
train_rl_MMI.py | eyalbd2/LanguageModel-UsingRL | 6 | 12768286 | <reponame>eyalbd2/LanguageModel-UsingRL
import os
import random
import logging
import numpy as np
from tensorboardX import SummaryWriter
import torch
import torch.optim as optim
import torch.nn.functional as F
import argparse
from libbots import data, model, utils
from model_test import run_test_mutual
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-SAVES_DIR', type=str, default='saves', help='Save directory')
parser.add_argument('-name', type=str, default='RL_Mutual', help='Specific model saves directory')
parser.add_argument('-BATCH_SIZE', type=int, default=32, help='Batch Size for training')
parser.add_argument('-LEARNING_RATE', type=float, default=1e-4, help='Learning Rate')
parser.add_argument('-MAX_EPOCHES', type=int, default=10000, help='Number of training iterations')
parser.add_argument('-CROSS_ENT_PROB', type=float, default=0.3, help='Probability to run a CE batch')
parser.add_argument('-TEACHER_PROB', type=float, default=0.8, help='Probability to run an imitation batch in case '
'of using CE')
parser.add_argument('-data', type=str, default='comedy', help='Genre to use - for data')
parser.add_argument('-num_of_samples', type=int, default=4, help='Number of samples per per each example')
parser.add_argument('-load_seq2seq_path', type=str, default='Final_Saves/seq2seq/epoch_090_0.800_0.107.dat',
help='Pre-trained seq2seq model location')
    parser.add_argument('-load_b_seq2seq_path', type=str, default='Final_Saves/backward_seq2seq/epoch_080_0.780_0.104.dat',
help='Pre-trained backward seq2seq model location')
args = parser.parse_args()
saves_path = os.path.join(args.SAVES_DIR, args.name)
os.makedirs(saves_path, exist_ok=True)
log = logging.getLogger("train")
logging.basicConfig(format="%(asctime)-15s %(levelname)s %(message)s", level=logging.INFO)
phrase_pairs, emb_dict = data.load_data(genre_filter=args.data)
data.save_emb_dict(saves_path, emb_dict)
train_data = data.encode_phrase_pairs(phrase_pairs, emb_dict)
rand = np.random.RandomState(data.SHUFFLE_SEED)
rand.shuffle(train_data)
train_data, test_data = data.split_train_test(train_data)
log.info("Obtained %d phrase pairs with %d uniq words", len(phrase_pairs), len(emb_dict))
log.info("Training data converted, got %d samples", len(train_data))
rev_emb_dict = {idx: word for word, idx in emb_dict.items()}
# Load pre-trained nets
net = model.PhraseModel(emb_size=model.EMBEDDING_DIM, dict_size=len(emb_dict),
hid_size=model.HIDDEN_STATE_SIZE).to(device)
net.load_state_dict(torch.load(args.load_seq2seq_path))
back_net = model.PhraseModel(emb_size=model.EMBEDDING_DIM, dict_size=len(emb_dict),
hid_size=model.HIDDEN_STATE_SIZE).to(device)
    back_net.load_state_dict(torch.load(args.load_b_seq2seq_path))
rl_net = model.PhraseModel(emb_size=model.EMBEDDING_DIM, dict_size=len(emb_dict),
hid_size=model.HIDDEN_STATE_SIZE).to(device)
rl_net.load_state_dict(torch.load(args.load_seq2seq_path))
writer = SummaryWriter(comment="-" + args.name)
# BEGIN & END tokens
beg_token = torch.LongTensor([emb_dict[data.BEGIN_TOKEN]]).to(device)
end_token = emb_dict[data.END_TOKEN]
optimiser = optim.Adam(rl_net.parameters(), lr=args.LEARNING_RATE, eps=1e-3)
batch_idx = 0
best_mutual = None
for epoch in range(args.MAX_EPOCHES):
dial_shown = False
random.shuffle(train_data)
total_samples = 0
mutuals_argmax = []
mutuals_sample = []
for batch in data.iterate_batches(train_data, args.BATCH_SIZE):
batch_idx += 1
optimiser.zero_grad()
input_seq, out_seq_list, input_batch, output_batch = model.pack_batch(batch, rl_net.emb, device)
enc = rl_net.encode(input_seq)
net_policies = []
net_actions = []
net_advantages = []
beg_embedding = rl_net.emb(beg_token)
if random.random() < args.CROSS_ENT_PROB:
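                # cross-entropy batch: occasionally fall back to (mostly teacher-forced)
                # supervised training to keep the policy anchored to the seq2seq model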
net_results = []
net_targets = []
for idx, out_seq in enumerate(out_seq_list):
ref_indices = output_batch[idx][1:]
enc_item = rl_net.get_encoded_item(enc, idx)
if random.random() < args.TEACHER_PROB:
r = rl_net.decode_teacher(enc_item, out_seq)
else:
r, seq = rl_net.decode_chain_argmax(enc_item, out_seq.data[0:1],
len(ref_indices))
net_results.append(r)
net_targets.extend(ref_indices)
results_v = torch.cat(net_results)
targets_v = torch.LongTensor(net_targets).to(device)
loss_v = F.cross_entropy(results_v, targets_v)
loss_v.backward()
for param in rl_net.parameters():
param.grad.data.clamp_(-0.2, 0.2)
optimiser.step()
else:
for idx, inp_idx in enumerate(input_batch):
total_samples += 1
ref_indices = output_batch[idx][1:]
item_enc = rl_net.get_encoded_item(enc, idx)
r_argmax, actions = rl_net.decode_chain_argmax(item_enc, beg_embedding, data.MAX_TOKENS,
stop_at_token=end_token)
argmax_mutual = utils.calc_mutual(net, back_net, inp_idx, actions)
mutuals_argmax.append(argmax_mutual)
if not dial_shown:
log.info("Input: %s", utils.untokenize(data.decode_words(inp_idx, rev_emb_dict)))
ref_words = [utils.untokenize(data.decode_words([ref], rev_emb_dict)) for ref in ref_indices]
log.info("Refer: %s", " ".join(ref_words))
log.info("Argmax: %s, mutual=%.4f",
utils.untokenize(data.decode_words(actions, rev_emb_dict)), argmax_mutual)
for _ in range(args.num_of_samples):
r_sample, actions = rl_net.decode_chain_sampling(item_enc, beg_embedding,
data.MAX_TOKENS, stop_at_token=end_token)
sample_mutual = utils.calc_mutual(net, back_net, inp_idx, actions)
if not dial_shown:
log.info("Sample: %s, mutual=%.4f",
utils.untokenize(data.decode_words(actions, rev_emb_dict)), sample_mutual)
net_policies.append(r_sample)
net_actions.extend(actions)
net_advantages.extend([sample_mutual - argmax_mutual] * len(actions))
mutuals_sample.append(sample_mutual)
dial_shown = True
if not net_policies:
continue
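            # REINFORCE: weight the log-probabilities of the sampled tokens by the
            # advantage (sample mutual information minus the greedy argmax baseline)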
policies_v = torch.cat(net_policies)
actions_t = torch.LongTensor(net_actions).to(device)
adv_v = torch.FloatTensor(net_advantages).to(device)
log_prob_v = F.log_softmax(policies_v, dim=1)
log_prob_actions_v = adv_v * log_prob_v[range(len(net_actions)), actions_t]
loss_policy_v = -log_prob_actions_v.mean()
loss_v = loss_policy_v
loss_v.backward()
for param in rl_net.parameters():
param.grad.data.clamp_(-0.2, 0.2)
optimiser.step()
mutual_test = run_test_mutual(test_data, rl_net, net, back_net, beg_token, end_token, device)
mutual = np.mean(mutuals_argmax)
writer.add_scalar("mutual_test", mutual_test, batch_idx)
writer.add_scalar("mutual_argmax", mutual, batch_idx)
writer.add_scalar("mutual_sample", np.mean(mutuals_sample), batch_idx)
writer.add_scalar("epoch", batch_idx, epoch)
log.info("Epoch %d, test mutual: %.3f", epoch, mutual_test)
if best_mutual is None or best_mutual < mutual_test:
best_mutual = mutual_test
log.info("Best mutual updated: %.4f", best_mutual)
torch.save(rl_net.state_dict(), os.path.join(saves_path, "mutual_%.3f_%02d.dat" % (mutual_test, epoch)))
if epoch % 10 == 0:
torch.save(rl_net.state_dict(),
os.path.join(saves_path, "epoch_%03d_%.3f_%.3f.dat" % (epoch, mutual, mutual_test)))
writer.close() | 2.0625 | 2 |
mdit_py_plugins/wordcount/__init__.py | cjolowicz/mdit-py-plugins | 32 | 12768287 | <filename>mdit_py_plugins/wordcount/__init__.py
import string
from typing import Callable, List
from markdown_it import MarkdownIt
from markdown_it.rules_core import StateCore
def basic_count(text: str) -> int:
"""Split the string and ignore punctuation only elements."""
return sum([el.strip(string.punctuation).isalpha() for el in text.split()])
def wordcount_plugin(
md: MarkdownIt,
*,
per_minute: int = 200,
count_func: Callable[[str], int] = basic_count,
store_text: bool = False
):
"""Plugin for computing and storing the word count.
Stores in the ``env`` e.g.::
env["wordcount"] = {
"words": 200
"minutes": 1,
}
If "wordcount" is already in the env, it will update it.
:param per_minute: Words per minute reading speed
:param store_text: store all text under a "text" key, as a list of strings
"""
def _word_count_rule(state: StateCore) -> None:
text: List[str] = []
words = 0
for token in state.tokens:
if token.type == "text":
words += count_func(token.content)
if store_text:
text.append(token.content)
elif token.type == "inline":
for child in token.children or ():
if child.type == "text":
words += count_func(child.content)
if store_text:
text.append(child.content)
data = state.env.setdefault("wordcount", {})
if store_text:
data.setdefault("text", [])
data["text"] += text
data.setdefault("words", 0)
data["words"] += words
data["minutes"] = int(round(data["words"] / per_minute))
md.core.ruler.push("wordcount", _word_count_rule)
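
# Example usage (sketch):
#
#     md = MarkdownIt().use(wordcount_plugin)
#     env = {}
#     md.render("Hello wordcount plugin", env)
#     env["wordcount"]["words"]  # -> 3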
| 2.71875 | 3 |
python_module/megengine/_internal/version.py | stoneMo/MegEngine | 2 | 12768288 | <filename>python_module/megengine/_internal/version.py
# -*- coding: utf-8 -*-
# MegEngine is Licensed under the Apache License, Version 2.0 (the "License")
#
# Copyright (c) 2014-2020 Megvii Inc. All rights reserved.
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT ARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
"""version information for MegBrain package"""
import collections
from . import mgb as _mgb
class Version(
collections.namedtuple("VersionBase", ["major", "minor", "patch", "dev"])
):
"""simple sematic version object"""
@classmethod
def __normalize(cls, v):
if isinstance(v, str):
v = v.split(".")
a, b, c = map(int, v)
return cls(a, b, c)
def __eq__(self, rhs):
return super().__eq__(self.__normalize(rhs))
def __ne__(self, rhs):
return super().__ne__(self.__normalize(rhs))
def __lt__(self, rhs):
return super().__lt__(self.__normalize(rhs))
def __le__(self, rhs):
return super().__le__(self.__normalize(rhs))
def __gt__(self, rhs):
return super().__gt__(self.__normalize(rhs))
def __ge__(self, rhs):
return super().__ge__(self.__normalize(rhs))
def __str__(self):
rst = "{}.{}.{}".format(self.major, self.minor, self.patch)
if self.dev:
rst += "-dev{}".format(self.dev)
return rst
Version.__new__.__defaults__ = (0,) # dev defaults to 0
version_info = Version(*_mgb._get_mgb_version())
__version__ = str(version_info)
| 2.046875 | 2 |
test/test_lifescan.py | garberw/glucometerutils_gw | 1 | 12768289 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""Tests for the LifeScan OneTouch Ultra Mini driver."""
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__copyright__ = 'Copyright © 2013-2017, <NAME>'
__license__ = 'MIT'
import array
import os
import sys
import unittest
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from glucometerutils.support import lifescan
class TestChecksum(unittest.TestCase):
def test_crc(self):
self.assertEqual(
0x41cd,
lifescan.crc_ccitt(b'\x02\x06\x06\x03'))
def test_crc_array(self):
cmd_array = array.array('B', b'\x02\x06\x08\x03')
self.assertEqual(
0x62C2,
lifescan.crc_ccitt(cmd_array))
if __name__ == '__main__':
unittest.main()
| 2.03125 | 2 |
Algorithms/GraphAlgorithms/floydwarshall.py | abhijit-c/AlgorithmsTopicReview | 0 | 12768290 | <filename>Algorithms/GraphAlgorithms/floydwarshall.py
def floydwarshall(G):
"""
Compute and return the all pairs shortest paths solution.
Notice the returned path cost matrix P has modified entries. For
example, P[i][j] contains a tuple (c, v1) where c is the cost of the
shortest path from i to j, and v1 is the first vertex along said path
after i. If there is no such vertex, then it is -1.
"""
N = len(G)
P = [[(G[i][j],j) for j in range(N)] for i in range(N)]
for i in range(N):
P[i][i] = (0, -1)
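    # relax every (i, j) pair through each intermediate vertex k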
for k in range(N):
for i in range(N):
for j in range(N):
alt = P[i][k][0] + P[k][j][0]
if P[i][j][0] > alt:
P[i][j] = (alt, P[i][k][1])
return P
def printpath(P, u, w):
"""
Given modified path cost matrix (see floydwarshall) return shortest path
from i to j.
"""
path = [u]
while P[u][w][1] != -1:
path.append(P[u][w][1])
u = P[u][w][1]
return path
def pprint(matrix): #https://stackoverflow.com/questions/13214809/
"""
Pretty print matrix with proper spacing. Don't worry about this.
"""
s = [[str(e) for e in row] for row in matrix]
lens = [max(map(len, col)) for col in zip(*s)]
fmt = '\t'.join('{{:{}}}'.format(x) for x in lens)
table = [fmt.format(*row) for row in s]
print('\n'.join(table))
'''
Let us create the following weighted graph (graph art stolen from g4g)
10
(0)------->(3)
| /|\
5 | |
| | 1
\|/ |
(1)------->(2)
3
'''
inf = float('inf')
G = [ [inf , 5 , inf, 10 ],
[inf , inf, 3 , inf],
[inf , inf, inf, 1 ],
[inf , inf, inf, inf]]
print('Edge Cost Matrix')
pprint(G)
P = floydwarshall(G)
print('Path Cost Matrix')
pprint(P)
print(printpath(P, 0, 2))
| 3.921875 | 4 |
openclean/data/serialize.py | remram44/openclean-core | 4 | 12768291 | <gh_stars>1-10
# This file is part of the Data Cleaning Library (openclean).
#
# Copyright (C) 2018-2021 New York University.
#
# openclean is released under the Revised BSD License. See file LICENSE for
# full license details.
from histore.archive.serialize.base import COMPACT # noqa: F401
from histore.document.json.reader import default_decoder # noqa: F401
from histore.document.json.writer import DefaultEncoder # noqa: F401
| 1.210938 | 1 |
peaksIdentification/peacks_testannotated.py | andreagia/peaks-identification | 0 | 12768292 | <filename>peaksIdentification/peacks_testannotated.py
import numpy as np
import sys
import re
import plotly.express as px
import pandas as pd
import plotly.graph_objects as go
# from peaksIdentification.peaks_assignement import generate_data, reassign_peaks
from core.peak_manager_V2 import PeakManager, Spectra
class TestAnnotated:
def __init__(self, path="./data/Annotated_spectra/"):
self.path = path
#fin1 = self.path + files[0] + ".txt"
#fin2 = self.path + files[1] + ".txt"
#print("*****************************+", fin1, fin2, "*****************************")
#self.ass1 = self.__readtxt(fin1)
#self.ass2 = self.__readtxt(fin2)
def getAssignmentData(self, filein1, filein2):
fin1 = self.path + filein1 + ".txt"
fin2 = self.path + filein2 + ".txt"
return self.__readtxt(fin1), self.__readtxt(fin2)
def __readtxt(self, filein):
parse = open(filein, "r").readlines()
sparse = filter(lambda x: re.match(r"^H/N[ \t]+\w+[ \t]+\w+[ \t]+[0-9 .]+[ \t]+[0-9 .]+", x), parse)
        # map each residue name to a list holding its (x, y) chemical-shift pair,
        # matching the [name][0][0] / [name][0][1] indexing used elsewhere
        mapout = dict(map(lambda x: [re.split(r"[ \t]+", x)[1],
                                     [[re.split(r"[ \t]+", x)[3], re.split(r"[ \t]+", x)[4]]]], sparse))
return mapout
def set_spe1(self,spe1):
print("setted spe1")
self.__spe1 = spe1
def set_spe2(self,spe2):
self.__spe2 = spe2
def setSearch_depth(self, search_depth):
self.search_depth = search_depth
def normSpect(self, in1, in2):
if in1.shape[0] > in2.shape[0]:
gra = in1
pic = in2
inv = False
elif in1.shape[0] < in2.shape[0]:
gra = in2
pic = in1
inv = True
else:
return in1, in2
dim = gra.shape[0]
diff = gra.shape[0] - pic.shape[0]
zpeaks = np.zeros((dim, 2))
zpeaks[:-diff, :] = pic
if inv:
return zpeaks, gra
else:
return gra, zpeaks
def findkey(self, dict, valuein):
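        """Return the residue names in `dict` whose stored (x, y) peak equals `valuein`."""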
return [name for name, value in dict.items() if (valuein[0] == float(value[0][0]) and valuein[1] == float(value[0][1]))]
def getResult(self):
return self.results
# def plot(self):
def getDataFrame(self):
if (self.__spe1 is not None) and (self.__spe2 is not None):
return self.__spe1, self.__spe2
def readsimple(self):
for fi in self.list_files:
fin1 = self.path + fi[0] + ".txt"
fin2 = self.path + fi[1] + ".txt"
print("*****************************+", fin1, fin2, "*****************************")
ass1 = self.readtxt(fin1)
ass2 = self.readtxt(fin2)
print(ass1)
sys.exit()
npa1 = []
npa2 = []
for name, value in ass1.items():
if name in ass2.keys():
npa1.append(value[0])
npa2.append(ass2[name][0])
for name, value in ass1.items():
if name not in ass2.keys():
npa1.append(value[0])
for name, value in ass2.items():
if name not in ass1.keys():
npa2.append(value[0])
npa1t = np.array(list(map(lambda x: x[0], ass1.values())))
npa2t = np.array(list(map(lambda x: x[0], ass2.values())))
npa1 = np.array(npa1)
npa2 = np.array(npa2)
npa1 = np.asfarray(npa1, float)
npa2 = np.asfarray(npa2, float)
print("Setting spe1")
self.set_spe1(npa1)
self.set_spe2(npa2)
def read(self):
resultcomp = {}
for fi in self.list_files:
fin1 = self.path + fi[0] + ".txt"
fin2 = self.path + fi[1] + ".txt"
print("*****************************+", fin1, fin2, "*****************************")
ass1 = self.readtxt(fin1) # dictionary [{'aa': (x, y) }, {}]
ass2 = self.readtxt(fin2) # dictionary
npa1 = []
npa2 = []
for name, value in ass1.items():
if name in ass2.keys():
                    npa1.append(value[0])  # value[0] is the (x, y) peak pair
                    npa2.append(ass2[name][0])  # ass2[name][0] is the matching pair in the second spectrum
for name, value in ass1.items():
if name not in ass2.keys():
npa1.append(value[0])
for name, value in ass2.items():
if name not in ass1.keys():
npa2.append(value[0])
npa1t = np.array( list(map(lambda x: x[0], ass1.values())) )
npa2t = np.array( list(map(lambda x: x[0], ass2.values())) )
npa1 = np.array(npa1)
npa2 = np.array(npa2)
npa1 = np.asfarray(npa1, float)
npa2 = np.asfarray(npa2, float)
print("Setting spe1")
self.set_spe1(npa1)
self.set_spe2(npa2)
# np.savetxt("save1.csv",npa1,delimiter=',')
# for i,a in zip(npa2, ass2.keys()):
# print(i,a, ass2[a], findkey(ass2, i))
# print(npa1)
# print(npa1t)
print(npa1.shape, npa1t.shape, npa2.shape, npa2t.shape)
# v1, v2 = self.normSpect(npa1, npa2)
            # align the vectors
# perc = 0.01
# peakmanager, score = reassign_peaks(v1, v2, perc, DEBUG=True)
# X, Y = peakmanager.get_couples()
old_spectra = Spectra(npa1, suffix='p')
new_spectra = Spectra(npa2, suffix='s')
# pm = PeakManager()
# pm = PeakManager(search_depth=self.search_depth)
pm = PeakManager(search_depth=4, max_search_per_level=10, log=False)
X, Y = pm.getAssociations(old_spectra, new_spectra)
self.x = X
self.y = Y
good = 0
bad = 0
assfin = []
s1name = []
s2name = []
hdist = []
hdistg = []
hdistn = []
hdisti = []
##################################################################
for x, y in zip(X, Y):
# estra il nome del aa dal dizionario
key1 = self.findkey(ass1, x)
key2 = self.findkey(ass2, y)
s1name.append(key1[0])
s2name.append(key2[0])
# print("PICCHI",x,y)
# print(np.sqrt((x[0]-y[0])**2+(x[1]-y[1])**2))
distt = np.sqrt( (x[0] - y[0]) ** 2 + (x[1] - y[1]) ** 2 )
#
if key1[0] in ass1.keys() and key1[0] in ass2.keys():
# print(ass1[key1[0]])
# print(ass2[key1[0]])
x1 = float( ass1[key1[0]] [0][0] )
x2 = float( ass1[key1[0]] [0][1] )
y1 = float( ass2[key1[0]] [0][0] )
y2 = float( ass2[key1[0]] [0][1] )
distp = np.sqrt((x1 - y1) ** 2 + (x2 - y2) ** 2)
hdist.append(distt)
hdistg.append(distp)
hdistn.append(key1[0])
strnkey1 = ''.join(char for char in key1[0] if char.isnumeric())
hdisti.append(int(strnkey1))
else:
distp = 100.0
# print("distt ->",distt)
# print("distp ->",distp)
if self.findkey(ass1, x) == self.findkey(ass2, y):
good = good + 1
status = "good"
else:
bad = bad + 1
status = "bad"
# print(x, "\t-->", y, "keyx ", self.findkey(ass1,x), "keyy ", self.findkey(ass2,y))
assfin.append([self.findkey(ass1, x), self.findkey(ass2, y)])
resultcomp[key1[0]] = {"key2": key2[0], "staus": status, "distt": distt, "distp": distp}
##################################################################
print("Good = ", good)
print("Bad = ", bad)
print("Perc good", good / (bad + good))
print("deepsearch= ", self.search_depth)
# results[fi[0]+"_"+fi[1]] = {"Good": good, "Bad": bad, "Perc ": good/(bad+good), "assfin": assfin}
self.results[fi[0] + "_" + fi[1]] = {"Good": good, "Bad": bad, "Perc ": good / (bad + good),
"dim": [npa1.shape, npa2.shape], "statiscic": resultcomp}
fig = go.Figure()
fig.update_layout(width=1300, height=1000)
fig['layout']['xaxis']['autorange'] = "reversed"
fig['layout']['yaxis']['autorange'] = "reversed"
fig.add_trace(
go.Scatter(
mode='markers+text',
x=X[:, 0],
y=X[:, 1],
marker=dict(
color='LightSkyBlue',
size=4,
line=dict(
color='MediumPurple',
width=1
)
),
name='Spectra1',
text=s1name, textposition="bottom center"
))
fig.add_trace(
go.Scatter(
mode='markers+text',
x=Y[:, 0],
y=Y[:, 1],
marker=dict(
color='Coral',
size=4,
line=dict(
color='MediumPurple',
width=1
)
),
name='Spectra2',
text=s2name, textposition="bottom center"
))
for x, y in zip(X, Y):
key1 = self.findkey(ass1, x)
key2 = self.findkey(ass2, y)
if key1[0] == key2[0]:
fig.add_trace(go.Scatter(x=[x[0], y[0]], y=[x[1], y[1]], mode='lines', showlegend=False,
line=dict(color="MediumPurple")))
else:
fig.add_trace(go.Scatter(x=[x[0], y[0]], y=[x[1], y[1]], mode='lines', showlegend=False,
line=dict(color="red")))
fig.show()
print(hdist)
print(hdistn)
print(hdisti)
fig1 = go.Figure(data=[go.Histogram(x=hdist)])
fig1.show()
df1 = pd.DataFrame({"DistanceC": hdist, "DistanceG": hdistg, "Index": hdisti, "Name": hdistn})
df1 = df1.sort_values(by=['Index'])
print(df1)
fig2 = px.bar(df1, x="Name", y="DistanceC")
fig2.show()
fig3 = px.bar(df1, x="Name", y="DistanceG")
fig3.show()
fig4 = go.Figure()
fig4.update_layout(width=1300, height=1000)
fig4.add_trace(go.Bar(
x=df1["Name"],
y=df1["DistanceG"],
name='DistanceG',
marker_color='blue'
))
fig4.add_trace(go.Bar(
x=df1["Name"],
y=df1["DistanceC"],
name='DistanceC',
marker_color='red'
))
# Here we modify the tickangle of the xaxis, resulting in rotated labels.
fig4.update_layout(barmode='group', xaxis_tickangle=-45)
fig4.show()
# return self.results
# print(self.results)
| 2.46875 | 2 |
schemas/sav_15.py | SathyaBhat/hades_save_editor | 49 | 12768293 | <filename>schemas/sav_15.py
import zlib
from construct import *
from constant import FILE_SIGNATURE, SAVE_DATA_V15_LENGTH
sav15_save_data_schema = Struct(
"version" / Int32ul,
"location" / PascalString(Int32ul, "utf8"),
"runs" / Int32ul,
"active_meta_points" / Int32ul,
"active_shrine_points" / Int32ul,
"god_mode_enabled" / Byte,
"hell_mode_enabled" / Byte,
"lua_keys" / PrefixedArray(
Int32ul,
PascalString(Int32ul, "utf8")
),
"current_map_name" / PascalString(Int32ul, "utf8"),
"start_next_map" / PascalString(Int32ul, "utf8"),
"lua_state" / PrefixedArray(Int32ul, Byte)
)
sav15_schema = Struct(
"signature" / Const(FILE_SIGNATURE),
"checksum_offset" / Tell,
"checksum" / Padding(4),
"save_data" / RawCopy(
Padded(
SAVE_DATA_V15_LENGTH,
sav15_save_data_schema
)
),
"checksum" / Pointer(
this.checksum_offset,
Checksum(
Int32ul,
lambda data: zlib.adler32(data, 1),
this.save_data.data
)
)
)
| 1.9375 | 2 |
visual/polyFit.py | dparnold/data-analysis | 0 | 12768294 | def polyFit(xData, yData, degree):
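    """Fit a polynomial of the given degree to (xData, yData) and return (coefficients, callable evaluator)."""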
fitValues = np.polyfit(xData, yData, degree)
def function(x):
func = 0
for i in fitValues:
func = func*x + i
return func
return (fitValues,function)
if __name__ == "__main__":
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import pltStyle # used for formatting the plots
# read some data
data = pd.read_csv("polyFit.csv", header=None, names=["x","y"])
# create a new figure object
fig = plt.figure()
# create axis and a new subplot within the figure
ax = fig.add_subplot(1, 1, 1)
# plot the measurement data
ax.plot(data.x, data.y,marker="+", label="Measurement data")
# add polynomial fits with different degrees
for i in range(1,7,1):
ax.plot(data.x, polyFit(data.x,data.y,i)[1](data.x), label="Polynomial fit degree = "+str(i))
# create the legend and set its position
ax.legend(loc="lower left")
# manually set the axes limits and label them
ax.set_xlim([0,12])
ax.set_ylim([-2,1.1])
ax.set_xlabel(r'x axis label using \TeX\ and SI-units such as an upright $\si{\micro}$')
ax.set_ylabel(r'unusual symbols {\"a} \c{s} \AE\ \~{n}')
ax.grid(True)
#plt.tight_layout()
plt.savefig("polyFit.png")
| 3.765625 | 4 |
Population.py | MarcusRainbow/EconomySimulation | 0 | 12768295 | <reponame>MarcusRainbow/EconomySimulation<filename>Population.py
import numpy as np
from typing import Callable, List
import matplotlib.pyplot as plt
def simulation(
        simulations: int,
        draws: int,
        population: int,
        game: Callable) -> np.ndarray:
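    """Play `draws` random pairwise games in each of `simulations` populations
    and return the mean of the per-simulation sorted wealth vectors."""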
# run the simulations
amount = np.full((simulations, population), 100.0)
rows = np.arange(simulations)
for _ in range(draws):
winners = np.random.randint(population, size=simulations)
losers = np.random.randint(population, size=simulations)
game(amount, rows, winners, losers)
# sort each row of the amount matrix
# print(f"after simulation: {amount}")
sorted = np.sort(amount)
# print(f"after sort: {amount}")
return np.mean(sorted, axis=0)
def arithmetic_game(amount, rows, winners, losers):
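    """Transfer a fixed amount from each row's loser to its winner."""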
# print(f"arithmetic_game({amount}, {winners}, {losers})")
WIN = 20
amount[rows, winners] += WIN
amount[rows, losers] -= WIN
# print(f" results: {amount}")
def geometric_game(amount, rows, winners, losers):
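    """Transfer a fixed fraction of the poorer player's wealth from loser to winner."""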
# print(f"arithmetic_game({amount}, {winners}, {losers})")
WIN = 0.2
winnings = np.minimum(amount[rows, winners], amount[rows, losers]) * WIN
amount[rows, winners] += winnings
amount[rows, losers] -= winnings
# print(f" results: {amount}")
# def geometric_game(prev1: float, prev2: float, draw: bool) -> Tuple[float, float]:
# WIN = 0.2
# smallest = min(prev1, prev2)
# win = WIN * smallest
# if draw:
# return prev1 + win, prev2 - win
# else:
# return prev1 - win, prev2 + win
def graph(points: List[float], axis: str):
x = range(len(points))
plt.plot(x, points)
#plt.xlabel('x - axis')
plt.ylabel(axis)
#plt.title(title)
#plt.show(block = False)
def test_simulate_arithmetic() -> List[float]:
SIMULATIONS = 10000
DRAWS = 100
POPULATION = 100
mean = simulation(SIMULATIONS, DRAWS, POPULATION, arithmetic_game)
#print(f"test_simulate_arithmetic: mean={mean} stdev={stdev}")
graph(mean, "test_simulate_arithmetic: means")
def test_simulate_geometric():
SIMULATIONS = 10000
DRAWS = 10000
POPULATION = 100
mean = simulation(SIMULATIONS, DRAWS, POPULATION, geometric_game)
#print(f"test_simulate_geometric: mean={mean} stdev={stdev}")
graph(mean, "test_simulate_geometric: means")
if __name__ == "__main__":
test_simulate_arithmetic()
test_simulate_geometric()
plt.show()
| 4.0625 | 4 |
todo/migrations/0012_filer_file_20201217_1211.py | paiuolo/django-todo | 0 | 12768296 | # Generated by Django 2.2.16 on 2020-12-17 12:11
from django.db import migrations, models
import django.db.models.deletion
import filer.fields.file
import todo.models
import todo.storage
class Migration(migrations.Migration):
dependencies = [
('filer', '0012_file_mime_type'),
('todo', '0011_auto_20190724_1130'),
]
operations = [
migrations.AddField(
model_name='attachment',
name='filer_file',
field=filer.fields.file.FilerFileField(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, to='filer.File'),
),
migrations.AlterField(
model_name='attachment',
name='file',
field=models.FileField(max_length=255, storage=todo.storage.PrivateFileSystemStorage(), upload_to=todo.models.get_attachment_upload_dir),
),
]
| 1.648438 | 2 |
server/src/repositories/FilmTop5.py | alexfire008/centrale | 1 | 12768297 | <reponame>alexfire008/centrale<gh_stars>1-10
""" Defines the User repository """
from models import Notation
from models import db
from sqlalchemy import func
class FilmTop5Repository:
"""The repository for the notation average truc"""
@staticmethod
def get():
return db.session.query(Notation.movie_title, func.avg(Notation.note)).group_by(Notation.movie_title).order_by(func.avg(Notation.note).desc()).limit(5).all()
| 2.375 | 2 |
Python/binary_search.py | cavigna/Sort_Order | 0 | 12768298 | def binary_search(array: list, valor_buscado, inicio=0, fin=None):
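    """Recursively binary-search a sorted list and report the index of the sought value."""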
if fin == None:
fin = len(array)-1
if inicio > fin:
return f"No se encontró el valor: {valor_buscado}"
medio = (inicio + fin)//2
if valor_buscado == array[medio]:
return f"El valor {valor_buscado} está el indice {medio} "
if valor_buscado < array[medio]:
return binary_search(array, valor_buscado, inicio, medio-1)
return binary_search(array, valor_buscado, medio+1, fin)
array = [-23, 4, 7, 12, 52, 94]
print(binary_search(array, 0))  # Value not found: 0
print(binary_search(array, 52))  # found at index 4
| 3.859375 | 4 |
shexer/core/shexing/strategy/direct_and_inverse_shexing_strategy.py | DaniFdezAlvarez/dbpedia-shexer | 0 | 12768299 | <filename>shexer/core/shexing/strategy/direct_and_inverse_shexing_strategy.py
from shexer.core.shexing.strategy.asbtract_shexing_strategy import AbstractShexingStrategy
from shexer.utils.shapes import build_shapes_name_for_class_uri
from shexer.model.statement import Statement
from shexer.model.shape import Shape
_POS_FEATURES_DIRECT = 0
_POS_FEATURES_INVERSE = 1
class DirectAndInverseShexingStrategy(AbstractShexingStrategy):
def __init__(self, class_shexer):
super().__init__(class_shexer)
self._class_profile_dict = self._class_shexer._class_profile_dict
self._shapes_namespace = self._class_shexer._shapes_namespace
self._class_counts_dict = self._class_shexer._class_counts_dict
def remove_statements_to_gone_shapes(self, shape, shape_names_to_remove):
shape.direct_statements = self._statements_without_shapes_to_remove(
original_statements=shape.direct_statements,
shape_names_to_remove=shape_names_to_remove)
shape.inverse_statements = self._statements_without_shapes_to_remove(
original_statements=shape.inverse_statements,
shape_names_to_remove=shape_names_to_remove)
def yield_base_shapes(self, acceptance_threshold):
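        """Yield one base Shape per class, keeping the direct and inverse
        statements whose frequency reaches the acceptance threshold."""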
for a_class_key in self._class_profile_dict:
name = build_shapes_name_for_class_uri(class_uri=a_class_key,
shapes_namespace=self._shapes_namespace)
number_of_instances = float(self._class_counts_dict[a_class_key])
direct_statements = self._build_base_direct_statements(acceptance_threshold, a_class_key,
number_of_instances)
inverse_statements = self._build_base_inverse_statements(acceptance_threshold=acceptance_threshold,
class_key=a_class_key,
number_of_instances=number_of_instances)
yield Shape(name=name,
class_uri=a_class_key,
statements=direct_statements + inverse_statements)
def set_valid_shape_constraints(self, shape):
valid_statements = self._select_valid_statements_of_shape(shape.direct_statements)
valid_statements += self._select_valid_statements_of_shape(shape.inverse_statements)
self._tune_list_of_valid_statements(valid_statements=valid_statements)
shape.statements = valid_statements
def _build_base_inverse_statements(self, acceptance_threshold, class_key, number_of_instances):
result = []
for a_prop_key in self._class_profile_dict[class_key][_POS_FEATURES_INVERSE]:
for a_type_key in self._class_profile_dict[class_key][_POS_FEATURES_INVERSE][a_prop_key]:
for a_cardinality in self._class_profile_dict[class_key][_POS_FEATURES_INVERSE][a_prop_key][a_type_key]:
frequency = self._compute_frequency(number_of_instances,
self._class_profile_dict
[class_key]
[_POS_FEATURES_INVERSE]
[a_prop_key]
[a_type_key]
[a_cardinality])
if frequency >= acceptance_threshold:
result.append(Statement(st_property=a_prop_key,
st_type=a_type_key,
cardinality=a_cardinality,
probability=frequency,
is_inverse=True))
return result
def _build_base_direct_statements(self, acceptance_threshold, class_key, number_of_instances):
result = []
for a_prop_key in self._class_profile_dict[class_key][_POS_FEATURES_DIRECT]:
for a_type_key in self._class_profile_dict[class_key][_POS_FEATURES_DIRECT][a_prop_key]:
for a_cardinality in self._class_profile_dict[class_key][_POS_FEATURES_DIRECT][a_prop_key][a_type_key]:
frequency = self._compute_frequency(number_of_instances,
self._class_profile_dict
[class_key]
[_POS_FEATURES_DIRECT]
[a_prop_key]
[a_type_key]
[a_cardinality])
if frequency >= acceptance_threshold:
result.append(Statement(st_property=a_prop_key,
st_type=a_type_key,
cardinality=a_cardinality,
probability=frequency,
is_inverse=False))
return result
# def _set_serializer_object_for_statements(self, statement):
# statement.serializer_object = BaseStatementSerializer(
# instantiation_property_str=self._instantiation_property_str,
# disable_comments=self._disable_comments,
# is_inverse=statement.is_inverse)
#
# def _get_serializer_for_choice_statement(self):
# return FixedPropChoiceStatementSerializer(
# instantiation_property_str=self._instantiation_property_str,
# disable_comments=self._disable_comments,
# is_inverse=statement.is_inverse)
| 2.109375 | 2 |
bayeso_benchmarks/inf_dim_ackley.py | jungtaekkim/bayeso-benchmarks | 20 | 12768300 | <reponame>jungtaekkim/bayeso-benchmarks
#
# author: <NAME> (<EMAIL>)
# last updated: February 8, 2021
#
import numpy as np
from bayeso_benchmarks.benchmark_base import Function
def fun_target(bx, dim_bx,
a=20.0,
b=0.2,
c=2.0*np.pi
):
assert len(bx.shape) == 1
assert bx.shape[0] == dim_bx
assert isinstance(a, float)
assert isinstance(b, float)
assert isinstance(c, float)
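    # Ackley: -a * exp(-b * ||x||_2 / sqrt(d)) - exp(mean(cos(c * x))) + a + e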
y = -a * np.exp(-b * np.linalg.norm(bx, ord=2, axis=0) * np.sqrt(1.0 / dim_bx)) - np.exp(1.0 / dim_bx * np.sum(np.cos(c * bx), axis=0)) + a + np.exp(1.0)
return y
class Ackley(Function):
def __init__(self, dim_problem, seed=None):
assert isinstance(dim_problem, int)
assert isinstance(seed, (type(None), int))
dim_bx = np.inf
bounds = np.array([
[-32.768, 32.768],
])
global_minimizers = np.array([
[0.0],
])
global_minimum = 0.0
function = lambda bx: fun_target(bx, dim_problem)
Function.__init__(self, dim_bx, bounds, global_minimizers, global_minimum, function, dim_problem=dim_problem, seed=seed)
| 2.28125 | 2 |
TestingScripts/Looped/LoopedBlobDetect/loopedBlobPicsOnly.py | griffincalme/MicroDeconvolution | 2 | 12768301 | <reponame>griffincalme/MicroDeconvolution
#Hematoxylin (Basic blue)= binds to nuclei
#Cytokines are GBI Red Chromagen
#CD3 (TIL) are DAB
import numpy as np
import matplotlib.pyplot as plt
from skimage import data
from skimage.color import separate_stains
from skimage.exposure import rescale_intensity
from skimage.feature import blob_dog
from skimage.color import rgb2grey
from numpy import linalg
import csv
from math import sqrt
import os
#Enter the master directory
PictureDirectory = r'C:\Users\Griffin\Documents\+School\UROP\LumLab\Images\AllUsablePics'
def get_filepaths(directory):
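    """Walk `directory` recursively and return the full path of every file found."""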
file_paths = [] # List which will store all of the full filepaths.
# Walk the tree.
for root, directories, files in os.walk(directory):
for filename in files:
# Join the two strings in order to form the full filepath.
filepath = os.path.join(root, filename)
file_paths.append(filepath) # Add it to the list.
return file_paths # Self-explanatory.
# Run the above function and store its results in a variable.
full_file_paths = get_filepaths(PictureDirectory)
# Main loop of the image analysis
for image_path in full_file_paths:
if image_path.endswith(".jpg"):
try:
pic_file_name = os.path.basename(image_path)
file_path = image_path
#Import picture
ihc_rgb = data.imread(file_path)
# Normalized optical density matrix
# Hematoxylin(0), Red(1), DAB(2)
rgb_from_hrd = np.array([[0.644, 0.710, 0.285],
[0.0326, 0.873, 0.487],
[0.270, 0.562, 0.781]])
hrd_from_rgb = linalg.inv(rgb_from_hrd)
#Stain space conversion
ihc_hrd = separate_stains(ihc_rgb, hrd_from_rgb)
'''DAB'''
#Rescale signals
#[:, :, 012 color]
dab_rescale = rescale_intensity(ihc_hrd[:, :, 2], out_range=(0, 1))
dab_array = np.dstack((np.zeros_like(dab_rescale), dab_rescale, dab_rescale))
#Blob detection
image2d = rgb2grey(dab_array)
blobs_DoG_DAB = blob_dog(image2d, min_sigma=1, max_sigma=25, threshold=.3, overlap=0.9)
blobs_DoG_DAB[:, 2] = blobs_DoG_DAB[:, 2] * sqrt(2)
blobs = [blobs_DoG_DAB]
colors = ['red']
titles = ['Difference of Gaussian: DAB']
sequence = zip(blobs, colors, titles)
for blobs, color, title in sequence:
fig, ax = plt.subplots(1, 1)
ax.set_title(title)
ax.imshow(image2d, interpolation='nearest', cmap=plt.cm.gray)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=1, fill=False)
ax.add_patch(c)
SaveFileName = str(pic_file_name[:-4]) + '_DAB'
plt.savefig(SaveFileName)
plt.close('all')
'''Hematoxylin'''
# Rescale signals
# [:, :, 012 color]
hema_rescale = rescale_intensity(ihc_hrd[:, :, 0], out_range=(0, 1))
hema_array = np.dstack((np.zeros_like(hema_rescale), hema_rescale, hema_rescale))
# Blob detection
image2d = rgb2grey(hema_array)
blobs_DoG_Hema = blob_dog(image2d, min_sigma=10, max_sigma=15, threshold=.1, overlap=0.9)
blobs_DoG_Hema[:, 2] = blobs_DoG_Hema[:, 2] * sqrt(2)
blobs = [blobs_DoG_Hema]
colors = ['red']
titles = ['Difference of Gaussian: Hematoxylin']
sequence = zip(blobs, colors, titles)
for blobs, color, title in sequence:
fig, ax = plt.subplots(1, 1)
ax.set_title(title)
ax.imshow(image2d, interpolation='nearest', cmap=plt.cm.gray)
for blob in blobs:
y, x, r = blob
c = plt.Circle((x, y), r, color=color, linewidth=1, fill=False)
ax.add_patch(c)
SaveFileName = str(pic_file_name[:-4]) + '_Hema'
plt.savefig(SaveFileName)
plt.close('all')
num_blobsD = len(blobs_DoG_DAB)
num_blobsH = len(blobs_DoG_Hema)
print('----------')
print(pic_file_name)
print('The picture was from directory: ' + str(file_path))
print('Number of DAB (CD3+) blobs detected: ' + str(num_blobsD))
print('Number of Hematoxylin (Total Nuclei) blobs detected: ' + str(num_blobsH))
PercentPos = (100 * num_blobsD / num_blobsH) // 1
print('Percentage (DAB/Hematoxylin): ' + str(PercentPos) + '%')
            # check thresholds from highest to lowest so every branch is reachable
            if PercentPos >= 81:
                print('Proportion Score: 5')
            elif PercentPos >= 61:
                print('Proportion Score: 4')
            elif PercentPos >= 41:
                print('Proportion Score: 3')
            elif PercentPos >= 21:
                print('Proportion Score: 2')
            elif PercentPos >= 5:
                print('Proportion Score: 1')
            else:
                print('Proportion Score: 0')
except Exception as e:
pic_file_name = os.path.basename(image_path)
            text_file = open("ErrorImages.txt", "a")  # append so earlier errors are not overwritten
text_file.write("\n error " + str(e) + " in image " + pic_file_name)
text_file.close()
pass | 2.265625 | 2 |
wgan_mnist.py | Fellfalla/minimal_wgan | 0 | 12768302 | """Minimal implementation of Wasserstein GAN for MNIST."""
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.contrib import layers
from tensorflow.examples.tutorials.mnist import input_data
import threading
from rendering import draw_figure, export_video
def leaky_relu(x):
return tf.maximum(x, 0.2 * x)
def generator(z):
with tf.variable_scope('generator'):
z = layers.fully_connected(z, num_outputs=4096)
z = tf.reshape(z, [-1, 4, 4, 256])
z = layers.conv2d_transpose(z, num_outputs=128, kernel_size=5, stride=2)
z = layers.conv2d_transpose(z, num_outputs=64, kernel_size=5, stride=2)
z = layers.conv2d_transpose(z, num_outputs=1, kernel_size=5, stride=2,
activation_fn=tf.nn.sigmoid)
return z[:, 2:-2, 2:-2, :]
def discriminator(x, reuse):
with tf.variable_scope('discriminator', reuse=reuse):
x = layers.conv2d(x, num_outputs=64, kernel_size=5, stride=2,
activation_fn=leaky_relu)
x = layers.conv2d(x, num_outputs=128, kernel_size=5, stride=2,
activation_fn=leaky_relu)
x = layers.conv2d(x, num_outputs=256, kernel_size=5, stride=2,
activation_fn=leaky_relu)
x = layers.flatten(x)
return layers.fully_connected(x, num_outputs=1, activation_fn=None)
############# Create Tensorflow Graph ###############
with tf.name_scope('placeholders'):
x_true = tf.placeholder(tf.float32, [None, 28, 28, 1])
z = tf.placeholder(tf.float32, [None, 128])
x_generated = generator(z)
d_true = discriminator(x_true, reuse=False)
d_generated = discriminator(x_generated, reuse=True)
with tf.name_scope('regularizer'):
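    # WGAN-GP gradient penalty: penalize deviation of the critic's gradient norm
    # from 1 at points interpolated between real and generated samples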
epsilon = tf.random_uniform([50, 1, 1, 1], 0.0, 1.0)
x_hat = epsilon * x_true + (1 - epsilon) * x_generated
d_hat = discriminator(x_hat, reuse=True)
gradients = tf.gradients(d_hat, x_hat)[0]
ddx = tf.sqrt(tf.reduce_sum(gradients ** 2, axis=[1, 2]))
d_regularizer = tf.reduce_mean((ddx - 1.0) ** 2)
with tf.name_scope('loss'):
g_loss = tf.reduce_mean(d_generated)
d_loss = (tf.reduce_mean(d_true) - tf.reduce_mean(d_generated) +
10 * d_regularizer)
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(learning_rate=1e-4, beta1=0, beta2=0.9)
g_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='generator')
g_train = optimizer.minimize(g_loss, var_list=g_vars)
d_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='discriminator')
d_train = optimizer.minimize(d_loss, var_list=d_vars)
#####################################################
############# Initialize Variables ###############
session = tf.InteractiveSession()
# session = tf.InteractiveSession(config=tf.ConfigProto(log_device_placement=True))
tf.global_variables_initializer().run()
mnist = input_data.read_data_sets('MNIST_data')
generated_images = []
export_video_nth_frame = 30
height, width, channels = (28, 28, 1)
#####################################################
############# Start Rendering Thread ###############
drawing_thread = threading.Thread(target=draw_figure, args=(generated_images,))
drawing_thread.setDaemon(True)
drawing_thread.start()
#####################################################
############# Train ###############
for i in range(20000):
batch = mnist.train.next_batch(50)
images = batch[0].reshape([-1, height, width, channels])
z_train = np.random.randn(50, 128)
session.run(g_train, feed_dict={z: z_train})
for j in range(5):
session.run(d_train, feed_dict={x_true: images, z: z_train})
print('iter={}/20000'.format(i))
z_validate = np.random.randn(1, 128)
generated = x_generated.eval(feed_dict={z: z_validate}).squeeze()
generated = np.uint8(generated*255) # hand over to thread
generated_images.append(generated)
    if i % export_video_nth_frame == 0:
        export_video(generated_images)
#####################################################
################ Finalize #####################
export_video(generated_images)
#####################################################
| 2.78125 | 3 |
polyaxon/scheduler/spawners/horovod_spawner.py | wbuchwalter/polyaxon | 0 | 12768303 | from polyaxon_schemas.environments import HorovodClusterConfig
from polyaxon_schemas.polyaxonfile.specification.frameworks import HorovodSpecification
from polyaxon_schemas.utils import TaskType
from scheduler.spawners.experiment_spawner import ExperimentSpawner
class HorovodSpawner(ExperimentSpawner):
MASTER_SERVICE = True
WORKER_SERVICE = True
@property
def resources(self):
cluster, is_distributed, = self.spec.cluster_def
worker_resources = HorovodSpecification.get_worker_resources(
environment=self.spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
return {
TaskType.MASTER: {0: self.spec.master_resources},
TaskType.WORKER: worker_resources,
}
@property
def node_selectors(self):
cluster, is_distributed, = self.spec.cluster_def
worker_node_selectors = HorovodSpecification.get_worker_node_selectors(
environment=self.spec.environment,
cluster=cluster,
is_distributed=is_distributed
)
return {
TaskType.MASTER: {0: self.spec.master_node_selectors},
TaskType.WORKER: worker_node_selectors,
}
def get_resources(self, task_type, task_idx):
return self.resources.get(task_type, {}).get(task_idx)
def get_node_selectors(self, task_type, task_idx):
return self.node_selectors.get(task_type, {}).get(task_idx)
def get_n_pods(self, task_type):
return self.spec.cluster_def[0].get(task_type, 0)
def start_experiment(self):
experiment = super().start_experiment()
experiment[TaskType.WORKER] = self.create_multi_jobs(task_type=TaskType.WORKER,
add_service=self.WORKER_SERVICE)
return experiment
def stop_experiment(self):
super().stop_experiment()
self.delete_multi_jobs(task_type=TaskType.WORKER, has_service=self.WORKER_SERVICE)
def get_cluster(self):
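        """Build the Horovod cluster definition mapping master/worker task types to pod addresses."""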
cluster_def, _ = self.spec.cluster_def
job_name = self.pod_manager.get_job_name(task_type=TaskType.MASTER, task_idx=0)
cluster_config = {
TaskType.MASTER: [self._get_pod_address(job_name)]
}
workers = []
for i in range(cluster_def.get(TaskType.WORKER, 0)):
job_name = self.pod_manager.get_job_name(task_type=TaskType.WORKER, task_idx=i)
workers.append(self._get_pod_address(job_name))
cluster_config[TaskType.WORKER] = workers
return HorovodClusterConfig.from_dict(cluster_config).to_dict()
| 1.90625 | 2 |
src/runners/iris_runner.py | shivanip14/semisupclustering | 0 | 12768304 | from src.seededkm.seededkm import SeededKMeans
from src.constrainedkm.constrainedkm import ConstrainedKMeans
from sklearn import datasets
from src.utils.runnerutils import run_algo, run_KMeans
def cluster(n_clusters, seed_fraction, noise_fraction, incompleteness_fraction, manually_annotate, n_fold, run_KM):
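    """Run seeded and constrained k-means on the Iris dataset (optionally plain
    k-means as a baseline) and return the ARI/AMI scores of each."""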
iris = datasets.load_iris()
X = iris.data
y = iris.target
skm = SeededKMeans(seed_fraction, noise_fraction, incompleteness_fraction, n_clusters, 'iris')
seeded_ari, seeded_ami = run_algo(skm, 'Seeded K-Means', X, y, n_fold, manually_annotate)
skm.visualise_results()
ckm = ConstrainedKMeans(seed_fraction, noise_fraction, incompleteness_fraction, n_clusters, 'iris')
constrained_ari, constrained_ami = run_algo(ckm, 'Constrained K-Means', X, y, n_fold, manually_annotate)
ckm.visualise_results()
kmeans_ari = 0
kmeans_ami = 0
if run_KM:
kmeans_ari, kmeans_ami = run_KMeans(X, y, n_clusters, n_fold)
return seeded_ari, seeded_ami, constrained_ari, constrained_ami, kmeans_ari, kmeans_ami
| 2.546875 | 3 |
pycomponents/html/__init__.py | expert-m/pycomponents | 0 | 12768305 | from .base import *
from .basic import *
from .forms import *
from .frames import *
from .images import *
from .audio_and_video import *
from .links import *
from .lists import *
from .tables import *
from .semantics import *
from .meta import *
from .formatting import *
from .programming import *
from .other import *
| 0.917969 | 1 |
electrum_gui/common/basic/request/exceptions.py | huazhouwang/electrum | 0 | 12768306 | <gh_stars>0
from requests import Response
class ResponseException(IOError):
def __init__(self, message: str, response: Response):
self.message = message
self.response = response
super(ResponseException, self).__init__(message)
| 2.5625 | 3 |
tugs/utils.py | johnyf/gr1experiments | 3 | 12768307 | """Parsing of signal logs from experiments, and version logging."""
import datetime
import importlib
import json
import logging
import os
import pprint
import subprocess
import time
import git
import numpy as np
# these should be moved to other (optional) module
from openpromela import logic
from openpromela import slugs
logger = logging.getLogger(__name__)
CONFIG_FILE = 'config.json'
def git_version(path):
"""Return SHA-dirty for repo under `path`."""
repo = git.Repo(path)
sha = repo.head.commit.hexsha
dirty = repo.is_dirty()
return sha + ('-dirty' if dirty else '')
def snapshot_versions(check=True):
"""Log versions of software used."""
d = dict()
d['slugs'] = slugs_version()
# versions of python packages
packages = [
'dd', 'omega', 'tugs',
'openpromela', 'promela']
for s in packages:
pkg = importlib.import_module(s)
d[s] = pkg.__version__
t_now = time.strftime('%Y-%b-%d-%A-%T-%Z')
d['time'] = t_now
d['platform'] = os.uname()
if not check:
return d
# existing log ?
try:
with open(CONFIG_FILE, 'r') as f:
d_old = json.load(f)
except IOError:
d_old = None
# check versions
compare = list(packages)
compare.append('slugs')
if d_old is not None:
for k in compare:
assert d[k] == d_old[k], (
('versions differ from {cfg}:\n\n'
'NEW: {d}'
'\n -----\n\n'
'OLD: {d_old}').format(
cfg=CONFIG_FILE,
d=pprint.pformat(d),
d_old=pprint.pformat(d_old)))
# dump
with open(CONFIG_FILE, 'w') as f:
json.dump(d, f, indent=4)
return d
def slugs_version():
cmd = ['slugs', '--version']
try:
p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
except OSError as e:
if e.errno == os.errno.ENOENT:
print('Warning: `slugs` not found on path')
return
else:
raise
p.wait()
if p.returncode != 0:
print('`{cmd}` returned {r}'.format(
cmd=' '.join(cmd),
r=p.returncode))
return
version = p.stdout.read().strip()
return version
def add_logfile(fname, logger_name):
h = logging.FileHandler(fname, mode='w')
log = logging.getLogger(logger_name)
log.addHandler(h)
return h
def close_logfile(h, logger_name):
log = logging.getLogger(logger_name)
log.removeHandler(h)
h.close()
def load_log_file(fname):
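    """Parse a log of printed sample dicts into per-signal time/value arrays."""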
data = dict()
with open(fname, 'r') as f:
for line in f:
if "'time'" not in line:
continue
try:
d = eval(line)
split_data(d, data)
except:
continue
for k, v in data.iteritems():
for q, r in v.iteritems():
try:
data[k][q] = np.array(r, dtype=float)
except:
pass
return data
def split_data(d, data):
"""Store sample in `d` as a signal in `data`.
@type d: `dict`
@type data: `dict(dict(time=list(), value=list()))`
"""
t = d['time']
for k, v in d.iteritems():
if k == 'time':
continue
# is a signal
# new ?
if k not in data:
data[k] = dict(time=list(), value=list())
data[k]['time'].append(t)
data[k]['value'].append(v)
def get_signal(name, data):
return data[name]['time'], data[name]['value']
def inspect_data(data):
for k in data:
t = data[k]['time']
v = data[k]['value']
print(k, len(t), len(v))
def translate_promela_to_slugsin(code):
"""Return SlugsIn code from Promela `code`."""
t0 = time.time()
spec = logic.compile_spec(code)
aut = slugs._symbolic._bitblast(spec)
s = slugs._to_slugs(aut)
t1 = time.time()
dt = datetime.timedelta(seconds=t1 - t0)
logger.info('translated Promela -> SlugsIn in {dt}.'.format(dt=dt))
return s
| 2.171875 | 2 |
fairness/algorithms/zafar/fair-classification-master/preferential_fairness/synthetic_data_demo/plot_synthetic_boundaries.py | yashwarlord/fairness-comparison | 146 | 12768308 | <gh_stars>100-1000
import matplotlib
import matplotlib.pyplot as plt # for plotting stuff
import os
import numpy as np
matplotlib.rcParams['text.usetex'] = True # for type-1 fonts
def get_line_coordinates(w, x1, x2):
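    """Solve w[0] + w[1]*x + w[2]*y = 0 for y at the two given x values."""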
y1 = (-w[0] - (w[1] * x1)) / w[2]
y2 = (-w[0] - (w[1] * x2)) / w[2]
return y1,y2
def plot_data(X, y, x_sensitive, w_arr, label_arr, lt_arr, fname, title, group=None):
# print fp_fn_arr
plt.figure()
num_to_draw = 200 # we will only draw a small number of points to avoid clutter
fs = 20 # font size for labels and legends
x_draw = X[:num_to_draw]
y_draw = y[:num_to_draw]
x_sensitive_draw = x_sensitive[:num_to_draw]
x_lim = [min(x_draw[:,-2]) - np.absolute(0.3*min(x_draw[:,-2])), max(x_draw[:,-2]) + np.absolute(0.5 * max(x_draw[:,-2]))]
y_lim = [min(x_draw[:,-1]) - np.absolute(0.3*min(x_draw[:,-1])), max(x_draw[:,-1]) + np.absolute(0.7 * max(x_draw[:,-1]))]
X_s_0 = x_draw[x_sensitive_draw == 0.0]
X_s_1 = x_draw[x_sensitive_draw == 1.0]
y_s_0 = y_draw[x_sensitive_draw == 0.0]
y_s_1 = y_draw[x_sensitive_draw == 1.0]
if w_arr is not None: # we are plotting the boundaries of a trained classifier
plt.scatter(X_s_0[y_s_0==1.0][:, -2], X_s_0[y_s_0==1.0][:, -1], color='green', marker='x', s=70, linewidth=2)
plt.scatter(X_s_0[y_s_0==-1.0][:, -2], X_s_0[y_s_0==-1.0][:, -1], color='red', marker='x', s=70, linewidth=2)
plt.scatter(X_s_1[y_s_1==1.0][:, -2], X_s_1[y_s_1==1.0][:, -1], color='green', marker='o', facecolors='none', s=70, linewidth=2)
plt.scatter(X_s_1[y_s_1==-1.0][:, -2], X_s_1[y_s_1==-1.0][:, -1], color='red', marker='o', facecolors='none', s=70, linewidth=2)
for i in range(0, len(w_arr)):
w = w_arr[i]
l = label_arr[i]
lt = lt_arr[i]
x1,x2 = min(x_draw[:,1]), max(x_draw[:,1])
y1,y2 = get_line_coordinates(w, x1, x2)
plt.plot([x1,x2], [y1,y2], lt, linewidth=3, label = l)
plt.title(title, fontsize=fs)
else: # just plotting the data
plt.scatter(X_s_0[y_s_0==1.0][:, -2], X_s_0[y_s_0==1.0][:, -1], color='green', marker='x', s=70, linewidth=2, label= "group-0 +ve")
plt.scatter(X_s_0[y_s_0==-1.0][:, -2], X_s_0[y_s_0==-1.0][:, -1], color='red', marker='x', s=70, linewidth=2, label= "group-0 -ve")
plt.scatter(X_s_1[y_s_1==1.0][:, -2], X_s_1[y_s_1==1.0][:, -1], color='green', marker='o', facecolors='none', s=70, linewidth=2, label= "group-1 +ve")
plt.scatter(X_s_1[y_s_1==-1.0][:, -2], X_s_1[y_s_1==-1.0][:, -1], color='red', marker='o', facecolors='none', s=70, linewidth=2, label= "group-1 -ve")
if True: # turn the ticks on or off
plt.tick_params(axis='x', which='both', bottom='off', top='off', labelbottom='off') # dont need the ticks to see the data distribution
plt.tick_params(axis='y', which='both', left='off', right='off', labelleft='off')
plt.legend(loc=2, fontsize=fs)
plt.xlim(x_lim)
plt.ylim(y_lim)
plt.savefig(fname)
plt.show()
| 2.640625 | 3 |
test/test_coverage_wrapper.py | vasili-v/distcovery | 0 | 12768309 | <filename>test/test_coverage_wrapper.py<gh_stars>0
import unittest
import sys
import StringIO
import __builtin__
import os
from distutils import log
# Reload module to run its global section under coverage supervision
import distcovery.coverage_wrapper
reload(distcovery.coverage_wrapper)
from distcovery.coverage_wrapper import _DummyCoverage, Coverage, \
_NO_COVERAGE_PACKAGE_WARNING
class _MockDistribution(object):
def __init__(self, py_modules=None, packages=None):
self.py_modules = py_modules if py_modules else []
self.packages = packages if packages else []
class Test_DummyCoverage(unittest.TestCase):
def test_creation(self):
coverage = _DummyCoverage(source=[])
self.assertTrue(isinstance(coverage, _DummyCoverage))
def test_start(self):
coverage = _DummyCoverage()
self.assertEqual(coverage.start(), None)
def test_stop(self):
coverage = _DummyCoverage()
self.assertEqual(coverage.stop(), None)
def test_report(self):
coverage = _DummyCoverage()
self.assertEqual(coverage.report(), None)
class _MockCoverageModule(object):
def __init__(self, coverage):
self.coverage = coverage
class _MockCoverage(object):
def __init__(self):
self.creations = []
self.starts = 0
self.stops = 0
def __call__(self, *args, **kwargs):
self.creations.append((args, kwargs))
return self
def start(self):
        self.starts += 1
def stop(self):
self.stops += 1
def report(self):
print '\tThe report'
class TestCoverage(unittest.TestCase):
def setUp(self):
super(TestCoverage, self).setUp()
self.__threshold = log.set_threshold(log.INFO)
self.__stdout = sys.stdout
self.stdout = StringIO.StringIO()
sys.stdout = self.stdout
self.__stderr = sys.stderr
self.stderr = StringIO.StringIO()
sys.stderr = self.stderr
self.__import = __builtin__.__import__
def tearDown(self):
__builtin__.__import__ = self.__import
sys.stderr = self.__stderr
sys.stdout = self.__stdout
log.set_threshold(self.__threshold)
super(TestCoverage, self).tearDown()
def __no_coverage_import(self, name, *args):
if name == 'coverage':
raise ImportError('test')
return self.__import(name, *args)
def __mock_coverage_import(self, name, *args):
if name == 'coverage':
return _MockCoverageModule(self.__coverage)
return self.__import(name, *args)
def test_creation_disabled(self):
__builtin__.__import__ = self.__no_coverage_import
coverage = Coverage(True, '', _MockDistribution())
self.assertTrue(isinstance(coverage, Coverage))
self.assertEqual(self.stderr.getvalue(), '')
def test_creation_no_coverage(self):
__builtin__.__import__ = self.__no_coverage_import
coverage = Coverage(False, '', _MockDistribution())
self.assertTrue(isinstance(coverage, Coverage))
self.assertEqual(self.stderr.getvalue(),
_NO_COVERAGE_PACKAGE_WARNING % 'test' + '\n')
def test_creation(self):
self.__coverage = _MockCoverage()
__builtin__.__import__ = self.__mock_coverage_import
coverage = Coverage(False, 'test',
_MockDistribution(['xxx', 'yyy', 'zzz'],
['xxx', 'xxx.yyy', 'yyy']))
self.assertTrue(isinstance(coverage, Coverage))
self.assertEqual(self.__coverage.creations,
[((), {'source': [os.path.join('test', 'xxx.py'),
os.path.join('test', 'yyy.py'),
os.path.join('test', 'zzz.py'),
os.path.join('test', 'xxx'),
os.path.join('test', 'yyy')]})])
def test_context(self):
self.__coverage = _MockCoverage()
__builtin__.__import__ = self.__mock_coverage_import
first_path = sys.path[0]
test_path = os.path.join(first_path, 'test')
coverage = Coverage(False, test_path, _MockDistribution(['xxx']))
self.assertEqual(self.__coverage.starts, 0)
self.assertEqual(self.__coverage.stops, 0)
with coverage:
self.assertEqual(sys.path[0], test_path)
self.assertEqual(self.__coverage.starts, 1)
self.assertEqual(self.__coverage.stops, 0)
self.assertEqual(sys.path[0], first_path)
self.assertEqual(self.__coverage.starts, 1)
self.assertEqual(self.__coverage.stops, 1)
def test_report(self):
self.__coverage = _MockCoverage()
__builtin__.__import__ = self.__mock_coverage_import
coverage = Coverage(False, 'test', _MockDistribution(['xxx']))
coverage.report()
self.assertEqual(self.stdout.getvalue(),
'\nCoverage report:\n\tThe report\n')
def test_report_coverage_disabled(self):
coverage = Coverage(True, '', _MockDistribution())
coverage.report()
self.assertEqual(self.stdout.getvalue(), '')
if __name__ == '__main__':
unittest.main()
| 2.203125 | 2 |
evtx.py | analyzeDFIR/analyzeEVTX-structures | 0 | 12768310 | ## -*- coding: UTF-8 -*-
## evtx.py
##
## Copyright (c) 2018 analyzeDFIR
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files (the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
try:
from shared_structures.windows.misc import *
except ImportError:
from .shared_structures.windows.misc import *
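
'''
EVTX Record Header: header of an event record
    Signature: signature of an event record (constant: '\x2a\x2a\x00\x00')
    Size: size of the event record in bytes
    EventRecordID: identifier of the event record
    RawWriteTime: FILETIME of when the record was written
'''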
EVTXRecordHeader = Struct(
'Signature' / Const(b'\x2a\x2a\x00\x00'),
'Size' / Int32ul,
'EventRecordID' / Int64ul,
'RawWriteTime' / NTFSFILETIME
)
'''
EVTX Chunk Header: evtx data chunk header
Signature: signature of evtx header (constant: 'ElfChnk\x00')
FirstEventRecordNumber: number of first event record in data chunk
LastEventRecordNumber: number of last event record in data chunk
FirstEventRecordID: ID of first event record in data chunk
LastEventRecordID: ID of last event record in data chunk
HeaderSize: size of chunk header (constant: 128 bytes)
LastEventRecordOffset: offset from beginning of chunk header to data of last event record in chunk
FreeSpaceOffset: offset to free space in the chunk
EventRecordsChecksum: CRC32 checksum of event records data
Checksum: CRC32 checksum of first 120 bytes of header and bytes 128 to 512 of the chunk
'''
EVTXChunkHeader = Struct(
'Signature' / Const(b'ElfChnk\x00'),
'FirstEventRecordNumber' / Int64ul,
'LastEventRecordNumber' / Int64ul,
'FirstEventRecordID' / Int64ul,
'LastEventRecordID' / Int64ul,
'HeaderSize' / Const(128, Int32ul),
'LastEventRecordOffset' / Int32ul,
'FreeSpaceOffset' / Int32ul,
'EventRecordsChecksum' / Int32ul,
Padding(68),
'Checksum' / Int32ul,
Padding(384)
)
'''
EVTX File Header: header of evtx file
Signature: signature of evtx file (constant: 'ElfFile\x00')
FirstChunkNumber: number of first chunk in file
LastChunkNumber: number of last chunk in file
NextRecordID: event log record ID of next record to be written
HeaderSize: used size of evtx file header (constant: 128 bytes)
MinorVersion: minor version of evtx format (constant: 1)
MajorVersion: major version of evtx format (constant: 3)
FirstChunkOffset: offset from beginning of file to first data chunk (constant: 4096 bytes)
Flags: flags indicating whether the file is dirty and/or full
Checksum: CRC32 checksum of first 120 bytes of the file header
'''
EVTXFileHeader = Struct(
'Signature' / Const(b'ElfFile\x00'),
'FirstChunkNumber' / Int64ul,
'LastChunkNumber' / Int64ul,
'NextEventRecordID' / Int64ul,
'HeaderSize' / Const(128, Int32ul),
'MinorVersion' / Int16ul,
'MajorVersion' / Int16ul,
'FirstChunkOffset' / Const(4096, Int16ul),
'ChunkCount' / Int16ul,
Padding(76),
'Flags' / FlagsEnum(Int32ul,\
DIRTY = 0x0001,
FULL = 0x0002\
),
'Checksum' / Int32ul,
Padding(3968)
)
| 1.570313 | 2 |
intent/judge_intent.py | ricosr/travel_consult_chatbot | 0 | 12768311 | <gh_stars>0
# -*- coding: utf-8 -*-
from rasa.nlu.model import Interpreter
from intent.intent_temrs import intent_terms_dict
class Intent:
def __init__(self, model_name='models1'):
self.interpreter = Interpreter.load("intent/{}/nlu".format(model_name))
self.threshold = 0.7 # TODO: need to test
def get_intent(self, utterance):
intent_dict = self.interpreter.parse(utterance)
intent = intent_dict["intent"]["name"]
confidence = intent_dict["intent"]["confidence"]
        print("intent confidence:", confidence)
        # Rule-based override: if the utterance contains a known term for an
        # intent, that intent wins regardless of the model's confidence (this
        # also covers the low-confidence case, so no separate threshold
        # branch is needed).
        for rule_intent, terms_ls in intent_terms_dict.items():
            for term in terms_ls:
                if term in utterance:
                    return rule_intent, intent_dict["entities"]
for entity in intent_dict["entities"]:
if entity["entity"] == "food":
intent = "consult_food"
for entity in intent_dict["entities"]:
if entity["entity"] == "departure" or entity["entity"] == "destination":
intent = "consult_traffic"
return intent, intent_dict["entities"]
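# Example usage (assumes a trained Rasa NLU model saved under intent/models1/nlu):
#   parser = Intent()
#   intent, entities = parser.get_intent("some user utterance")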
| 2.78125 | 3 |
getLogFilePath.py | mismailzz/Extract-filenames-from-Strace-result | 0 | 12768312 | <filename>getLogFilePath.py
from array import *
filepath = '/root/Desktop/output.txt'
# column indexes into the state-transition table
quote_index = 0
slash_index = 1
alpha_numeric = 2
# 2D state-transition table for matching quoted file paths;
# -1 means there is no valid transition for that state/input pair
table = [[1, -1, -1], [-1, 2, -1], [-1, -1, 3], [4, 2, 3]]
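# States (a sketch of the automaton, derived by reading the table above):
#   0: start              -> 1 on '"'
#   1: after opening '"'  -> 2 on '/'
#   2: after '/'          -> 3 on an alphanumeric character
#   3: inside a path name -> 4 (accept) on '"', 2 on '/', 3 on alphanumeric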
path_list = [] #to store table paths
with open(filepath) as fp:
line = fp.readline()
while line:
line = fp.readline() #read line by line from file
#print(line)
if(("(No such file or directory)" in line) == False):
temp_string = ""
flag = 0
current_index_row = 0
current_index_col = 0
count = 0
special_chFlag = 0 #special character flag to redefine the state 3
for i in range(len(line)):
if(line[i] == "\""):
current_index_row = table[current_index_row][quote_index]
temp_string = temp_string + "\""
count = i + 1
#print(temp_string)
while(True):
#print(current_index_row, ":" ,current_index_col)
if(line[count] == "\""):
if(current_index_row == 4):
flag = 1
break
current_index_col = quote_index
current_index_row = table[current_index_row][current_index_col]
temp_string = temp_string + "\""
#print(temp_string)
if(current_index_row == -1):
break
elif( (line[count].isalpha() or line[count].isdigit()) and special_chFlag == 0 ):
special_chFlag = 1
# now we allow special character to come
if(current_index_row == 4):
flag = 1
break
current_index_col = alpha_numeric
current_index_row = table[current_index_row][current_index_col]
temp_string = temp_string + line[count]
#print(temp_string)
if(current_index_row == -1):
break
                    # Same as the branch above, but after the first alphanumeric character we also
                    # accept special characters; backslash stays excluded since it would act as a
                    # directory separator, and Linux does not accept it in renames either.
elif( (line[count].isalpha() or line[count].isdigit() or line[count] in "`~!@#$%^&*()_+=-[]{}|;:'\"/?.>,<") and special_chFlag == 1 ):
if(current_index_row == 4):
flag = 1
break
current_index_col = alpha_numeric
current_index_row = table[current_index_row][current_index_col]
temp_string = temp_string + line[count]
#print(temp_string)
if(current_index_row == -1):
break
elif(line[count] == "/"):
if(current_index_row == 4):
flag = 1
break
current_index_col = slash_index
current_index_row = table[current_index_row][current_index_col]
temp_string = temp_string + "/"
#print(temp_string)
if(current_index_row == -1):
break
count = count + 1
if(flag == 1):
break
if(current_index_row == 4 and flag == 1 and len(temp_string)>3 ):
#print(temp_string) #print paths
path_list.append(temp_string) #store paths in a list
print("Before Number of Paths: ", len(path_list))
print("----------------------------------------")
print("Remove Redundancy")
print("----------------------------------------")
path_list = list(set(path_list))
print("After Number of Paths: ", len(path_list))
print("----------------------------------------")
for i in range(len(path_list)):
print(path_list[i])
| 3.140625 | 3 |
tests/server/test_aiohttp.py | dapper91/pjrpc | 10 | 12768313 | import pytest
from aiohttp import web
from pjrpc import exc
from pjrpc.common import v20
from pjrpc.server.integration import aiohttp as integration
from tests.common import _
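# Note: these tests rely on the pytest-mock plugin for the `mocker` fixture and
# on aiohttp's pytest plugin (pytest-aiohttp) for the `aiohttp_client` fixture.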
@pytest.fixture
def path():
return '/test/path'
@pytest.fixture
def json_rpc(path):
json_rpc = integration.Application(path)
return json_rpc
@pytest.mark.parametrize(
'request_id, params, result', [
(
1,
(1, 1.1, 'str', {}, False),
[1, 1.1, 'str', {}, False],
),
(
'abc',
{'int': 1, 'float': 1.1, 'str': 'str', 'dict': {}, 'bool': False},
{'int': 1, 'float': 1.1, 'str': 'str', 'dict': {}, 'bool': False},
),
],
)
async def test_request(json_rpc, path, mocker, aiohttp_client, request_id, params, result):
method_name = 'test_method'
mock = mocker.Mock(name=method_name, return_value=result)
json_rpc.dispatcher.add(mock, method_name)
cli = await aiohttp_client(json_rpc.app)
raw = await cli.post(path, json=v20.Request(method=method_name, params=params, id=request_id).to_json())
assert raw.status == 200
resp = v20.Response.from_json(await raw.json())
if isinstance(params, dict):
mock.assert_called_once_with(kwargs=params)
else:
mock.assert_called_once_with(args=params)
assert resp.id == request_id
assert resp.result == result
async def test_notify(json_rpc, path, mocker, aiohttp_client):
params = [1, 2]
method_name = 'test_method'
mock = mocker.Mock(name=method_name, return_value='result')
json_rpc.dispatcher.add(mock, method_name)
cli = await aiohttp_client(json_rpc.app)
raw = await cli.post(path, json=v20.Request(method=method_name, params=params).to_json())
assert raw.status == 200
assert raw.content_type != 'application/json'
assert await raw.read() == b''
async def test_errors(json_rpc, path, mocker, aiohttp_client):
request_id = 1
params = (1, 2)
method_name = 'test_method'
def error_method(*args, **kwargs):
raise exc.JsonRpcError(code=1, message='message')
mock = mocker.Mock(name=method_name, side_effect=error_method)
json_rpc.dispatcher.add(mock, method_name)
cli = await aiohttp_client(json_rpc.app)
# method not found
raw = await cli.post(path, json=v20.Request(method='unknown_method', params=params, id=request_id).to_json())
assert raw.status == 200
resp = v20.Response.from_json(await raw.json())
    assert resp.id == request_id
assert resp.is_error is True
assert resp.error == exc.MethodNotFoundError(data="method 'unknown_method' not found")
# customer error
raw = await cli.post(path, json=v20.Request(method=method_name, params=params, id=request_id).to_json())
assert raw.status == 200
resp = v20.Response.from_json(await raw.json())
mock.assert_called_once_with(args=params)
assert resp.id == request_id
assert resp.is_error is True
assert resp.error == exc.JsonRpcError(code=1, message='message')
# content type error
raw = await cli.post(path, data='')
assert raw.status == 415
# malformed json
raw = await cli.post(path, headers={'Content-Type': 'application/json'}, data='')
assert raw.status == 200
resp = v20.Response.from_json(await raw.json())
assert resp.id is None
assert resp.is_error is True
assert resp.error == exc.ParseError(data=_)
# decoding error
raw = await cli.post(path, headers={'Content-Type': 'application/json'}, data=b'\xff')
assert raw.status == 400
async def test_context(json_rpc, path, mocker, aiohttp_client):
request_id = 1
params = (1, 2)
method_name = 'test_method'
# test list parameters
mock = mocker.Mock(name=method_name, return_value='result')
json_rpc.dispatcher.add(mock, method_name, context='request')
cli = await aiohttp_client(json_rpc.app)
raw = await cli.post(path, json=v20.Request(method=method_name, params=params, id=request_id).to_json())
assert raw.status == 200
mock.assert_called_once()
call_args = mock.call_args[1]
context, args = call_args['request'], call_args['args']
assert isinstance(context, web.Request)
assert args == params
# test dict parameters
params = {'param1': 1, 'param2': 2}
mock.reset_mock()
cli = await aiohttp_client(json_rpc.app)
raw = await cli.post(path, json=v20.Request(method=method_name, params=params, id=request_id).to_json())
assert raw.status == 200
mock.assert_called_once()
call_args = mock.call_args[1]
context, kwargs = call_args['request'], call_args['kwargs']
assert isinstance(context, web.Request)
assert kwargs == params
| 2.0625 | 2 |
share/extended-cpt/analysis/vis/run.py | ucla-pbpl/pbpl-compton | 2 | 12768314 | #!/usr/bin/env python
import os
import toml
import sys
os.system('rm -f *wrl *h5')
# print('### RUNNING GEANT4 (design.wrl) ###')
# conf = toml.load('sfqed.toml')
# A = conf['PrimaryGenerator']
# A['PythonGenerator'] = 'sfqed.pattern_spray'
# A['NumEvents'] = 100
# with open('temp.toml', 'w') as fout:
# toml.dump(conf, fout)
# os.system('pbpl-compton-mc temp.toml vis.mac > /dev/null 2>&1')
# os.system('pbpl-compton-extrude-vrml g4_00.wrl --radius=0.2 --num-points=8 --output=design.wrl')
# os.system('rm -f temp.toml g4*wrl')
print('### RUNNING GEANT4 (gamma-10MeV.wrl) ###')
conf = toml.load('sfqed.toml')
A = conf['PrimaryGenerator']
A['PythonGenerator'] = 'pbpl.compton.generators.repeater'
A['PythonGeneratorArgs'] = ['gamma', '10*MeV', '[0,0,-100*mm]', '[0,0,1]']
A['NumEvents'] = 20000
with open('temp.toml', 'w') as fout:
toml.dump(conf, fout)
os.system('pbpl-compton-mc temp.toml vis.mac')
#os.system('pbpl-compton-mc temp.toml vis.mac > /dev/null 2>&1')
os.system('pbpl-compton-extrude-vrml g4_00.wrl --radius=0.8 --num-points=8 --output=gamma-10MeV.wrl')
# os.system('rm -f temp.toml g4*wrl')
# print('### RUNNING GEANT4 (gamma-2GeV.wrl) ###')
# conf = toml.load('sfqed.toml')
# A = conf['PrimaryGenerator']
# A['PythonGenerator'] = 'pbpl.compton.generators.repeater'
# A['PythonGeneratorArgs'] = ['gamma', '2*GeV', '[0,0,-100*mm]', '[0,0,1]']
# A['NumEvents'] = 20000
# with open('temp.toml', 'w') as fout:
# toml.dump(conf, fout)
# os.system('pbpl-compton-mc temp.toml vis.mac')
# #os.system('pbpl-compton-mc temp.toml vis.mac > /dev/null 2>&1')
# os.system('pbpl-compton-extrude-vrml g4_00.wrl --radius=0.8 --num-points=8 --output=gamma-2GeV.wrl')
# os.system('rm -f temp.toml g4*wrl')
| 2.15625 | 2 |
interbotix_ros_xslocobots/examples/python_demos/joint_position_control.py | jywilson2/interbotix_ros_rovers | 13 | 12768315 | from interbotix_xs_modules.locobot import InterbotixLocobotXS
# This script commands some arbitrary positions to the arm joints
#
# To get started, open a terminal and type...
# 'roslaunch interbotix_xslocobot_control xslocobot_python.launch robot_model:=locobot_wx250s show_lidar:=true'
# Then change to this directory and type 'python joint_position_control.py'
def main():
joint_positions = [-1.0, 0.5 , 0.5, 0, -0.5, 1.57]
locobot = InterbotixLocobotXS(robot_model="locobot_wx250s", arm_model="mobile_wx250s")
locobot.arm.go_to_home_pose()
locobot.arm.set_joint_positions(joint_positions)
locobot.arm.go_to_home_pose()
locobot.arm.go_to_sleep_pose()
if __name__=='__main__':
main()
| 2.53125 | 3 |
src/bin/algos_contrib/LatentDirichletAllocation.py | xynazog/mltk-algo-contrib | 65 | 12768316 | '''
Once newer version of sklearn is used will need to change k alias from n_topics to n_components
https://stackoverflow.com/a/48121678
'''
from sklearn.decomposition import LatentDirichletAllocation as _LatentDirichletAllocation
from base import BaseAlgo, TransformerMixin
from codec import codecs_manager
from util.param_util import convert_params
class LatentDirichletAllocation(TransformerMixin, BaseAlgo):
def __init__(self, options):
self.handle_options(options)
out_params = convert_params(
options.get('params', {}),
floats=['doc_topic_prior','learning_decay','learning_offset','perp_tol','mean_change_tol'],
strs=['learning_method'],
ints=['k','max_iter','batch_size','evaluate_every','total_samples','max_doc_update_iter','n_jobs','verbose','random_state'],
aliases={'k': 'n_topics'}
)
self.estimator = _LatentDirichletAllocation(**out_params)
def rename_output(self, default_names, new_names):
if new_names is None:
new_names = 'LDA'
        output_names = ['{}_{}'.format(new_names, i + 1) for i in range(len(default_names))]
return output_names
@staticmethod
def register_codecs():
from codec.codecs import SimpleObjectCodec
codecs_manager.add_codec('algos_contrib.LatentDirichletAllocation', 'LatentDirichletAllocation', SimpleObjectCodec)
codecs_manager.add_codec('sklearn.decomposition.online_lda', 'LatentDirichletAllocation', SimpleObjectCodec)
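# Sketch of how this algorithm would be invoked from SPL via MLTK
# (field names below are hypothetical):
#   | fit LatentDirichletAllocation k=5 max_iter=10 token_count_* into lda_model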
| 1.960938 | 2 |
packit_service/service/api/webhooks.py | jscotka/packit-service | 0 | 12768317 | # MIT License
#
# Copyright (c) 2019 Red Hat, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import hmac
from hashlib import sha1
from http import HTTPStatus
from logging import getLogger
from flask import request
from flask_restplus import Namespace, Resource, fields
from packit_service.celerizer import celery_app
from packit_service.config import ServiceConfig
from packit_service.service.api.errors import ValidationFailed
logger = getLogger("packit_service")
config = ServiceConfig.get_service_config()
ns = Namespace("webhooks", description="Webhooks")
# Just to be able to specify some payload in Swagger UI
ping_payload = ns.model(
"Github webhook ping",
{
"zen": fields.String(required=False),
"hook_id": fields.String(required=False),
"hook": fields.String(required=False),
},
)
@ns.route("/github")
class GithubWebhook(Resource):
@ns.response(HTTPStatus.OK, "Webhook accepted, returning reply")
@ns.response(HTTPStatus.ACCEPTED, "Webhook accepted, request is being processed")
@ns.response(HTTPStatus.BAD_REQUEST, "Bad request data")
@ns.response(HTTPStatus.UNAUTHORIZED, "X-Hub-Signature validation failed")
# Just to be able to specify some payload in Swagger UI
@ns.expect(ping_payload)
def post(self):
"""
A webhook used by Packit-as-a-Service GitHub App.
"""
msg = request.json
if not msg:
logger.debug("/webhooks/github: we haven't received any JSON data.")
return "We haven't received any JSON data.", HTTPStatus.BAD_REQUEST
if all([msg.get("zen"), msg.get("hook_id"), msg.get("hook")]):
logger.debug(f"/webhooks/github received ping event: {msg['hook']}")
return "Pong!", HTTPStatus.OK
try:
self.validate_signature()
except ValidationFailed as exc:
logger.info(f"/webhooks/github {exc}")
return str(exc), HTTPStatus.UNAUTHORIZED
if not self.interested():
return "Thanks but we don't care about this event", HTTPStatus.ACCEPTED
# TODO: define task names at one place
celery_app.send_task(
name="task.steve_jobs.process_message", kwargs={"event": msg}
)
return "Webhook accepted. We thank you, Github.", HTTPStatus.ACCEPTED
@staticmethod
def validate_signature():
"""
https://developer.github.com/webhooks/securing/#validating-payloads-from-github
https://developer.github.com/webhooks/#delivery-headers
"""
if "X-Hub-Signature" not in request.headers:
if config.validate_webhooks:
msg = "X-Hub-Signature not in request.headers"
logger.warning(msg)
raise ValidationFailed(msg)
else:
# don't validate signatures when testing locally
logger.debug("Ain't validating signatures")
return
sig = request.headers["X-Hub-Signature"]
if not sig.startswith("sha1="):
msg = f"Digest mode in X-Hub-Signature {sig!r} is not sha1"
logger.warning(msg)
raise ValidationFailed(msg)
webhook_secret = config.webhook_secret.encode()
if not webhook_secret:
msg = "webhook_secret not specified in config"
logger.error(msg)
raise ValidationFailed(msg)
signature = sig.split("=")[1]
mac = hmac.new(webhook_secret, msg=request.get_data(), digestmod=sha1)
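        # hmac.compare_digest performs a constant-time comparison to avoid
        # leaking information through timing side channels.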
digest_is_valid = hmac.compare_digest(signature, mac.hexdigest())
if digest_is_valid:
logger.debug("payload signature OK.")
else:
msg = "payload signature validation failed."
logger.warning(msg)
logger.debug(f"X-Hub-Signature: {sig!r} != computed: {mac.hexdigest()}")
raise ValidationFailed(msg)
@staticmethod
def interested():
"""
Check X-GitHub-Event header for events we know we give a f...
...finely prepared response to.
:return: False if we are not interested in this kind of event
"""
uninteresting_events = {
"integration_installation",
"integration_installation_repositories",
}
event_type = request.headers.get("X-GitHub-Event")
uuid = request.headers.get("X-GitHub-Delivery")
_interested = event_type not in uninteresting_events
logger.debug(
f"{event_type} {uuid}{' (not interested)' if not _interested else ''}"
)
return _interested
| 1.625 | 2 |
core/operators.py | Aareon/Akilang | 0 | 12768318 | from collections import namedtuple
from enum import Enum, unique
from functools import lru_cache
from core.errors import ParseError
@unique
class Associativity(Enum):
UNDEFINED = 0
LEFT = 1
RIGHT = 2
BinOpInfo = namedtuple('BinOpInfo', ['precedence', 'associativity'])
BUILTIN_OP = {
'=': BinOpInfo(2, Associativity.RIGHT),
'==': BinOpInfo(10, Associativity.LEFT),
'+=': BinOpInfo(10, Associativity.LEFT),
'-=': BinOpInfo(10, Associativity.LEFT),
'!=': BinOpInfo(10, Associativity.LEFT),
'and': BinOpInfo(5, Associativity.LEFT),
'or': BinOpInfo(5, Associativity.LEFT),
'xor': BinOpInfo(5, Associativity.LEFT),
'<': BinOpInfo(10, Associativity.LEFT),
'<=': BinOpInfo(10, Associativity.LEFT),
'>': BinOpInfo(10, Associativity.LEFT),
'>=': BinOpInfo(10, Associativity.LEFT),
'+': BinOpInfo(20, Associativity.LEFT),
'-': BinOpInfo(20, Associativity.LEFT),
'*': BinOpInfo(40, Associativity.LEFT),
'/': BinOpInfo(40, Associativity.LEFT),
}
BUILTIN_UNARY_OP = {
'not',
'-'
}
UNASSIGNED = {
    '!', '$', '%', '`', '^', '&', '|', '\\',
}
FALSE_BINOP_INFO = BinOpInfo(-1, Associativity.UNDEFINED)
def builtin_operators():
return sorted(BUILTIN_OP.keys())
_binop_map = dict(BUILTIN_OP)
def binop_info(tok):
kind, value, _, position = tok
try:
return _binop_map[value]
except KeyError:
from core.lexer import TokenKind, PUNCTUATORS
if kind == TokenKind.PUNCTUATOR and value not in PUNCTUATORS:
raise ParseError(f'Undefined operator: "{value}"', position)
# Return a false binop info that has no precedence
return FALSE_BINOP_INFO
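# Example: registering a custom binary operator at runtime (a hypothetical
# right-associative power operator):
#   set_binop_info('**', 50, Associativity.RIGHT)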
def set_binop_info(op, precedence, associativity):
_binop_map[op] = BinOpInfo(precedence, associativity) | 2.765625 | 3 |
open-hackathon-server/src/hackathon/storage/local_storage.py | 99Kies/open-hackathon | 37 | 12768319 | # -*- coding: utf-8 -*-
"""
This file is covered by the LICENSING file in the root of this project.
"""
import sys
sys.path.append("..")
import os
from os.path import realpath, dirname, isfile, abspath
import json
import time
import uuid
from werkzeug.datastructures import FileStorage
from io import IOBase
from hackathon.constants import FILE_TYPE, HEALTH_STATUS, HEALTH
from hackathon.storage.storage import Storage
__all__ = ["LocalStorage"]
class LocalStorage(Storage):
"""Hackathon file storage that saves all templates on local disk
files will be save at "<src_dir>/open-hackathon-server/src/hackathon/upload/<file_type>"
"""
def save(self, context):
"""Save a file to storage
:type context: Context
:param context: the execution context of file saving
:rtype context
:return the updated context which should including the full path of saved file
"""
context = self.__generate_paths(context)
self.__save_file(context.content, context.physical_path)
self.log.debug("file saved at:" + context.physical_path)
return context
def delete(self, url):
"""Delete file from storage
:type url: str|unicode
:param url: the url of file to be deleted which are created in 'save'
:rtype bool
:return True if successfully deleted else False
"""
path = self.__convert_url_to_physical_path(url)
if isfile(path):
os.remove(path)
return True
else:
self.log.warn("try to remove dir or non-existed file")
return False
def report_health(self):
"""The status of local storage should be always True"""
return {
HEALTH.STATUS: HEALTH_STATUS.OK,
"type": "LocalStorage"
}
def __init__(self):
self.base_dir = self.__get_storage_base_dir()
@staticmethod
def __ensure_dir(file_path):
"""Make sure the directory of target file exists"""
path = dirname(file_path)
if path and not (os.path.exists(path)):
os.makedirs(path)
return path
def __save_file(self, content, path):
"""Dump file to disk
An existing file with the same name will be erased
:type content: file | dict | FileStorage
:param content: the content of file to be saved. Can be a file object or a dict
:type path: str | unicode
:param path: the file path
"""
        self.__ensure_dir(path)
        if isinstance(content, FileStorage):
            # FileStorage knows how to persist itself; opening the target for
            # writing first would needlessly truncate the file it saves into.
            content.save(path)
        elif isinstance(content, dict):
            with open(path, 'w') as f:
                json.dump(content, f)
        elif isinstance(content, IOBase):
            with open(path, 'w') as f:
                f.write(content.read())
        else:
            with open(path, 'w') as f:
                f.write(str(content))
@staticmethod
def __get_storage_base_dir():
"""Get the base directory of storage"""
return abspath("%s/.." % dirname(realpath(__file__)))
def __generate_paths(self, context):
"""Generate file new name ,physical path and uri
:type context: Context
:param context: execution context
:return updated context
"""
hackathon_name = context.hackathon_name if "hackathon_name" in context else None
# replace file_name with new random name
context.file_name = self.__generate_file_name(context.file_name, context.file_type, hackathon_name)
context.physical_path = self.__generate_physical_path(context.file_name, context.file_type)
context.url = self.__convert_physical_path_to_url(context.physical_path)
return context
def __convert_url_to_physical_path(self, url):
"""Return the physical_path according to its url
:type url: str|unicode
        :param url: the public URL of the file
        :rtype str
        :return the absolute physical path of the file
"""
# physical_path example: <base_dir>/static/upload/hack_image/hack01/20150708/win10-20140708-1234.jpg
# url example: http://localhost:15000/static/upload/hack_image/hack01/20150708/win10-20140708-1234.jpg
i = url.index("static")
path = url[i:]
return "%s/%s" % (self.base_dir, path)
def __convert_physical_path_to_url(self, physical_path):
"""Return the http URI according to physical_path
:type physical_path: str|unicode
:param physical_path: the absolute physical path of the file
:rtype str
        :return publicly accessible URI
"""
# physical_path example: <base_dir>/static/upload/hack_image/hack01/20150708/win10-20140708-1234.jpg
# url example: http://localhost:15000/static/upload/hack_image/hack01/20150708/win10-20140708-1234.jpg
i = physical_path.index("static")
path = physical_path[i:]
return self.util.get_config("endpoint") + "/" + path
def __generate_physical_path(self, file_name, file_type, hackathon_name=None):
"""Return the physical path of file including directory and file name
files are saved at <base_dir>/static/upload/<file_type>/
:type file_name: str|unicode
:param file_name: the original file name
:type file_type: str | unicode
:param file_type: type of file which decides the directories where file is saved.
:rtype str
:return physical path of the file to be saved
"""
# <base_dir>/static/upload/<file_type>/<hackathon_name>/<date>/<file_name>
path = "%s/static/upload/%s%s/%s/%s" % (
self.base_dir,
file_type,
"/" + hackathon_name if hackathon_name else "",
time.strftime("%Y%m%d"),
file_name)
return path
@staticmethod
def __generate_file_name(origin_name, file_type, hackathon_name=None):
"""Generate a random file name if file_type is hack_image
:type origin_name: str | unicode
:param origin_name the origin name of file
:type file_type: str|unicode
        :param file_type: type of file, defined by FILE_TYPE in constants.py
:type hackathon_name: str | unicode
:param hackathon_name: name of hackathon related to this file
:rtype str
:return a random file name which includes hackathon_name and time as parts
"""
if not hackathon_name:
hackathon_name = ""
# handle uploaded images only since the uploaded file name can be very strange or contains Chinese
if file_type == FILE_TYPE.HACK_IMAGE:
extension = os.path.splitext(origin_name)[1]
new_name = "%s-%s-%s%s" % (
hackathon_name,
time.strftime("%Y%m%d"),
str(uuid.uuid1())[0:8],
extension
)
return new_name.strip('-')
else:
return origin_name
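# Minimal usage sketch (assumes the framework's Context object; attribute
# names taken from __generate_paths above):
#   storage = LocalStorage()
#   ctx = storage.save(Context(file_name='logo.jpg',
#                              file_type=FILE_TYPE.HACK_IMAGE,
#                              content=uploaded_file))
#   print(ctx.url)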
| 2.515625 | 3 |
ctf/routes/scoreboard.py | owenofengland/lCTF-Platform | 0 | 12768320 | from flask import Blueprint, render_template
from flask_login import login_required, current_user
from ctf.models.Score import Score
from ctf import db
from sys import path
path.append("..")
scoreboard = Blueprint("scoreboard", __name__)
@scoreboard.route("/scoreboard")
def scoreboard_out():
scores = Score.query.all()
scores.sort(key=lambda x: x.score, reverse=True)
return render_template("scoreboard.html", scores=scores)
| 2.109375 | 2 |
flatpak-manager-steamos/flatpakmanager_steamos/gui.py | sharkwouter/flatpak-manager | 1 | 12768321 | import pygame
import pygameMenu
import flatpakmanager_steamos
import pyflatpak
class gui():
def __init__(self, window_width, window_height, title):
self.window_width = window_width
self.window_height = window_height
self.title = title
self.framerate = 30
self.running = False
self.menu_available_page = 1
self.menu_installed_page = 1
self.__init_pygame()
self.__init_joysticks()
self.__flatpak_manager = pyflatpak.manager()
self.__init_menu()
def __init_pygame(self):
pygame.init()
self.window = pygame.display.set_mode((self.window_width, self.window_height))
pygame.display.set_caption(self.title)
self.clock = pygame.time.Clock()
self.__draw_splash_screen()
def __init_joysticks(self):
pygame.joystick.init()
joysticks = [pygame.joystick.Joystick(x) for x in range(pygame.joystick.get_count())]
for joystick in joysticks:
joystick.init()
def __init_menu(self):
self.menu_available = self.__generate_available_application_menu()
#self.menu_installed = self.__generate_installed_application_menu()
self.menu_main = self._create_menu(self.title)
self.menu_main.add_option("Available Software", self.menu_available)
#self.menu_main.add_option("Installed Software", self.menu_installed)
self.menu_main.add_option("Exit", pygameMenu.events.EXIT)
def __draw_background(self):
self.window.fill(flatpakmanager_steamos.color.background)
def __draw_splash_screen(self):
self.__draw_background()
# draw logo
logo = pygame.image.load(flatpakmanager_steamos.config.logo)
logo_x = self.window_width / 2 - logo.get_width() / 2
logo_rect = pygame.Rect(logo_x, 0, logo.get_width(), logo.get_height())
self.window.blit(logo, logo_rect)
# draw title
font = pygame.font.Font(flatpakmanager_steamos.config.font, 64)
text = font.render(self.title, False, flatpakmanager_steamos.color.text_title)
text_rectangle = text.get_rect()
text_rectangle.center = (self.window_width / 2, self.window_height - 64)
self.window.blit(text, text_rectangle)
pygame.display.update()
def __draw_load_screen(self, title):
self.__draw_background()
# draw title
font = pygame.font.Font(flatpakmanager_steamos.config.font, 64)
text = font.render(title, False, flatpakmanager_steamos.color.text_title)
text_rectangle = text.get_rect()
text_rectangle.center = (self.window_width / 2, self.window_height / 2)
self.window.blit(text, text_rectangle)
pygame.display.update()
def __generate_available_application_menu(self, label=None, page=None):
application_list = self.__flatpak_manager.applications_available
# Create menu
menu = self._create_menu("Available applications")
        # Change the page if one was selected from the page selector
        if page is not None:
            self.menu_available_page = page
# add page changer
page_list = []
        last_page = len(application_list) // flatpakmanager_steamos.config.applications_per_page + 1
for number in range(1, last_page + 1):
page_list.append(("Page {}/{}".format(number, last_page), number))
menu.add_selector("", page_list, onchange=self.__generate_available_application_menu,
selector_id='page_selector{}'.format(self.menu_available_page))
# add application buttons to menu
page_content = self.__get_page(application_list, self.menu_available_page)
for application in page_content:
menu.add_option(str(application), pygameMenu.events.BACK)
return menu
def _create_menu(self, title):
return pygameMenu.Menu(self.window, self.window_width, self.window_height,
flatpakmanager_steamos.config.font, self.title,
dopause=False,
menu_width=self.window_width,
menu_height=self.window_height,
menu_color=flatpakmanager_steamos.color.background,
menu_color_title=flatpakmanager_steamos.color.title,
color_selected=flatpakmanager_steamos.color.selected,
menu_alpha=100
)
def __get_page(self, application_list, page):
output = []
first_index = flatpakmanager_steamos.config.applications_per_page * (page - 1)
last_index = first_index + flatpakmanager_steamos.config.applications_per_page
for index in range(first_index, last_index):
if not index < len(application_list):
break
output.append(application_list[index])
return output
def __read_input(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
                self.running = False
def run(self):
self.running = True
while self.running:
self.clock.tick(flatpakmanager_steamos.config.framerate)
self.menu_main.mainloop()
pygame.display.update()
def stop(self):
self.running = False
| 2.703125 | 3 |
karsender/tests.py | smirnoffs/karsender | 0 | 12768322 | # -*- coding: utf-8 -*-
from karsender.database import get_collection
from karsender.services import validate_emails
__author__ = '<NAME> <<EMAIL>>'
from unittest import TestCase
class TestServices(TestCase):
def test_validate_emails(self):
validate_emails() | 2.140625 | 2 |
mechroutines/models/ene.py | lpratalimaffei/moldriver | 1 | 12768323 | """ calculates certain quantities of interest using MESS+filesytem
"""
import os
import autofile
from mechanalyzer.inf import rxn as rinfo
from mechanalyzer.inf import spc as sinfo
from mechanalyzer.inf import thy as tinfo
from mechlib.amech_io import printer as ioprinter
from mechroutines.models import typ
from mechroutines.models import _vib as vib
# Functions to hand reading and formatting energies of single species
def read_energy(spc_dct_i, pf_filesystems,
spc_model_dct_i, run_prefix,
read_ene=True, read_zpe=True, conf=None, saddle=False):
""" Get the energy for a species on a channel
"""
# Read the electronic energy and ZPVE
e_elec = None
if read_ene:
e_elec = electronic_energy(
spc_dct_i, pf_filesystems, spc_model_dct_i, conf=conf)
# ioprinter.debug_message('e_elec in models ene ', e_elec)
e_zpe = None
if read_zpe:
e_zpe = zero_point_energy(
spc_dct_i, pf_filesystems, spc_model_dct_i,
run_prefix, saddle=saddle)
# ioprinter.debug_message('zpe in models ene ', e_zpe)
# Return the total energy requested
ene = None
if read_ene and read_zpe:
if e_elec is not None and e_zpe is not None:
ene = e_elec + e_zpe
elif read_ene and not read_zpe:
ene = e_elec
    elif read_zpe and not read_ene:
ene = e_zpe
return ene
def electronic_energy(spc_dct_i, pf_filesystems, spc_model_dct_i, conf=None):
""" get high level energy at low level optimized geometry
"""
ioprinter.info_message('- Calculating electronic energy')
# spc_dct_i = spc_dct[spc_name]
rxn_info = spc_dct_i.get('rxn_info', None)
if rxn_info is not None:
spc_info = rinfo.ts_info(rxn_info)
else:
spc_info = sinfo.from_dct(spc_dct_i)
# Get the harmonic filesys information
if conf:
cnf_path = conf[1]
else:
[_, cnf_path, _, _, _] = pf_filesystems['harm']
# Get the electronic energy levels
ene_levels = tuple(val[1] for key, val in spc_model_dct_i['ene'].items()
if 'lvl' in key)
# Read the energies from the filesystem
e_elec = None
if os.path.exists(cnf_path):
e_elec = 0.0
# ioprinter.info_message('lvls', ene_levels)
for (coeff, level) in ene_levels:
# Build SP filesys
mod_thy_info = tinfo.modify_orb_label(level, spc_info)
sp_save_fs = autofile.fs.single_point(cnf_path)
sp_save_fs[-1].create(mod_thy_info[1:4])
# Read the energy
sp_path = sp_save_fs[-1].path(mod_thy_info[1:4])
if os.path.exists(sp_path):
ioprinter.reading('Energy', sp_path)
ene = sp_save_fs[-1].file.energy.read(mod_thy_info[1:4])
e_elec += (coeff * ene)
else:
ioprinter.warning_message('No energy at path')
e_elec = None
break
else:
ioprinter.warning_message('No conformer to calculate the energy')
return e_elec
def zero_point_energy(spc_dct_i,
pf_filesystems, spc_model_dct_i,
run_prefix, saddle=False):
""" compute the ZPE including torsional and anharmonic corrections
"""
ioprinter.info_message('- Calculating zero-point energy')
# Calculate ZPVE
is_atom = False
if not saddle:
if typ.is_atom(spc_dct_i):
is_atom = True
if is_atom:
zpe = 0.0
else:
_, _, zpe, _ = vib.vib_analysis(
spc_dct_i, pf_filesystems, spc_model_dct_i,
run_prefix, zrxn=(None if not saddle else 'placeholder'))
return zpe
def rpath_ref_idx(ts_dct, scn_vals, coord_name, scn_prefix,
ene_info1, ene_info2):
""" Get the reference energy along a reaction path
"""
# Set up the filesystem
zma_fs = autofile.fs.zmatrix(scn_prefix)
zma_path = zma_fs[-1].path([0])
scn_fs = autofile.fs.scan(zma_path)
ene_info1 = ene_info1[1][0][1]
ene_info2 = ene_info2[0]
ioprinter.debug_message('mod_eneinf1', ene_info1)
ioprinter.debug_message('mod_eneinf2', ene_info2)
mod_ene_info1 = tinfo.modify_orb_label(
sinfo.from_dct(ts_dct), ene_info1)
mod_ene_info2 = tinfo.modify_orb_label(
sinfo.from_dct(ts_dct), ene_info2)
ene1, ene2, ref_val = None, None, None
for val in reversed(scn_vals):
locs = [[coord_name], [val]]
path = scn_fs[-1].path(locs)
hs_fs = autofile.fs.high_spin(path)
if hs_fs[-1].file.energy.exists(mod_ene_info1[1:4]):
ene1 = hs_fs[-1].file.energy.read(mod_ene_info1[1:4])
if hs_fs[-1].file.energy.exists(mod_ene_info2[1:4]):
ene2 = hs_fs[-1].file.energy.read(mod_ene_info2[1:4])
if ene1 is not None and ene2 is not None:
ref_val = val
break
if ref_val is not None:
scn_idx = scn_vals.index(ref_val)
return scn_idx, ene1, ene2
| 2.3125 | 2 |
17/main.py | nybbles/advent-of-code-2021 | 0 | 12768324 | <gh_stars>0
#! python
from enum import Enum
import re
from itertools import count
from collections import namedtuple
from more_itertools import ilen
input = "target area: x=20..30, y=-10..-5"
input = open("17/input.txt", "r").read()
# Split the problem up into x and y coordinates separately.
# x coordinate: x = (dx (dx + 1)) / 2
# At each x within the target, the trajectory that maximizes y is the one that
# has smallest dx. If there were a trajectory with larger dx, then y could have
# been made higher by aiming the probe higher.
# Compute smallest dx for each x in the target area. This is independent of y.
# There will be some set of initial dx where the probe ends up in the target
# area and is slow (in fact, possibly zero somewhere).
# Then for each x in the target area, for its smallest dx, find the highest y
# trajectory. I'm not sure how to do that though.
# Each x in the target area has some number of time steps it takes to get there
# (where we are minimizing the final dx, as above). The number of time steps can
# be used to solve for y. Need to find some initial dy such that after that
# number of time steps, the final y is within the target.
# y = (dy * n) - (n * (n-1)) / 2
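# Sanity check against the sample input "x=20..30, y=-10..-5": the best shot is
# (dx, dy) = (6, 9), which peaks at y = cumsum(9) = 45 (the expected answer).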
def parse_input(input):
match = re.search(
"target area: x=([-0-9]+)..([-0-9]+), y=([-0-9]+)..([-0-9]+)", input)
if not match:
raise Exception("Unable to parse input")
return (int(match.group(1)), int(match.group(2))), (int(match.group(3)),
int(match.group(4)))
def cumsum_iter(start):
acc = 0
for i in count(start):
acc = i + acc
yield acc
def cumsum(n):
assert (n >= 0)
return int(n * (n + 1) / 2)
def determine_dx(target_x):
x0, x1 = target_x
initial_dx = None
x_stalled = None
result = []
for i, x in enumerate(cumsum_iter(1)):
if x >= x0:
initial_dx = i + 1
x_stalled = x
break
for x in range(x0, min(x1, x_stalled) + 1):
dx = x_stalled - x
result.append((x, initial_dx, dx))
if x1 <= x_stalled:
return result
final_x = x_stalled
while True:
assert (final_x < x1)
x_stalled = final_x + 1
initial_dx = initial_dx + 1
final_x = cumsum(initial_dx)
for x in range(x_stalled, min(x1, final_x) + 1):
dx = final_x - x
result.append((x, initial_dx, dx))
if x1 <= final_x:
return result
def get_ntimesteps(initial_dx, dx):
# The number of timesteps passed can be computed from the starting speed and
# the ending speed, since speed decreases by 1 on each timestep.
return initial_dx - dx
def dy_to_maxy(dy):
if dy <= 0:
return dy
return cumsum(dy)
def determine_maxy_trajectory(initial_dx, dx, target_y):
# TODO: Need special handling for dx = 0, because they can use more timesteps
# y = (dy * n) - (n * (n-1)) / 2
# y = dy * n - constant0
# y + constant0 = dy * constant1
# dy = (y + constant0) / constant1
# if dx = 0, then timesteps >= n.
# Assumption: Trajectories where dx=0, but dy>0 are not valid.
# cumsum(n) to get to the apex, then cumsum(m) to descend
y1, y0 = target_y
assert (y0 >= y1)
n = get_ntimesteps(initial_dx, dx)
if (dx == 0):
# print(initial_dx, dx, n)
# TODO: Start from smallest, then go to largest, until no longer in target?
def check_if_vertical_descent_hits_target(max_y, target_y):
y1, y0 = target_y
assert (max_y >= y0)
ys_on_descent = map(lambda i: max_y - i, cumsum_iter(0))
for final_y in ys_on_descent:
if final_y < y1:
return False, None
if final_y <= y0:
return True, final_y
best_result = None
for initial_dy in count(0):
max_y = cumsum(initial_dy)
hits_target, final_y = check_if_vertical_descent_hits_target(
max_y, target_y)
if not hits_target:
return best_result
# print("Found better dx=0 trajectory", final_y, initial_dy, max_y)
if best_result is None or best_result[2] < max_y:
best_result = final_y, initial_dy, max_y
raise Exception("Not expected to get here")
else:
dys = []
for y in range(y1, y0 + 1):
dy = (y + cumsum(n - 1)) / n
if not dy.is_integer():
continue
dys.append((y, int(dy)))
if len(dys) == 0:
return None
best_dy = max(dys, key=lambda x: x[1])
return best_dy[0], best_dy[1], dy_to_maxy(best_dy[1])
target_x, target_y = parse_input(input)
print(target_x, target_y)
# print(determine_dx(target_x))
# raise Exception("WTF")
on_target_trajectories = []
for trajectory in determine_dx(target_x):
x, initial_dx, dx = trajectory
result = determine_maxy_trajectory(initial_dx, dx, target_y)
if result is not None:
y, initial_dy, max_y = result
on_target_trajectories.append((initial_dx, initial_dy, x, y, max_y))
on_target_trajectories.sort(key=lambda x: x[-1], reverse=True)
# print(on_target_trajectories[0])
print(on_target_trajectories)
TrajectoryElement = namedtuple('TrajectoryElement',
['dx', 'dy', 'x', 'y', 'max_y', 'status'])
TrajectoryStatus = Enum('TrajectoryStatus',
'IN_TRANSIT IN_TARGET OVERSHOT_TARGET')
def run_trajectory(target_x, target_y, initial_dx, initial_dy):
x, y, max_y = 0, 0, 0
dx = initial_dx
dy = initial_dy
trajectory = []
while True:
x0, x1 = target_x
y1, y0 = target_y
status = None
if x >= x0 and x <= x1 and y <= y0 and y >= y1:
status = TrajectoryStatus.IN_TARGET
elif x > x1 or y < y1:
status = TrajectoryStatus.OVERSHOT_TARGET
else:
status = TrajectoryStatus.IN_TRANSIT
trajectory_element = TrajectoryElement(dx, dy, x, y, max_y, status)
trajectory.append(trajectory_element)
match trajectory_element.status:
case TrajectoryStatus.IN_TARGET | TrajectoryStatus.OVERSHOT_TARGET:
return trajectory
case TrajectoryStatus.IN_TRANSIT:
x = x + dx
y = y + dy
max_y = max(y, max_y)
dx = max(dx - 1, 0)
dy = dy - 1
y1, y0 = target_y
initial_dy = abs(y1) - 1
ans = cumsum(initial_dy)
print(ans)
def on_target_initial_velocities(target_x, target_y):
x0, x1 = target_x
y1, y0 = target_y
max_initial_dx = x1
min_initial_dx = 1
max_initial_dy = abs(y1) - 1 # from previous solution
min_initial_dy = y1
for dx in range(min_initial_dx, max_initial_dx + 1):
for dy in range(min_initial_dy, max_initial_dy + 1):
trajectory = run_trajectory(target_x, target_y, dx, dy)
end_point = trajectory[-1]
assert(end_point.status != TrajectoryStatus.IN_TRANSIT)
if end_point.status == TrajectoryStatus.IN_TARGET:
yield (dx, dy)
print(ilen(on_target_initial_velocities(target_x, target_y)))
| 3.1875 | 3 |
_archive/reference/pi_server.py | ranstotz/surveillance_system | 0 | 12768325 | import cv2
import numpy as np
import zmq
import base64
import time
class serverViewer(object):
def __init__(self, ip, port):
self.ip = ip
self.port = port
self.footage_socket = ""
print "Initiallizing context and socket."
self.context = zmq.Context()
self.footage_socket = self.context.socket(zmq.SUB)
print "Context and socket initialized. \nBinding to port."
        self.footage_socket.bind('tcp://*:%s' % self.port)
self.footage_socket.setsockopt_string(zmq.SUBSCRIBE, np.unicode(''))
print "Port initialized.\n"
def capture_stream(self):
connectionFlag = False
while True:
try:
frame = self.footage_socket.recv_string()
if connectionFlag == False:
print "Connection made. Now streaming.\n"
connectionFlag = True
img = base64.b64decode(frame)
npimg = np.fromstring(img, dtype=np.uint8)
source = cv2.imdecode(npimg, 1)
#cv2.imshow("Stream", source)
#cv2.waitKey(1)
print "captured data"
except KeyboardInterrupt:
cv2.destroyAllWindows()
break
def get_frame(self):
try:
self.frames = open("stream.jpg", 'wb+')
frame = self.footage_socket.recv_string()
img = base64.b64decode(frame)
npimg = np.fromstring(img, dtype=np.uint8)
source = cv2.imdecode(npimg, 1)
cv2.imwrite("stream.jpg", source)
return self.frames.read()
        except Exception:
pass
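# A matching client (not included here) would PUB base64-encoded JPEG frames,
# e.g. connecting a zmq.PUB socket to tcp://<server-ip>:5050 and sending
# base64.b64encode(cv2.imencode('.jpg', frame)[1]) for each captured frame.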
| 2.609375 | 3 |
questions/scripts/seed_questions_v1.py | nyctophiliacme/edtech-backend | 0 | 12768326 | <reponame>nyctophiliacme/edtech-backend
import csv
import django
import os
import sys
sys.path.append("/home/ubuntu/edtech-backend")
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'superteacher.settings')
django.setup()
from questions.views import QuestionPostView
file_path = '/home/ubuntu/edtech-backend/questions/questions_csvs/' + sys.argv[1]
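# The CSV is expected to provide a header row matching the keys used below:
# question_text, question_img_url, difficulty_level, explanation,
# explanation_img_url, chapter_id, question_choice_1..4, correct_choice,
# and the question_choice_*_image_url columns.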
row_number = 1
with open(file_path) as csv_file:
csv_reader = csv.DictReader(csv_file, delimiter=',')
line_count = 0
for row in csv_reader:
question_dict = {
'question_text': row['question_text'],
'question_img_url': row['question_img_url'],
'difficulty_level': row['difficulty_level'],
'explanation': row['explanation'],
'explanation_img_url': row['explanation_img_url'],
'chapter_id': row['chapter_id'],
'question_choice_1': row['question_choice_1'],
'question_choice_2': row['question_choice_2'],
'question_choice_3': row['question_choice_3'],
'question_choice_4': row['question_choice_4'],
'correct_choice': row['correct_choice'],
'question_choice_1_image_url': row['question_choice_1_image_url'],
'question_choice_2_image_url': row['question_choice_2_image_url'],
'question_choice_3_image_url': row['question_choice_3_image_url'],
'question_choice_4_image_url': row['question_choice_4_image_url'],
}
response = QuestionPostView.save_question_data(data=question_dict)
if response.status_code == 400:
print("Error processing row number: ", row_number)
row_number += 1
| 2.484375 | 2 |
cursos_complementarios/estructuras_datos_lineales_python/modulo_II_arrays/utils/array.py | EdinsonRequena/articicial-inteligence-and-data-science | 30 | 12768327 | """
Tema: Arrays
Curso: Estructura de Datos Lineales (Python).
Plataforma: Platzi.
Profesor: <NAME>.
Alumno: @edinsonrequena.
"""
class Array(object):
"""A simple array"""
def __init__(self, capacity: int, fill_value=None) -> None:
self.items = list()
for i in range(capacity):
self.items.append(fill_value)
def __len__(self) -> int:
"""
Method to Know the array's lenght
"""
count = 0
for i in self.items:
count += 1
return count
def __str__(self) -> str:
"""Returns string representation of the array"""
return str(self.items)
    def __iter__(self):
        """
        Method to iterate over the array's items
        """
        current = 0
        while current < len(self.items):
            yield self.items[current]
            current += 1
def __getitem__(self, index: any) -> any:
"""
returns a specific index
"""
return self.items[index]
def __setitem__(self, index, new_item):
"""
set item in a specific index
"""
self.items[index] = new_item
return self.items
def __fillslots__(self):
"""
return a sequence of numbers according to the array's length
"""
slots = self.items
for i in range(len(slots)):
slots[i] = i + 1
return slots
def __sumlements__(self) -> list or None:
"""
return the sum of all array's elements if and only if the elements are integers
"""
arr = self.items
try:
            for i in range(len(arr)):
                if not isinstance(arr[i], int):
                    raise TypeError('Only integers can be summed')
return sum(arr)
except TypeError as e:
print(e)
def __add__(self, index, item):
"""
        returns the array with the new element inserted at the given index
"""
arr = self.items
return arr[:index] + [item] + arr[index:]
def __append__(self, item):
"""
        returns the array with the new element appended at the end
"""
arr = self.items
return arr[:] + [item]
def __pop__(self, index):
"""
        returns the array without the selected element
        """
        arr = self.items
        arr.pop(index)
return arr
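# Example usage:
#   arr = Array(5, fill_value=0)
#   arr[0] = 10
#   print(len(arr), arr[0])  # -> 5 10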
| 4.3125 | 4 |
tests/test_init.py | exxamalte/python-georss-client | 0 | 12768328 | """Tests for base classes."""
import datetime
import unittest
from unittest import mock
from unittest.mock import MagicMock
import requests
from georss_client import (
UPDATE_ERROR,
UPDATE_OK,
FeedEntry,
GeoRssDistanceHelper,
GeoRssFeed,
)
from georss_client.xml_parser.geometry import Point, Polygon
from tests import MockGeoRssFeed
from tests.utils import load_fixture
HOME_COORDINATES_1 = (-31.0, 151.0)
HOME_COORDINATES_2 = (-37.0, 150.0)
class TestGeoRssFeed(unittest.TestCase):
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_ok(self, mock_session, mock_request):
"""Test updating feed is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("generic_feed_1.xml")
)
feed = MockGeoRssFeed(HOME_COORDINATES_1, None)
assert (
repr(feed) == "<MockGeoRssFeed(home=(-31.0, 151.0), "
"url=None, radius=None, categories=None)>"
)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 5
feed_entry = entries[0]
assert feed_entry.title == "Title 1"
assert feed_entry.external_id == "1234"
assert feed_entry.category == "Category 1"
assert feed_entry.published == datetime.datetime(2018, 9, 23, 8, 30)
assert feed_entry.updated == datetime.datetime(2018, 9, 23, 8, 35)
assert feed_entry.coordinates == (-37.2345, 149.1234)
self.assertAlmostEqual(feed_entry.distance_to_home, 714.4, 1)
feed_entry = entries[1]
assert feed_entry.title == "Title 2"
assert feed_entry.external_id == "2345"
self.assertIsNone(feed_entry.attribution)
assert repr(feed_entry) == "<FeedEntry(id=2345)>"
feed_entry = entries[2]
assert feed_entry.title == "Title 3"
assert feed_entry.external_id == "Title 3"
feed_entry = entries[3]
self.assertIsNone(feed_entry.title)
assert feed_entry.external_id == hash(feed_entry.coordinates)
feed_entry = entries[4]
assert feed_entry.title == "Title 5"
assert feed_entry.external_id == "5678"
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_ok_feed_2(self, mock_session, mock_request):
"""Test updating feed is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("generic_feed_2.xml")
)
feed = MockGeoRssFeed(HOME_COORDINATES_1, None)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 1
feed_entry = entries[0]
assert feed_entry.title == "Title 1"
assert feed_entry.external_id == "1234"
assert feed_entry.category == "Category 1"
assert feed_entry.coordinates == (-37.2345, 149.1234)
self.assertAlmostEqual(feed_entry.distance_to_home, 714.4, 1)
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_ok_feed_3(self, mock_session, mock_request):
"""Test updating feed is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("generic_feed_3.xml")
)
feed = MockGeoRssFeed(HOME_COORDINATES_1, None)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 3
feed_entry = entries[0]
assert feed_entry.external_id == "1234"
assert feed_entry.coordinates == (-34.93728111547821, 148.59710883878262)
self.assertAlmostEqual(feed_entry.distance_to_home, 491.7, 1)
feed_entry = entries[1]
assert feed_entry.external_id == "2345"
assert feed_entry.coordinates == (-34.937170989, 148.597182317)
self.assertAlmostEqual(feed_entry.distance_to_home, 491.8, 1)
feed_entry = entries[2]
assert feed_entry.external_id == "3456"
assert feed_entry.coordinates == (-29.962746645660683, 152.43090880416074)
self.assertAlmostEqual(feed_entry.distance_to_home, 176.5, 1)
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_ok_feed_6(self, mock_session, mock_request):
"""Test updating feed is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("generic_feed_6.xml")
)
feed = MockGeoRssFeed(HOME_COORDINATES_1, None)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 1
feed_entry = entries[0]
assert feed_entry.title == "Title 1"
assert feed_entry.external_id == "1234"
assert feed_entry.category == "Category 1"
assert feed_entry.coordinates == (-37.2345, 149.1234)
self.assertAlmostEqual(feed_entry.distance_to_home, 714.4, 1)
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_ok_with_radius_filtering(self, mock_session, mock_request):
"""Test updating feed is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("generic_feed_1.xml")
)
feed = MockGeoRssFeed(HOME_COORDINATES_2, None, filter_radius=90.0)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 4
self.assertAlmostEqual(entries[0].distance_to_home, 82.0, 1)
self.assertAlmostEqual(entries[1].distance_to_home, 77.0, 1)
self.assertAlmostEqual(entries[2].distance_to_home, 84.6, 1)
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_ok_with_radius_and_category_filtering(
self, mock_session, mock_request
):
"""Test updating feed is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("generic_feed_1.xml")
)
feed = MockGeoRssFeed(
HOME_COORDINATES_2,
None,
filter_radius=90.0,
filter_categories=["Category 2"],
)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 1
self.assertAlmostEqual(entries[0].distance_to_home, 77.0, 1)
feed = MockGeoRssFeed(
HOME_COORDINATES_2,
None,
filter_radius=90.0,
filter_categories=["Category 4"],
)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 0
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_error(self, mock_session, mock_request):
"""Test updating feed results in error."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = False
feed = MockGeoRssFeed(HOME_COORDINATES_1, None)
status, entries = feed.update()
assert status == UPDATE_ERROR
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_with_request_exception(self, mock_session, mock_request):
"""Test updating feed raises exception."""
mock_session.return_value.__enter__.return_value.send.side_effect = (
requests.exceptions.RequestException
)
feed = GeoRssFeed(HOME_COORDINATES_1, None)
status, entries = feed.update()
assert status == UPDATE_ERROR
self.assertIsNone(entries)
@mock.patch("requests.Request")
@mock.patch("requests.Session")
def test_update_bom(self, mock_session, mock_request):
"""Test updating feed with BOM (byte order mark) is ok."""
mock_session.return_value.__enter__.return_value.send.return_value.ok = True
mock_session.return_value.__enter__.return_value.send.return_value.text = (
load_fixture("xml_parser_bom_1.xml")
)
feed = MockGeoRssFeed(HOME_COORDINATES_1, None)
assert (
repr(feed) == "<MockGeoRssFeed(home=(-31.0, 151.0), "
"url=None, radius=None, categories=None)>"
)
status, entries = feed.update()
assert status == UPDATE_OK
self.assertIsNotNone(entries)
assert len(entries) == 0
class TestGeoRssDistanceHelper(unittest.TestCase):
"""Tests for the GeoJSON distance helper."""
def test_extract_coordinates_from_point(self):
"""Test extracting coordinates from point."""
mock_point = Point(-30.0, 151.0)
latitude, longitude = GeoRssDistanceHelper.extract_coordinates(mock_point)
assert latitude == -30.0
assert longitude == 151.0
def test_extract_coordinates_from_polygon(self):
"""Test extracting coordinates from polygon."""
mock_polygon = Polygon(
[
Point(-30.0, 151.0),
Point(-30.0, 151.5),
Point(-30.5, 151.5),
Point(-30.5, 151.0),
Point(-30.0, 151.0),
]
)
latitude, longitude = GeoRssDistanceHelper.extract_coordinates(mock_polygon)
self.assertAlmostEqual(latitude, -30.2, 1)
self.assertAlmostEqual(longitude, 151.2, 1)
def test_extract_coordinates_from_unsupported_geometry(self):
"""Test extracting coordinates from unsupported geometry."""
mock_unsupported_geometry = MagicMock()
latitude, longitude = GeoRssDistanceHelper.extract_coordinates(
mock_unsupported_geometry
)
self.assertIsNone(latitude)
self.assertIsNone(longitude)
def test_distance_to_point(self):
"""Test calculating distance to point."""
home_coordinates = [-31.0, 150.0]
mock_point = Point(-30.0, 151.0)
distance = GeoRssDistanceHelper.distance_to_geometry(
home_coordinates, mock_point
)
self.assertAlmostEqual(distance, 146.8, 1)
def test_distance_to_polygon(self):
"""Test calculating distance to point."""
home_coordinates = [-31.0, 150.0]
mock_polygon = Polygon(
[
Point(-30.0, 151.0),
Point(-30.0, 151.5),
Point(-30.5, 151.5),
Point(-30.5, 151.0),
Point(-30.0, 151.0),
]
)
distance = GeoRssDistanceHelper.distance_to_geometry(
home_coordinates, mock_polygon
)
self.assertAlmostEqual(distance, 110.6, 1)
def test_distance_to_unsupported_geometry(self):
"""Test calculating distance to unsupported geometry."""
home_coordinates = [-31.0, 150.0]
mock_unsupported_geometry = MagicMock()
distance = GeoRssDistanceHelper.distance_to_geometry(
home_coordinates, mock_unsupported_geometry
)
assert distance == float("inf")
class TestFeedEntry(unittest.TestCase):
def test_simple_feed_entry(self):
"""Test feed entry behaviour."""
feed_entry = FeedEntry(None, None)
assert repr(feed_entry) == "<FeedEntry(id=None)>"
self.assertIsNone(feed_entry.geometry)
self.assertIsNone(feed_entry.coordinates)
self.assertIsNone(feed_entry.title)
self.assertIsNone(feed_entry.category)
self.assertIsNone(feed_entry.attribution)
self.assertIsNone(feed_entry.description)
self.assertIsNone(feed_entry.published)
self.assertIsNone(feed_entry.updated)
self.assertIsNone(
feed_entry._search_in_external_id(r"External ID (?P<custom_attribute>.+)$")
)
self.assertIsNone(
feed_entry._search_in_title(r"Title (?P<custom_attribute>.+)$")
)
self.assertIsNone(
feed_entry._search_in_description(r"Description (?P<custom_attribute>.+)$")
)
def test_feed_entry_search_in_attributes(self):
"""Test feed entry behaviour."""
rss_entry = mock.MagicMock()
type(rss_entry).guid = mock.PropertyMock(return_value="Test 123")
type(rss_entry).title = mock.PropertyMock(return_value="Title 123")
type(rss_entry).description = mock.PropertyMock(return_value="Description 123")
type(rss_entry).category = mock.PropertyMock(
return_value=["Category 1", "Category 2"]
)
updated = datetime.datetime(2019, 4, 1, 8, 30, tzinfo=datetime.timezone.utc)
type(rss_entry).updated_date = mock.PropertyMock(return_value=updated)
feed_entry = FeedEntry(None, rss_entry)
assert repr(feed_entry) == "<FeedEntry(id=Test 123)>"
assert (
feed_entry._search_in_external_id(r"Test (?P<custom_attribute>.+)$")
== "123"
)
assert feed_entry._search_in_title(r"Title (?P<custom_attribute>.+)$") == "123"
assert (
feed_entry._search_in_description(r"Description (?P<custom_attribute>.+)$")
== "123"
)
assert feed_entry.category == "Category 1"
assert feed_entry.description == "Description 123"
assert feed_entry.updated == updated
| 2.65625 | 3 |
agda/agda/settings/test.py | NBISweden/agda | 1 | 12768329 | from local import * # noqa
########## IN-MEMORY TEST DATABASE
#DATABASES = {
# "default": {
# "ENGINE": "django.db.backends.sqlite3",
# "NAME": ":memory:",
# "USER": "",
# "PASSWORD": "",
# "HOST": "",
# "PORT": "",
# },
#}
LOGGING = {}
| 1.484375 | 1 |
setup.py | josiahls/fast-reinforcement-learning | 42 | 12768330 | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
VERSION = "1.0.1"
setup(name='fast_rl',
version=VERSION,
      description='Fastai has been amazing for computer vision and tabular learning. One would wish '
                  'the same for RL. The purpose of this repo is to have a framework that is as easy as possible to '
                  'start with, but also designed for testing new agents.',
url='https://github.com/josiahls/fast-reinforcement-learning',
author='<NAME>',
author_email='<EMAIL>',
python_requires='>=3.6',
long_description=long_description,
long_description_content_type="text/markdown",
license='',
packages=find_packages(),
zip_safe=False,
install_requires=['fastai>=1.0.59', 'gym[box2d, atari]', 'jupyter'],
extras_require={'all': [
'gym-minigrid',
'moviepy'
# 'gym_maze @ git+https://github.com/MattChanTK/gym-maze.git',
# 'pybullet-gym @ git+https://github.com/benelot/pybullet-gym.git'
]},
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
],
)
| 1.304688 | 1 |
FamilyPropertyMS/util/ResponseCode.py | 520MianXiangDuiXiang520/FamilyPropertyManageSystem | 7 | 12768331 | CODE = {
200: {'code': 200, 'msg': "ok"},
201: {'code': 201, 'msg': "no data"},
    400: {'code': 400, 'msg': "Bad Request"},  # malformed request
    401: {'code': 401, 'msg': "Unauthorized"},  # no user credentials
    403: {'code': 403, 'msg': 'Forbidden'},  # authorization refused
    418: {'code': 418, 'msg': 'happy new year'},
    429: {'code': 429, 'msg': "Too many request"},
    460: {'code': 460, 'msg': 'Reach the upper limit'},  # custom code: upper limit reached
    500: {'code': 500, 'msg': "Internal Server Error"}  # server exception
} | 2.015625 | 2 |
accounts/auth_backends.py | zeitcodes/django-accounts | 0 | 12768332 | from django.contrib.auth.backends import ModelBackend
try:
from django.contrib.auth import get_user_model
except ImportError:
from django.contrib.auth.models import User
get_user_model = lambda: User
import re
email_re = re.compile(
r"(^[-!#$%&'*+/=?^_`{}|~0-9A-Z]+(\.[-!#$%&'*+/=?^_`{}|~0-9A-Z]+)*" # dot-atom
r'|^"([\001-\010\013\014\016-\037!#-\[\]-\177]|\\[\001-\011\013\014\016-\177])*"' # quoted-string
r')@(?:[A-Z0-9-]+\.)+[A-Z]{2,6}$', re.IGNORECASE) # domain
class EmailBackend(ModelBackend):
    """Authenticate using email only."""
    UserModel = get_user_model()
    def authenticate(self, username=None, password=None):
if email_re.search(username):
try:
user = self.UserModel.objects.get(email=username)
if user.check_password(password):
return user
except self.UserModel.DoesNotExist:
pass
return None
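# --- Added illustrative wiring (sketch, not part of the original module) ---
# A minimal example of enabling this backend in a Django project's settings;
# the dotted path below is an assumption derived from this file's location
# ("accounts/auth_backends.py") and may differ in a real deployment.
#
# AUTHENTICATION_BACKENDS = [
#     'accounts.auth_backends.EmailBackend',
#     'django.contrib.auth.backends.ModelBackend',  # keep username login too
# ]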
| 2.515625 | 3 |
plaso/formatters/bencode_parser.py | ir4n6/plaso | 1 | 12768333 | <reponame>ir4n6/plaso
# -*- coding: utf-8 -*-
"""The bencode parser event formatters."""
from __future__ import unicode_literals
from plaso.formatters import interface
from plaso.formatters import manager
class TransmissionEventFormatter(interface.ConditionalEventFormatter):
"""Formatter for a Transmission active torrents event."""
DATA_TYPE = 'p2p:bittorrent:transmission'
SOURCE_LONG = 'Transmission Active Torrents'
SOURCE_SHORT = 'TORRENT'
FORMAT_STRING_SEPARATOR = '; '
FORMAT_STRING_PIECES = [
'Saved to {destination}',
'Minutes seeded: {seedtime}']
class UTorrentEventFormatter(interface.ConditionalEventFormatter):
"""Formatter for a BitTorrent uTorrent active torrents event."""
DATA_TYPE = 'p2p:bittorrent:utorrent'
SOURCE_LONG = 'uTorrent Active Torrents'
SOURCE_SHORT = 'TORRENT'
FORMAT_STRING_SEPARATOR = '; '
FORMAT_STRING_PIECES = [
'Torrent {caption}',
'Saved to {path}',
'Minutes seeded: {seedtime}']
manager.FormattersManager.RegisterFormatters([
TransmissionEventFormatter, UTorrentEventFormatter])
| 1.90625 | 2 |
tests/fireworks/user_objects/firetasks/test_dtool_lookup_tasks.py | IMTEK-Simulation/imteksimfw | 0 | 12768334 | <filename>tests/fireworks/user_objects/firetasks/test_dtool_lookup_tasks.py
# coding: utf-8
"""Test dtool lookup server queries integration."""
__author__ = '<NAME>'
__copyright__ = 'Copyright 2020, IMTEK Simulation, University of Freiburg'
__email__ = '<EMAIL>, <EMAIL>'
__date__ = 'Nov 05, 2020'
import glob
import json
import logging
import pytest
from fireworks.utilities.dict_mods import apply_mod
from imteksimfw.utils.logging import _log_nested_dict
from imteksimfw.fireworks.user_objects.firetasks.dataflow_tasks import SearchDictTask
from imteksimfw.fireworks.user_objects.firetasks.dtool_tasks import FetchItemTask
from imteksimfw.fireworks.user_objects.firetasks.dtool_lookup_tasks import (
QueryDtoolTask, ReadmeDtoolTask, ManifestDtoolTask)
from imteksimfw.fireworks.user_objects.firetasks.cmd_tasks import EvalPyEnvTask
from imteksimfw.utils.dict import compare, _make_marker
# from test_dtool_tasks import _compare
@pytest.fixture
def dtool_lookup_config(dtool_config):
"""Provide default dtool lookup config."""
dtool_config.update({
"DTOOL_LOOKUP_SERVER_URL": "https://localhost:5000",
"DTOOL_LOOKUP_SERVER_TOKEN_GENERATOR_URL": "http://localhost:5001/token",
"DTOOL_LOOKUP_SERVER_USERNAME": "testuser",
"DTOOL_LOOKUP_SERVER_PASSWORD": "<PASSWORD>",
"DTOOL_LOOKUP_SERVER_VERIFY_SSL": False,
})
return dtool_config
@pytest.fixture
def dtool_smb_config(dtool_config):
"""Provide default dtool lookup config."""
dtool_config.update({
"DTOOL_SMB_SERVER_NAME_test-share": "localhost",
"DTOOL_SMB_SERVER_PORT_test-share": 4445,
"DTOOL_SMB_USERNAME_test-share": "guest",
"DTOOL_SMB_PASSWORD_test-share": "a-guest-needs-no-password",
"DTOOL_SMB_DOMAIN_test-share": "WORKGROUP",
"DTOOL_SMB_SERVICE_NAME_test-share": "sambashare",
"DTOOL_SMB_PATH_test-share": "dtool"
})
return dtool_config
@pytest.fixture
def default_query_dtool_task_spec(dtool_lookup_config):
"""Provide default test task_spec for QueryDtoolTask."""
return {
'dtool_config': dtool_lookup_config,
'stored_data': True,
'query': {
'base_uri': 'smb://test-share',
'name': {'$regex': 'test'},
},
'loglevel': logging.DEBUG,
}
@pytest.fixture
def default_readme_dtool_task_spec(dtool_lookup_config):
"""Provide default test task_spec for ReadmeDtoolTask."""
return {
'dtool_config': dtool_lookup_config,
'stored_data': True,
'uri': 'smb://test-share/1a1f9fad-8589-413e-9602-5bbd66bfe675',
'loglevel': logging.DEBUG,
}
@pytest.fixture
def default_manifest_dtool_task_spec(dtool_lookup_config):
"""Provide default test task_spec for ManifestDtoolTask."""
return {
'dtool_config': dtool_lookup_config,
'stored_data': True,
'uri': 'smb://test-share/1a1f9fad-8589-413e-9602-5bbd66bfe675',
'loglevel': logging.DEBUG,
}
#
# dtool lookup tasks tests
#
def test_query_dtool_task_run(dtool_lookup_server, default_query_dtool_task_spec, dtool_lookup_config):
"""Will lookup some dataset on the server."""
logger = logging.getLogger(__name__)
logger.debug("Instantiate QueryDtoolTask with '{}'".format(
default_query_dtool_task_spec))
t = QueryDtoolTask(**default_query_dtool_task_spec)
fw_action = t.run_task({})
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
output = fw_action.stored_data['output']
assert len(output) == 1
# TODO: dataset creation in test
expected_respones = [
{
"base_uri": "smb://test-share",
"created_at": "Sun, 08 Nov 2020 18:38:40 GMT",
"creator_username": "jotelha",
"dtoolcore_version": "3.17.0",
"frozen_at": "Mon, 09 Nov 2020 11:33:41 GMT",
"name": "simple_test_dataset",
"tags": [],
"type": "dataset",
"uri": "smb://test-share/1a1f9fad-8589-413e-9602-5bbd66bfe675",
"uuid": "1a1f9fad-8589-413e-9602-5bbd66bfe675"
}
]
to_compare = {
"base_uri": True,
"created_at": False,
"creator_username": True,
"dtoolcore_version": False,
"frozen_at": False,
"name": True,
"tags": True,
"type": True,
"uri": True,
"uuid": True
}
compares = compare(
output[0],
expected_respones[0],
to_compare
)
assert compares
def test_readme_dtool_task_run(dtool_lookup_server, default_readme_dtool_task_spec, dtool_lookup_config):
"""Will lookup some dataset on the server."""
logger = logging.getLogger(__name__)
logger.debug("Instantiate ReadmeDtoolTask with '{}'".format(
default_readme_dtool_task_spec))
t = ReadmeDtoolTask(**default_readme_dtool_task_spec)
fw_action = t.run_task({})
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
output = fw_action.stored_data['output']
# TODO: dataset creation in test
expected_respone = {
"creation_date": "2020-11-08",
"description": "testing description",
"expiration_date": "2022-11-08",
"funders": [
{
"code": "testing_code",
"organization": "testing_organization",
"program": "testing_program"
}
],
"owners": [
{
"email": "<EMAIL>",
"name": "<NAME>",
"orcid": "testing_orcid",
"username": "testing_user"
}
],
"project": "testing project"
}
assert compare(output, expected_respone)
def test_manifest_dtool_task_run(dtool_lookup_server, default_manifest_dtool_task_spec, dtool_lookup_config):
"""Will lookup some dataset on the server."""
logger = logging.getLogger(__name__)
logger.debug("Instantiate ManifestDtoolTask with '{}'".format(
default_manifest_dtool_task_spec))
t = ManifestDtoolTask(**default_manifest_dtool_task_spec)
fw_action = t.run_task({})
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
output = fw_action.stored_data['output']
# TODO: dataset creation in test
expected_respone = {
"dtoolcore_version": "3.18.0",
"hash_function": "md5sum_hexdigest",
"items": {
"eb58eb70ebcddf630feeea28834f5256c207edfd": {
"hash": "2f7d9c3e0cfd47e8fcab0c12447b2bf0",
"relpath": "simple_text_file.txt",
"size_in_bytes": 17,
"utc_timestamp": 1606595093.53965
}
}
}
marker = {
"dtoolcore_version": False,
"hash_function": "md5sum_hexdigest",
"items": {
"eb58eb70ebcddf630feeea28834f5256c207edfd": {
"hash": True,
"relpath": True,
"size_in_bytes": True,
"utc_timestamp": False,
}
}
}
assert compare(output, expected_respone, marker)
# complex workflow
@pytest.fixture
def workflow_initial_fw_spec(dtool_lookup_config):
"""Provide complex workflow test task_spec for QueryDtoolTask."""
return {
'initial_inputs': {
'query': json.dumps({
'base_uri': 'smb://test-share',
'name': {'$regex': 'test'},
}),
'search': {
'relpath': 'simple_text_file.txt',
},
'marker': {
'relpath': True,
},
},
'metadata': {
"creation_date": "2020-11-28",
"description": "description to override",
"project": "derived testing project"
}
}
@pytest.fixture
def workflow_query_dtool_task_spec(dtool_lookup_config):
"""Provide complex workflow test task_spec for QueryDtoolTask."""
return {
'dtool_config': dtool_lookup_config,
'stored_data': True,
'query_key': 'initial_inputs->query',
'limit': 1,
'expand': True,
'sort_key': 'frozen_at',
'sort_direction': -1,
'output': 'query_dtool_task->result',
'loglevel': logging.DEBUG
}
@pytest.fixture
def workflow_readme_dtool_task_spec(dtool_lookup_config):
"""Provide complex workflow test task_spec for QueryDtoolTask."""
return {
'dtool_config': dtool_lookup_config,
'stored_data': True,
'uri': {'key': 'query_dtool_task->result->uri'},
'output': 'metadata',
'metadata_fw_source_key': 'metadata',
'fw_supersedes_dtool': True,
'metadata_dtool_exclusions': {'expiration_date': True},
"metadata_fw_exclusions": {'description': True},
'loglevel': logging.DEBUG
}
@pytest.fixture
def workflow_manifest_dtool_task_spec(dtool_lookup_config):
"""Provide complex workflow test task_spec for ManifestDtoolTask."""
return {
'dtool_config': dtool_lookup_config,
'stored_data': True,
'uri': {'key': 'query_dtool_task->result->uri'},
'output': 'manifest_dtool_task->result',
'loglevel': logging.DEBUG
}
@pytest.fixture
def workflow_search_dict_task_spec():
"""Provide complex workflow test task_spec for SearchDictTask."""
return {
'input_key': 'manifest_dtool_task->result->items',
'search_key': 'initial_inputs->search',
'marker_key': 'initial_inputs->marker',
'limit': 1,
'expand': True,
'stored_data': True,
'output_key': 'search_dict_task->result',
'loglevel': logging.DEBUG
}
@pytest.fixture
def workflow_fetch_item_task_spec(dtool_smb_config):
"""Provide default test task_spec for CopyDatasetTask."""
return {
'item_id': {'key': 'search_dict_task->result'},
'source': {'key': 'query_dtool_task->result->uri'},
'filename': 'fetched_item.txt',
'dtool_config': dtool_smb_config,
'stored_data': True,
}
def test_complex_dtool_workflow(
tempdir, workflow_initial_fw_spec,
workflow_query_dtool_task_spec, workflow_readme_dtool_task_spec, workflow_manifest_dtool_task_spec,
workflow_search_dict_task_spec, workflow_fetch_item_task_spec,
dtool_lookup_server, dtool_lookup_config):
"""Query lookup server for datasets, subsequently query the readme, merge into fw_spec, then fetch specific file."""
logger = logging.getLogger(__name__)
# query
t = QueryDtoolTask(workflow_query_dtool_task_spec)
logger.debug("Instantiated QueryDtoolTask as:")
_log_nested_dict(logger.debug, t.as_dict())
fw_spec = workflow_initial_fw_spec
fw_action = t.run_task(fw_spec)
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
for mod in fw_action.mod_spec:
apply_mod(mod, fw_spec)
logger.debug("Modified fw_spec:")
_log_nested_dict(logger.debug, fw_spec)
expected_result = {
"base_uri": "smb://test-share",
"created_at": 1604860720.736,
"creator_username": "jotelha",
"dtoolcore_version": "3.17.0",
"frozen_at": 1606595093.551,
"name": "simple_test_dataset",
"tags": [],
"type": "dataset",
"uri": "smb://test-share/1a1f9fad-8589-413e-9602-5bbd66bfe675",
"uuid": "1a1f9fad-8589-413e-9602-5bbd66bfe675"
}
marker = _make_marker(expected_result)
marker['created_at'] = False
marker['frozen_at'] = False
marker['dtoolcore_version'] = False
assert compare(fw_spec['query_dtool_task']['result'], expected_result, marker)
# readme
t = ReadmeDtoolTask(workflow_readme_dtool_task_spec)
logger.debug("Instantiated ReadmeDtoolTask as:")
_log_nested_dict(logger.debug, t.as_dict())
fw_action = t.run_task(fw_spec)
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
for mod in fw_action.mod_spec:
apply_mod(mod, fw_spec)
logger.debug("Modified fw_spec:")
_log_nested_dict(logger.debug, fw_spec)
expected_result = {
"creation_date": "2020-11-28", # from initial fw_spec
"description": "testing description", # from dtool readme
# no "expiration_date" from dtool readme
"funders": [
{
"code": "testing_code",
"organization": "testing_organization",
"program": "testing_program"
}
],
"owners": [
{
"email": "<EMAIL>",
"name": "<NAME>",
"orcid": "testing_orcid",
"username": "testing_user"
}
],
"project": "derived testing project", # from fw_spec
}
marker = _make_marker(expected_result)
assert compare(fw_spec['metadata'], expected_result, marker)
# manifest
t = ManifestDtoolTask(workflow_manifest_dtool_task_spec)
logger.debug("Instantiated ManifestDtoolTask as:")
_log_nested_dict(logger.debug, t.as_dict())
fw_action = t.run_task(fw_spec)
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
for mod in fw_action.mod_spec:
apply_mod(mod, fw_spec)
logger.debug("Modified fw_spec:")
_log_nested_dict(logger.debug, fw_spec)
expected_result = {
"dtoolcore_version": "3.18.0",
"hash_function": "md5sum_hexdigest",
"items": {
"eb58eb70ebcddf630feeea28834f5256c207edfd": {
"hash": "2f7d9c3e0cfd47e8fcab0c12447b2bf0",
"relpath": "simple_text_file.txt",
"size_in_bytes": 17,
"utc_timestamp": 1606595093.53965
}
}
}
marker = {
"dtoolcore_version": False,
"hash_function": True,
"items": {
"eb58eb70ebcddf630feeea28834f5256c207edfd": {
"hash": True,
"relpath": True,
"size_in_bytes": True,
"utc_timestamp": False,
}
}
}
assert compare(fw_spec['manifest_dtool_task']['result'], expected_result, marker)
# search
t = SearchDictTask(workflow_search_dict_task_spec)
logger.debug("Instantiated SearchDictTask as:")
_log_nested_dict(logger.debug, t.as_dict())
fw_action = t.run_task(fw_spec)
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
for mod in fw_action.mod_spec:
apply_mod(mod, fw_spec)
logger.debug("Modified fw_spec:")
_log_nested_dict(logger.debug, fw_spec)
# fetch
t = FetchItemTask(workflow_fetch_item_task_spec)
fw_action = t.run_task(fw_spec)
logger.debug("FWAction:")
_log_nested_dict(logger.debug, fw_action.as_dict())
local_item_files = glob.glob('*.txt')
assert len(local_item_files) == 1
assert local_item_files[0] == 'fetched_item.txt'
with open('fetched_item.txt', 'r') as f:
        content = f.read()
assert content == 'Some test content'
| 2.078125 | 2 |
solog/filmanza/gameplay.py | Namratasingh10/SoloG | 1 | 12768335 | #!/usr/bin/env python
# coding: utf-8
import numpy as np
import pandas as pd
import random as rn
import time
from .game_over import game_over
from .run import run
score = 0 # Total score
h = 3 # Total Help
def question_number(l,n):
j=rn.randint(0,n-1)
if j in l:
        return question_number(l,n)    # already asked: draw again
else:
l.append(j)
return(int(j))
def hlp(h1,h2):    # renamed from 'help' to match the call in play() and avoid shadowing the builtin
print()
print('Starcast:',h1)
print('Release:',h2)
def play_show(q,out,check):
print()
for i in q:
if i in out:
print(f' {i}',end='')
check.append(i)
elif i==' ':
print(' /',end='')
check.append(i)
else:
print(' _',end='')
print()
print()
def play(q,h1,h2):
tries=5
out=['a','e','i','o','u','A','E','I','O','U','/','-']
print()
global h
flg=0
while tries>0:
check=[]
print()
print(f'Tries:{tries}')
print(f'Hints:{h}')
print('Score:',score)
print()
ans=input('INPUT:')
if ans in q:
if ans not in out:
out.append(ans)
elif ans=='qt':
exit()
elif ans=='hm':
run()
#home function
elif ans=='hp':
if flg==0:
if h>0:
hlp(h1,h2)
h-=1
flg+=1
continue
else:
print('Help finished')
continue
else:
print('Help Already Used')
continue
else:
tries-=1
play_show(q,out,check)
        #all characters revealed: word fully guessed, stop the loop
        if check==list(q):
            break
if tries==0:
print("Out of tries")
        print('You Lose')
game_over()
def show(q,h1,h2):
vowels=['a','e','i','o','u','A','E','I','O','U','/','-']
    num_count=0
    print()
    for i in q:
        time.sleep(0.5)
        if i.isdigit():    # q is a string, so test characters, not ints
            num_count+=1
if i in vowels:
print(f' {i}',end='')
elif i==' ':
print(' /',end='')
else:
print(' _',end='')
print()
if num_count!=0:
print('It contains number [0-9]')
print()
play(q,h1,h2)
def game_play(n,film,star,dt):
l=[]
global score
for i in range(10):
print("Type 'hp' for hint")
print("Type 'hm' to go home")
print("Type 'qt' to quit")
print()
print(f'Quest.{i+1}')
s=question_number(l,n)
        quest=film[s][0]
show(quest.lower(),star[s][0],dt[s][0])
score+=10
def hollywood_hard():
    df=pd.read_csv(r'solog\filmanza\questions\Hollywood-hard.csv')    # raw string: plain '\f' would be a form-feed escape
movie_name=np.asanyarray(df[['Name']])
star_cast=np.asanyarray(df[['Star Cast']])
release=np.asanyarray(df[['Year']])
n=len(movie_name)
game_play(n,movie_name,star_cast,release)
game_over()
    #game finished
def hollywood_medium():
    df=pd.read_csv(r'solog\filmanza\questions\Hollywood-medium.csv')
movie_name=np.asanyarray(df[['Name']])
star_cast=np.asanyarray(df[['Star Cast']])
release=np.asanyarray(df[['Year']])
n=len(movie_name)
game_play(n,movie_name,star_cast,release)
hollywood_hard()
def hollywood_easy():
    df=pd.read_csv(r'solog\filmanza\questions\Hollywood-Easy.csv')
movie_name=np.asanyarray(df[['Name']])
star_cast=np.asanyarray(df[['Star Cast']])
release=np.asanyarray(df[['Year']])
n=len(movie_name)
game_play(n,movie_name,star_cast,release)
hollywood_medium()
def bollywood_hard():
    df=pd.read_csv(r'solog\filmanza\questions\bollywood-hard.csv')
movie_name=np.asanyarray(df[['Name']])
star_cast=np.asanyarray(df[['Starcast']])
release=np.asanyarray(df[['Year']])
n=len(movie_name)
game_play(n,movie_name,star_cast,release)
game_over()
    #game finished
def bollywood_medium():
    df=pd.read_csv(r'solog\filmanza\questions\Bollywood-medium.csv')
movie_name=np.asanyarray(df[['Name']])
star_cast=np.asanyarray(df[['Starcast']])
release=np.asanyarray(df[['Year']])
n=len(movie_name)
game_play(n,movie_name,star_cast,release)
bollywood_hard()
def bollywood_easy():
    df=pd.read_csv(r'solog\filmanza\questions\Bollywood-easy.csv')
movie_name=np.asanyarray(df[['Name']])
star_cast=np.asanyarray(df[['Starcast']])
release=np.asanyarray(df[['Year']])
n=len(movie_name)
game_play(n,movie_name,star_cast,release)
bollywood_medium()
| 3.734375 | 4 |
src/apps/leaderboards/strategies.py | binfeng1018/competitions-v2 | 19 | 12768336 | <reponame>binfeng1018/competitions-v2<filename>src/apps/leaderboards/strategies.py<gh_stars>10-100
import logging
from django.db.models import Sum, Q
from rest_framework.generics import get_object_or_404
from competitions.models import Submission
from leaderboards.models import Leaderboard
logger = logging.getLogger(__name__)
class BaseModeStrategy:
def get_submission_and_phase_and_leaderboard(self, submission_pk):
submission = get_object_or_404(Submission, pk=submission_pk)
phase = submission.phase
leaderboard = phase.leaderboard
return submission, phase, leaderboard
def update_submission(self, submission, submission_pk, leaderboard):
if submission.has_children:
for s in Submission.objects.filter(parent=submission_pk):
s.leaderboard = leaderboard
s.save()
else:
submission.leaderboard = leaderboard
submission.save()
def put_on_leaderboard(self, request, submission_pk):
submission, phase, leaderboard = self.get_submission_and_phase_and_leaderboard(submission_pk=submission_pk)
# process specify logic for different mode(for difference display mode)
self.do_execute(phase, request, submission)
self.update_submission(submission=submission,
submission_pk=submission_pk,
leaderboard=leaderboard)
def do_execute(self, phase, request, submission):
pass
class ManualModeStrategy(BaseModeStrategy):
def put_on_leaderboard(self, request, submission_pk):
"""do nothing by default"""
pass
def __str__(self):
return "ManuallyModeStrategy"
class LastestModeStrategy(BaseModeStrategy):
def do_execute(self, phase, request, submission):
"""add latest submission in leaderboard"""
Submission.objects.filter(phase=phase, owner=submission.owner).update(leaderboard=None)
def __str__(self):
return "LastestModeStrategy"
class AllModeStrategy(BaseModeStrategy):
def __str__(self):
return "AllModeStrategy"
class BestModeStrategy(BaseModeStrategy):
def put_on_leaderboard(self, request, submission_pk):
"""fetch all submission, then choose best submission and put on leaderboard"""
submission, phase, leaderboard = super().get_submission_and_phase_and_leaderboard(submission_pk=submission_pk)
Submission.objects.filter(phase=phase, owner=submission.owner).update(leaderboard=None)
best_submission = self._choose_best_submission(leaderboard=leaderboard, owner=submission.owner, phase=phase)
super().update_submission(submission=best_submission,
submission_pk=best_submission.id,
leaderboard=leaderboard)
def _choose_best_submission(self, leaderboard, owner, phase):
"""choose best submission"""
primary_col = leaderboard.columns.get(index=leaderboard.primary_index)
        # 'primary_col' here names the Sum annotation added to the queryset below
        ordering = [f'{"-" if primary_col.sorting == "desc" else ""}primary_col']
submissions = Submission.objects.filter(phase=phase, owner=owner) \
.select_related('owner').prefetch_related('scores') \
.annotate(primary_col=Sum('scores__score', filter=Q(scores__column=primary_col)))
for column in leaderboard.columns.exclude(id=primary_col.id).order_by('index'):
col_name = f'col{column.index}'
ordering.append(f'{"-" if column.sorting == "desc" else ""}{col_name}')
kwargs = {
col_name: Sum('scores__score', filter=Q(scores__column__index=column.index))
}
submissions = submissions.annotate(**kwargs)
submissions = submissions.order_by(*ordering, 'created_when')
return submissions[0]
def __str__(self):
return "BestModeStrategy"
class StrategyFactory:
@staticmethod
def create_by_submission_rule(submission_rule):
if Leaderboard.FORCE_LAST == submission_rule:
return LastestModeStrategy()
elif Leaderboard.FORCE_LATEST_MULTIPLE == submission_rule:
return AllModeStrategy()
elif Leaderboard.FORCE_BEST == submission_rule:
return BestModeStrategy()
else:
return ManualModeStrategy()
def put_on_leaderboard_by_submission_rule(request, submission_pk, submission_rule):
"""add submission score to leaderboard by display strategy"""
strategy = StrategyFactory.create_by_submission_rule(submission_rule)
strategy.put_on_leaderboard(request, submission_pk)
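# --- Added illustrative usage (sketch, not part of the original module) ---
# A hypothetical call site, e.g. from a DRF view; the attribute name
# `submission_rule` on the leaderboard is an assumption for illustration.
#
# rule = submission.phase.leaderboard.submission_rule
# put_on_leaderboard_by_submission_rule(request, submission.pk, rule)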
| 2.125 | 2 |
leetcode/231/231.power-of-two.py | Algorithm-Cracker/Leetcode | 1 | 12768337 | <filename>leetcode/231/231.power-of-two.py
#
# @lc app=leetcode id=231 lang=python3
#
# [231] Power of Two
#
# @lc code=start
class Solution:
def isPowerOfTwo(self, n: int) -> bool:
        # repeatedly halve n and inspect the final result
if n < 1: return False
elif n == 1: return True
return self.isPowerOfTwo(n/2)
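    # Added note (not part of the submitted solution): a common O(1),
    # non-recursive alternative relies on powers of two having exactly one
    # set bit, so n & (n - 1) clears it to zero:
    #
    #   def isPowerOfTwo(self, n: int) -> bool:
    #       return n > 0 and n & (n - 1) == 0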
def test(self):
assert(self.isPowerOfTwo(1)==True)
assert(self.isPowerOfTwo(16)==True)
assert(self.isPowerOfTwo(218)==False)
sol = Solution()
sol.test()
# @lc code=end
| 3.734375 | 4 |
src/docserver/application_methods/delete.py | djpugh/docserver | 1 | 12768338 | import logging
from docserver.api import schemas
from docserver.config import config
from docserver.db import models as db_models
logger = logging.getLogger(__name__)
def delete_package(package: schemas.BasePackage, provided_permissions=None):
db = config.db.local_session()
packages = db_models.Package.read(params=package.dict(), db=db)
deleted = []
for package in packages:
if package.is_authorised(provided_permissions, 'delete'):
package.delete(db=db)
deleted.append(package.name)
return deleted
def delete_version(documentation_version: schemas.BasePackageVersion, provided_permissions=None):
db = config.db.local_session()
packages = db_models.Package.read(params=documentation_version.dict(), db=db)
deleted = []
for package in packages:
package_version = package.get_version(documentation_version.version)
if package_version and package.is_authorised(provided_permissions, 'delete'):
package_version.delete(db=db)
deleted.append(f'{package.name}-{package_version.version}')
db.refresh(package)
if not package.versions:
package.delete(db=db)
deleted.append(package.name)
return deleted
| 2.328125 | 2 |
external/trezor-common/tools/coin_info.py | makotoproject/makoto | 4 | 12768339 | <filename>external/trezor-common/tools/coin_info.py
#!/usr/bin/env python3
from collections import defaultdict, OrderedDict
import re
import os
import json
import glob
import logging
try:
import requests
except ImportError:
requests = None
log = logging.getLogger(__name__)
DEFS_DIR = os.path.abspath(
os.environ.get("DEFS_DIR") or os.path.join(os.path.dirname(__file__), "..", "defs")
)
def load_json(*path):
"""Convenience function to load a JSON file from DEFS_DIR."""
if len(path) == 1 and path[0].startswith("/"):
filename = path[0]
else:
filename = os.path.join(DEFS_DIR, *path)
with open(filename) as f:
return json.load(f, object_pairs_hook=OrderedDict)
# ====== CoinsInfo ======
class CoinsInfo(dict):
"""Collection of information about all known kinds of coins.
It contains the following lists:
`bitcoin` for btc-like coins,
`eth` for ethereum networks,
`erc20` for ERC20 tokens,
`nem` for NEM mosaics,
`misc` for other networks.
Accessible as a dict or by attribute: `info["misc"] == info.misc`
"""
def as_list(self):
return sum(self.values(), [])
def as_dict(self):
return {coin["key"]: coin for coin in self.as_list()}
def __getattr__(self, attr):
if attr in self:
return self[attr]
else:
raise AttributeError(attr)
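# --- Added illustrative usage (sketch, not part of the original module) ---
# CoinsInfo is a dict of buckets; attribute access is sugar for item access,
# so both spellings below refer to the same list:
#
# info = coin_info()
# assert info["misc"] is info.misc
# flat = info.as_list()      # one flat list across all buckets
# by_key = info.as_dict()    # keyed entries, e.g. by_key["bitcoin:BTC"]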
# ====== coin validation ======
def check_type(val, types, nullable=False, empty=False, regex=None, choice=None):
# check nullable
if val is None:
if nullable:
return
else:
raise ValueError("Missing required value")
# check type
if not isinstance(val, types):
raise TypeError("Wrong type (expected: {})".format(types))
# check empty
if isinstance(val, (list, dict)) and not empty and not val:
raise ValueError("Empty collection")
# check regex
if regex is not None:
if types is not str:
raise TypeError("Wrong type for regex check")
if not re.search(regex, val):
raise ValueError("Value does not match regex {}".format(regex))
# check choice
if choice is not None and val not in choice:
choice_str = ", ".join(choice)
raise ValueError("Value not allowed, use one of: {}".format(choice_str))
def check_key(key, types, optional=False, **kwargs):
def do_check(coin):
if key not in coin:
if optional:
return
else:
raise KeyError("{}: Missing key".format(key))
try:
check_type(coin[key], types, **kwargs)
except Exception as e:
raise ValueError("{}: {}".format(key, e)) from e
return do_check
BTC_CHECKS = [
check_key("coin_name", str, regex=r"^[A-Z]"),
check_key("coin_shortcut", str, regex=r"^t?[A-Z]{3,}$"),
check_key("coin_label", str, regex=r"^[A-Z]"),
check_key("website", str, regex=r"^http.*[^/]$"),
check_key("github", str, regex=r"^https://github.com/.*[^/]$"),
check_key("maintainer", str),
check_key(
"curve_name", str, choice=["secp256k1", "secp256k1_decred", "secp256k1_groestl", "secp256k1_smart"]
),
check_key("address_type", int),
check_key("address_type_p2sh", int),
check_key("maxfee_kb", int),
check_key("minfee_kb", int),
check_key("hash_genesis_block", str, regex=r"^[0-9a-f]{64}$"),
check_key("xprv_magic", int),
check_key("xpub_magic", int),
check_key("xpub_magic_segwit_p2sh", int, nullable=True),
check_key("xpub_magic_segwit_native", int, nullable=True),
check_key("slip44", int),
check_key("segwit", bool),
check_key("decred", bool),
check_key("fork_id", int, nullable=True),
check_key("force_bip143", bool),
check_key("bip115", bool),
check_key("default_fee_b", dict),
check_key("dust_limit", int),
check_key("blocktime_seconds", int),
check_key("signed_message_header", str),
check_key("uri_prefix", str, regex=r"^[a-z]+$"),
check_key("min_address_length", int),
check_key("max_address_length", int),
check_key("bech32_prefix", str, regex=r"^[a-z]+$", nullable=True),
check_key("cashaddr_prefix", str, regex=r"^[a-z]+$", nullable=True),
check_key("bitcore", list, empty=True),
check_key("blockbook", list, empty=True),
]
def validate_btc(coin):
errors = []
for check in BTC_CHECKS:
try:
check(coin)
except Exception as e:
errors.append(str(e))
magics = [
coin[k]
for k in (
"xprv_magic",
"xpub_magic",
"xpub_magic_segwit_p2sh",
"xpub_magic_segwit_native",
)
if coin[k] is not None
]
# each of those must be unique
# therefore length of list == length of set of unique values
if len(magics) != len(set(magics)):
errors.append("XPUB/XPRV magic numbers must be unique")
if coin["address_type"] == coin["address_type_p2sh"]:
errors.append("address_type must be distinct from address_type_p2sh")
if not coin["maxfee_kb"] >= coin["minfee_kb"]:
errors.append("max fee must not be smaller than min fee")
if not coin["max_address_length"] >= coin["min_address_length"]:
errors.append("max address length must not be smaller than min address length")
for bc in coin["bitcore"] + coin["blockbook"]:
if bc.endswith("/"):
errors.append("make sure URLs don't end with '/'")
return errors
# ======= Coin json loaders =======
def _load_btc_coins():
"""Load btc-like coins from `coins/*.json`"""
coins = []
for filename in glob.glob(os.path.join(DEFS_DIR, "coins", "*.json")):
coin = load_json(filename)
coin.update(
name=coin["coin_label"],
shortcut=coin["coin_shortcut"],
key="bitcoin:{}".format(coin["coin_shortcut"]),
icon=filename.replace(".json", ".png"),
)
coins.append(coin)
return coins
def _load_ethereum_networks():
"""Load ethereum networks from `ethereum/networks.json`"""
networks = load_json("ethereum", "networks.json")
for network in networks:
network.update(key="eth:{}".format(network["shortcut"]))
return networks
def _load_erc20_tokens():
"""Load ERC20 tokens from `ethereum/tokens` submodule."""
networks = _load_ethereum_networks()
tokens = []
for network in networks:
if network["name"].startswith("Ethereum Testnet "):
idx = len("Ethereum Testnet ")
chain = network["name"][idx : idx + 3]
else:
chain = network["shortcut"]
chain = chain.lower()
if not chain:
continue
chain_path = os.path.join(DEFS_DIR, "ethereum", "tokens", "tokens", chain)
for filename in glob.glob(os.path.join(chain_path, "*.json")):
token = load_json(filename)
token.update(
chain=chain,
chain_id=network["chain_id"],
address_bytes=bytes.fromhex(token["address"][2:]),
shortcut=token["symbol"],
key="erc20:{}:{}".format(chain, token["symbol"]),
)
tokens.append(token)
return tokens
def _load_nem_mosaics():
"""Loads NEM mosaics from `nem/nem_mosaics.json`"""
mosaics = load_json("nem", "nem_mosaics.json")
for mosaic in mosaics:
shortcut = mosaic["ticker"].strip()
mosaic.update(shortcut=shortcut, key="nem:{}".format(shortcut))
return mosaics
def _load_misc():
"""Loads miscellaneous networks from `misc/misc.json`"""
others = load_json("misc/misc.json")
for other in others:
other.update(key="misc:{}".format(other["shortcut"]))
return others
# ====== support info ======
RELEASES_URL = "https://wallet.trezor.io/data/firmware/{}/releases.json"
MISSING_SUPPORT_MEANS_NO = ("connect", "webwallet")
VERSIONED_SUPPORT_INFO = ("trezor1", "trezor2")
def get_support_data():
"""Get raw support data from `support.json`."""
return load_json("support.json")
def latest_releases():
"""Get latest released firmware versions for Trezor 1 and 2"""
if not requests:
raise RuntimeError("requests library is required for getting release info")
latest = {}
for v in ("1", "2"):
releases = requests.get(RELEASES_URL.format(v)).json()
latest["trezor" + v] = max(tuple(r["version"]) for r in releases)
return latest
def is_token(coin):
return coin["key"].startswith("erc20:")
def support_info_single(support_data, coin):
"""Extract a support dict from `support.json` data.
Returns a dict of support values for each "device", i.e., `support.json`
top-level key.
The support value for each device is determined in order of priority:
    * if the coin is a duplicate ERC20 token, its support is `False`
    * if the coin has an entry in `unsupported`, its support is `False`
    * if the coin has an entry in `supported` its support is that entry
      (usually a version string, or `True` for connect/webwallet)
    * for devices in MISSING_SUPPORT_MEANS_NO, missing support means `False`
    * tokens without an entry are presumed "soon"; anything else is `None`
"""
support_info = {}
key = coin["key"]
dup = coin.get("duplicate")
for device, values in support_data.items():
if dup and is_token(coin):
support_value = False
elif key in values["unsupported"]:
support_value = False
elif key in values["supported"]:
support_value = values["supported"][key]
elif device in MISSING_SUPPORT_MEANS_NO:
support_value = False
elif is_token(coin):
# tokens are implicitly supported in next release
support_value = "soon"
else:
support_value = None
support_info[device] = support_value
return support_info
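# --- Added illustrative result (sketch, not part of the original module) ---
# A hypothetical resolved entry for one coin; the version strings below are
# invented purely for illustration:
#
# support_info_single(get_support_data(), coin)
# # -> {"trezor1": "1.8.0", "trezor2": "2.1.0",
# #     "connect": True, "webwallet": False}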
def support_info(coins):
"""Generate Trezor support information.
Takes a collection of coins and generates a support-info entry for each.
The support-info is a dict with keys based on `support.json` keys.
These are usually: "trezor1", "trezor2", "connect" and "webwallet".
The `coins` argument can be a `CoinsInfo` object, a list or a dict of
coin items.
Support information is taken from `support.json`.
"""
if isinstance(coins, CoinsInfo):
coins = coins.as_list()
elif isinstance(coins, dict):
coins = coins.values()
support_data = get_support_data()
support = {}
for coin in coins:
support[coin["key"]] = support_info_single(support_data, coin)
return support
# ====== data cleanup functions ======
def _ensure_mandatory_values(coins):
"""Checks that every coin has the mandatory fields: name, shortcut, key"""
for coin in coins:
if not all(coin.get(k) for k in ("name", "shortcut", "key")):
raise ValueError(coin)
def symbol_from_shortcut(shortcut):
symsplit = shortcut.split(" ", maxsplit=1)
return symsplit[0], symsplit[1] if len(symsplit) > 1 else ""
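# Added examples (not in the original): splits on the first space only, e.g.
#   symbol_from_shortcut("CAT (BitClave)")  ->  ("CAT", "(BitClave)")
#   symbol_from_shortcut("MIT")             ->  ("MIT", "")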
def mark_duplicate_shortcuts(coins):
"""Finds coins with identical `shortcut`s.
Updates their keys and sets a `duplicate` field.
The logic is a little crazy.
The result of this function is a dictionary of _buckets_, each of which is
indexed by the duplicated symbol, or `_override`. The `_override` bucket will
contain all coins that are set to `true` in `duplicity_overrides.json`. These
will _always_ be marked as duplicate (and later possibly deleted if they're ERC20).
The rest will disambiguate based on the full shortcut.
(i.e., when `shortcut` is `BTL (Battle)`, the `symbol` is just `BTL`).
If _all tokens_ in the bucket have shortcuts with distinct suffixes, e.g.,
`CAT (BitClave)` and `CAT (Blockcat)`, we DO NOT mark them as duplicate.
These will then be supported and included in outputs.
If even one token in the bucket _does not_ have a distinct suffix, e.g.,
`MIT` and `MIT (Mychatcoin)`, the whole bucket is marked as duplicate.
If a token is set to `false` in `duplicity_overrides.json`, it will NOT
be marked as duplicate in this step, even if it is part of a "bad" bucket.
"""
dup_symbols = defaultdict(list)
dup_keys = defaultdict(list)
def dups_only(dups):
return {k: v for k, v in dups.items() if len(v) > 1}
for coin in coins:
symbol, _ = symbol_from_shortcut(coin["shortcut"].lower())
dup_symbols[symbol].append(coin)
dup_keys[coin["key"]].append(coin)
dup_symbols = dups_only(dup_symbols)
dup_keys = dups_only(dup_keys)
# first deduplicate keys so that we can identify overrides
for values in dup_keys.values():
for i, coin in enumerate(values):
coin["key"] += ":" + str(i)
# load overrides and put them into their own bucket
overrides = load_json("duplicity_overrides.json")
override_bucket = []
for coin in coins:
if overrides.get(coin["key"], False):
coin["duplicate"] = True
override_bucket.append(coin)
# mark duplicate symbols
for values in dup_symbols.values():
splits = (symbol_from_shortcut(coin["shortcut"]) for coin in values)
suffixes = {suffix for _, suffix in splits}
# if 1. all suffixes are distinct and 2. none of them are empty
if len(suffixes) == len(values) and all(suffixes):
# Allow the whole bucket.
# For all intents and purposes these should be considered non-dups
# So we won't mark them as dups here
# But they still have their own bucket, and also overrides can
# explicitly mark them as duplicate one step before, in which case
# they *still* keep duplicate status (and possibly are deleted).
continue
for coin in values:
# allow overrides to skip this; if not listed in overrides, assume True
is_dup = overrides.get(coin["key"], True)
if is_dup:
coin["duplicate"] = True
# again: still in dups, but not marked as duplicate and not deleted
dup_symbols["_override"] = override_bucket
return dup_symbols
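# --- Added worked example (not part of the original module) ---
# Using the docstring's own cases: "CAT (BitClave)" and "CAT (Blockcat)" land
# in one bucket but carry distinct non-empty suffixes, so neither is marked
# duplicate. "MIT" and "MIT (Mychatcoin)" share a bucket where one suffix is
# empty, so the whole bucket is marked duplicate -- unless
# duplicity_overrides.json explicitly sets a coin's key to false.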
def _btc_sort_key(coin):
if coin["name"] in ("Bitcoin", "Testnet"):
return "000000" + coin["name"]
else:
return coin["name"]
def collect_coin_info():
"""Returns all definition as dict organized by coin type.
`coins` for btc-like coins,
`eth` for ethereum networks,
`erc20` for ERC20 tokens,
`nem` for NEM mosaics,
`misc` for other networks.
Automatically removes duplicate symbols from the result.
"""
all_coins = CoinsInfo(
bitcoin=_load_btc_coins(),
eth=_load_ethereum_networks(),
erc20=_load_erc20_tokens(),
nem=_load_nem_mosaics(),
misc=_load_misc(),
)
for k, coins in all_coins.items():
if k == "bitcoin":
coins.sort(key=_btc_sort_key)
elif k == "nem":
# do not sort nem
pass
elif k == "eth":
# sort ethereum networks by chain_id
coins.sort(key=lambda c: c["chain_id"])
else:
coins.sort(key=lambda c: c["key"].upper())
_ensure_mandatory_values(coins)
return all_coins
def coin_info_with_duplicates():
"""Collects coin info, detects duplicates but does not remove them.
Returns the CoinsInfo object and duplicate buckets.
"""
all_coins = collect_coin_info()
buckets = mark_duplicate_shortcuts(all_coins.as_list())
return all_coins, buckets
def coin_info():
"""Collects coin info, marks and prunes duplicate ERC20 symbols, fills out support
info and returns the result.
"""
all_coins, _ = coin_info_with_duplicates()
all_coins["erc20"] = [
coin for coin in all_coins["erc20"] if not coin.get("duplicate")
]
return all_coins
def search(coins, keyword):
kwl = keyword.lower()
if isinstance(coins, CoinsInfo):
coins = coins.as_list()
for coin in coins:
key = coin["key"].lower()
name = coin["name"].lower()
shortcut = coin["shortcut"].lower()
symbol, suffix = symbol_from_shortcut(shortcut)
if (
kwl == key
or kwl in name
or kwl == shortcut
or kwl == symbol
or kwl in suffix
):
yield coin
| 2.484375 | 2 |
config_state/serializers/_pickle.py | nicolaspi/config-state | 2 | 12768340 | <gh_stars>1-10
import pickle
from typing import IO
from config_state import ConfigField
from config_state import register
from config_state.config_state import ObjectState
from config_state.serializers.serializer import Serializer
@register
class Pickle(Serializer):
is_binary: bool = ConfigField(True,
"Whether the serializer is binary",
static=True)
protocol_version: int = ConfigField(pickle.HIGHEST_PROTOCOL, "Pickle's "
"protocol version")
def __init__(self, config=None):
super().__init__(config)
def _dump(self, object_state: ObjectState, stream: IO[bytes]):
pickle.dump(object_state, stream, protocol=self.protocol_version)
def _load_state(self, stream: IO[bytes]) -> ObjectState:
object = pickle.load(stream)
return object
| 2.515625 | 3 |
helpers/os_methods.py | finnurtorfa/aflafrettir.is | 0 | 12768341 | from os import walk, remove
def get_all_files(directory):
""" Method for listing files within a directory
"""
f = []
for (_, _, filenames) in walk(directory):
f.extend(filenames)
return f
def remove_file(filename, directory):
""" Method for removing a file within a directory
"""
try:
remove(directory + '/' + filename)
except FileNotFoundError:
pass
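# --- Added illustrative usage (sketch, not part of the original module) ---
# names = get_all_files('/tmp')      # every filename under /tmp, recursively
# remove_file('stale.log', '/tmp')   # silently ignores a missing file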
| 4.03125 | 4 |
ejemplo-mqtt-viewer.py | Marcombo/cursio_IoT | 0 | 12768342 | <filename>ejemplo-mqtt-viewer.py
import paho.mqtt.client as mqtt
import time
def on_message(client,userdata,message):
data = str(message.payload.decode("utf-8"))
topic = str(message.topic)
print(topic,data)
broker_user = "xxxx"
broker_password = "<PASSWORD>"
broker_address = "xxxx"
broker_port = 1883
broker_topic = "xxxx"
#print("creating new instance")
mqttclient = mqtt.Client("mqttviewer") #create new instance
mqttclient.username_pw_set(broker_user,broker_password)
#print("connecting to broker")
mqttclient.connect(broker_address,port=broker_port) #connect to broker
mqttclient.loop_start()
#print("Subscribing to topic")
mqttclient.subscribe(broker_topic)
mqttclient.on_message=on_message #attach function to callback
try:
while True:
time.sleep(1)
except KeyboardInterrupt:
print("bye")
mqttclient.disconnect()
mqttclient.loop_stop()
| 3 | 3 |
heat_integration.py | glluk/ci-tests | 0 | 12768343 | # Copyright 2016 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import unittest
import logging
import os
from random import randint
import subprocess
from mos_tests.functions import common as common_functions
import pytest
import paramiko
import scp
from mos_tests.functions.base import OpenStackTestCase
logger = logging.getLogger(__name__)
@pytest.mark.undestructive
class HeatFunctionalTests(OpenStackTestCase):
"""Heat scenario and functional tests."""
def setUp(self):
super(self.__class__, self).setUp()
# Get path on node to 'templates' dir
self.templates_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'templates')
# Get path on node to 'images' dir
self.images_dir = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'images')
self.uid_list = []
def test_heat_functional(self):
"""Run Heat integration tests (1 controller Neutron)
Scenario:
1. Install packages
2. Clone repo with heat-integration tests
            3. Configure controller (create heat-net and non-admin user)
4. Generate .conf file for tests
5. Run heat integration tests
"""
        # create a shared net with subnet using a heat template (net available to other tenants)
timeout = 20
pool_name = 'someSub'
stack_name = 'heat-stack-' + str(randint(1, 0x7fffffff))
template_content = common_functions.read_template(
self.templates_dir, 'Heat_integration_resource.yaml')
uid = common_functions.create_stack(self.heat, stack_name,
template_content)
self.uid_list.append(uid)
stacks_id = [s.id for s in self.heat.stacks.list()]
self.assertIn(uid, stacks_id)
self.assertTrue(common_functions.check_stack_status(stack_name,
self.heat,
'CREATE_COMPLETE',
timeout))
#Check net
sub_net = self.neutron.list_subnets()
sub_net_names = [x['name'] for x in sub_net['subnets']]
self.assertIn(pool_name, sub_net_names)
#Create and activate image
file_name = 'cirros-0.3.4-x86_64-disk.img.txt'
image_name = 'Test'
# Prepare full path to image file. Return e.g.:
# Like: /root/mos_tests/heat/images/cirros-0.3.4-x86_64-disk.img.txt
image_link_location = os.path.join(self.images_dir, file_name)
# Download image on node. Like: /tmp/cirros-0.3.4-x86_64-disk.img
image_path = common_functions.download_image(image_link_location)
# Create image in Glance
image = self.glance.images.create(name=image_name,
os_distro='Cirros',
disk_format='qcow2',
visibility='public',
container_format='bare')
# Check that status is 'queued'
if image.status != 'queued':
raise AssertionError("ERROR: Image status after creation is:"
"[{0}]. "
"Expected [queued]".format(image.status))
# Put image-file in created Image
with open(image_path, 'rb') as image_content:
self.glance.images.upload(image.id, image_content)
# Check that status of image is 'active'
self.assertEqual(
self.glance.images.get(image.id)['status'],
'active',
'After creation in Glance image status is [{0}]. '
'Expected is [active]'
.format(self.glance.images.get(image.id)['status']))
        # Prepare ssh sessions
cmd = "arp -an | grep fuel-pxe | cut -d ')' -f 1 | cut -d '(' -f 2 | awk '(NR == 1)'"
master = os.popen(cmd).read()
        master = master.strip("\n")
port = 22
transport = paramiko.Transport((master, port))
transport.connect(username='root', password='<PASSWORD>')
sftp = paramiko.SFTPClient.from_transport(transport)
remotepath = '/root/prepare-config.sh'
localpath = 'prepare-config.sh'
sftp.put(localpath, remotepath)
remotepath = '/root/contr.sh'
localpath = 'contr.sh'
sftp.put(localpath, remotepath)
sftp.close()
transport.close()
cmd = "chmod +x contr.sh && ./contr.sh "
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(master, username='root', password='<PASSWORD>')
stdin, stdout, stderr = ssh.exec_command(cmd)
print "stderr: " , stderr.readline()
print "pwd: ", stdout.readline()
ssh.close()
| 1.585938 | 2 |
main.py | Assaviv/demo_calculator | 0 | 12768344 | import re
import argparse
from typing import Optional, List
from dataclasses import dataclass
from lark import Lark, Transformer, v_args
USAGE = "A command line calculator"
@dataclass
class Token:
name: str
value: str
calc_grammar = """
?start: sum
| NAME "=" sum -> assign_var
?sum: product
| sum "+" product -> add
| sum "-" product -> sub
?product: atom
| product "*" atom -> mul
| product "/" atom -> div
?atom: NUMBER -> number
| "-" atom -> neg
| NAME -> var
| "(" sum ")"
%import common.CNAME -> NAME
%import common.NUMBER
%import common.WS_INLINE
%ignore WS_INLINE
"""
@v_args(inline=True) # Affects the signatures of the methods
class CalculateTree(Transformer):
from operator import add, sub, mul, truediv as div, neg
number = float
def __init__(self):
self.vars = {}
def assign_var(self, name, value):
self.vars[name] = value
return value
def var(self, name):
try:
return self.vars[name]
except KeyError:
raise Exception("Variable not found: %s" % name)
def calculate(formula):
calc_parser = Lark(calc_grammar, parser='lalr',
transformer=CalculateTree())
result = calc_parser.parse(formula)
return result
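# --- Added illustrative usage (examples, not part of the original module) ---
# The grammar supports +, -, *, /, unary minus, parentheses and variables:
#
# calculate("1 + 2 * 3")   # -> 7.0
# calculate("a = 4")       # -> 4.0; note that each calculate() call builds a
#                          # fresh parser, so variables don't persist between calls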
def main():
parser = argparse.ArgumentParser("calc")
parser.add_argument("formula", action="store")
args = parser.parse_args()
formula = args.formula
result = calculate(formula)
if result is not None:
print(result)
if __name__ == '__main__':
main()
| 2.84375 | 3 |
app/closeLoop/trainModels-prcp.py | fkwai/geolearn | 0 | 12768345 | <gh_stars>0
from hydroDL import pathSMAP, master, utils
from hydroDL.master import default
from hydroDL.post import plot, stat
import os
import matplotlib.pyplot as plt
import numpy as np
import torch
# training
tLst = [[20150501, 20151001], [20150402, 20160401]]
tagLst = ['2015RK', '2015']
for k in range(len(tLst)):
optData = default.update(
default.optDataSMAP,
varT=['APCP_FORA'],
rootDB=pathSMAP['DB_L3_NA'],
subset='CONUSv2f1',
tRange=tLst[k],
daObs=1)
optModel = default.optLstmClose
optLoss = default.optLossRMSE
optTrain = default.update(default.optTrainSMAP, nEpoch=500)
out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_DA_Prcp_' + tagLst[k])
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.runTrain(masterDict, cudaID=(k+1) % 3, screen='DA' + tagLst[k])
optData = default.update(
default.optDataSMAP,
varT=['APCP_FORA'],
rootDB=pathSMAP['DB_L3_NA'],
subset='CONUSv2f1',
tRange=tLst[k])
optModel = default.optLstm
optLoss = default.optLossRMSE
optTrain = default.update(default.optTrainSMAP, nEpoch=500)
out = os.path.join(pathSMAP['Out_L3_NA'], 'DA',
'CONUSv2f1_LSTM_Prcp_'+tagLst[k])
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.runTrain(masterDict, cudaID=(k+1) % 3, screen='LSTM' + tagLst[k])
# training
tLst = [[20150501, 20151001]]
yrLst = ['2015RK']
for k in range(len(tLst)):
optData = default.update(
default.optDataSMAP,
rootDB=pathSMAP['DB_L3_NA'],
subset='CONUSv2f1',
tRange=tLst[k],
daObs=1)
optModel = default.optLstmClose
optLoss = default.optLossRMSE
optTrain = default.update(default.optTrainSMAP, nEpoch=500)
out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_DA_' + yrLst[k])
masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
master.runTrain(masterDict, cudaID=2, screen='DA' + yrLst[k])
# optData = default.update(
# default.optDataSMAP,
# rootDB=pathSMAP['DB_L3_NA'],
# subset='CONUSv2f1',
# tRange=tLst[k])
# optModel = default.optLstm
# optLoss = default.optLossRMSE
# optTrain = default.update(default.optTrainSMAP, nEpoch=300)
# out = os.path.join(pathSMAP['Out_L3_NA'], 'DA', 'CONUSv2f1_LSTM'+yrLst[k])
# masterDict = master.wrapMaster(out, optData, optModel, optLoss, optTrain)
# master.runTrain(masterDict, cudaID=k % 3, screen='LSTM' + yrLst[k])
| 1.625 | 2 |
cactus/mime.py | danielchasehooper/Cactus | 1,048 | 12768346 | <filename>cactus/mime.py
import os
import mimetypes
MIMETYPE_MAP = {
'.js': 'text/javascript',
'.mov': 'video/quicktime',
'.mp4': 'video/mp4',
'.m4v': 'video/x-m4v',
'.3gp': 'video/3gpp',
'.woff': 'application/font-woff',
'.eot': 'application/vnd.ms-fontobject',
'.ttf': 'application/x-font-truetype',
'.otf': 'application/x-font-opentype',
'.svg': 'image/svg+xml',
}
MIMETYPE_DEFAULT = 'application/octet-stream'
def guess(path):
if not path:
return MIMETYPE_DEFAULT
base, ext = os.path.splitext(path)
if ext.lower() in MIMETYPE_MAP:
return MIMETYPE_MAP[ext.lower()]
mime_type, encoding = mimetypes.guess_type(path)
if mime_type:
return mime_type
return MIMETYPE_DEFAULT
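# --- Added illustrative usage (examples, not part of the original module) ---
# guess('app.js')        # -> 'text/javascript' (explicit MIMETYPE_MAP entry)
# guess('page.html')     # -> 'text/html' (falls through to mimetypes)
# guess('file.unknown')  # -> 'application/octet-stream' (default)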
| 2.34375 | 2 |
TodoApp/todos/urls.py | ExeExzo/Django-ToDo | 0 | 12768347 | from django.urls import path
from . import views
from .views import CustomLoginView, RegisterPage
from django.contrib.auth.views import LogoutView
urlpatterns = [
path('login/', CustomLoginView.as_view(),name='login'),
path('logout/', LogoutView.as_view(next_page='login'),name='logout'),
path('register/', RegisterPage.as_view(),name='register'),
path('list/',views.list_todo_items, name='items'),
path('insert_todo/',views.insert_todo_item, name='insert_todo_item'),
path('delete_todo/<int:todo_id>/',views.delete_todo_item, name='delete_todo_item'),
] | 1.921875 | 2 |
bazaar/__init__.py | ronbeltran/bazaar | 2 | 12768348 | <reponame>ronbeltran/bazaar
import os
try:
from secrets import choice # >= python3.6
except ImportError:
from random import choice # < python3.6
BASE_DIR = os.path.join(os.path.dirname(__file__))
WORDS_DIR = os.path.join(BASE_DIR, 'words')
class Bazaar(object):
def __init__(self):
self.item = self.get_item("items.txt")
self.adj = self.get_item("adj.txt")
self.obj = "{} {}".format(self.adj, self.item)
self.super_item = self.get_item("superitems.txt")
self.super_adj = self.get_item("superadj.txt")
self.super_obj = "{} {}".format(self.super_adj, self.super_item)
self.heroku = "-".join([self.super_adj, self.super_item,
str(choice([x for x in range(1000, 9999)]))])
@property
def Adj(self):
return self.adj.title()
@property
def Item(self):
return self.item.title()
@property
def Obj(self):
return self.obj.title()
@property
def Super_adj(self):
return self.super_adj.title()
@property
def Super_item(self):
return self.super_item.title()
@property
def Super_obj(self):
return self.super_obj.title()
def get_item(self, filename):
path = os.path.join(WORDS_DIR, filename)
with open(path) as f:
content = f.read()
return choice(content.split('\n'))
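# --- Added illustrative usage (sketch, not part of the original module) ---
# Outputs are random draws from the bundled word lists; the values shown are
# invented for illustration only:
#
# b = Bazaar()
# b.Obj     # e.g. "Rusty Lantern"  (title-cased adjective + item)
# b.heroku  # e.g. "brave-falcon-4242"  (heroku-style slug)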
| 2.484375 | 2 |
tests/job_test.py | mkeilman/sirepo | 49 | 12768349 | <reponame>mkeilman/sirepo<filename>tests/job_test.py
# -*- coding: utf-8 -*-
"""End to end test of running a job.
:copyright: Copyright (c) 2019 RadiaSoft LLC. All Rights Reserved.
:license: http://www.apache.org/licenses/LICENSE-2.0.html
"""
from __future__ import absolute_import, division, print_function
import pytest
import os
# TODO(e-carlin): Tests that need to be implemented
# - agent never starts
# - agent response is bad (ex no req_id)
# - server_req is malformed
# - agent starts but we never get an incoming 'read_for_work' message
# - canceling of requests in the queue and of running requests
# - using only the resources that are available
# - agent sigterm -> sigkill progression
# - send kill to unknown agent
_REPORT = 'heightWeightReport'
def test_runCancel(fc):
from pykern import pkunit
from pykern.pkdebug import pkdc, pkdp, pkdlog
import time
d = fc.sr_sim_data()
d.models.simulation.name = 'srunit_long_run'
d = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
assert d.state != 'error'
if d.state == 'running':
break
time.sleep(d.nextRequestSeconds)
d = fc.sr_post('runStatus', d.nextRequest)
else:
pkunit.pkfail('runStatus: failed to start running: {}', d)
x = d.nextRequest
d = fc.sr_post(
'runCancel',
x,
)
assert d.state == 'canceled'
d = fc.sr_post(
'runStatus',
x,
)
assert d.state == 'canceled'
def test_runSimulation(fc):
from pykern import pkunit
from pykern.pkdebug import pkdp, pkdlog
from sirepo import job
import time
d = fc.sr_sim_data()
d = fc.sr_post(
'runSimulation',
dict(
forceRun=False,
models=d.models,
report=_REPORT,
simulationId=d.models.simulation.simulationId,
simulationType=d.simulationType,
),
)
for _ in range(10):
pkdlog(d)
assert d.state != 'error'
if d.state == 'completed':
break
time.sleep(d.nextRequestSeconds)
d = fc.sr_post('runStatus', d.nextRequest)
else:
pkunit.pkfail('runStatus: failed to complete: {}', d)
# Just double-check it actually worked
assert u'plots' in d
def test_remove_srw_report_dir(fc):
from pykern import pkio
from pykern import pkunit
import sirepo.srdb
m = 'intensityReport'
data = fc.sr_sim_data('NSLS-II ESM beamline')
fc.sr_run_sim(data, m)
g = pkio.sorted_glob(sirepo.srdb.root().join('user', fc.sr_uid, 'srw', '*', m))
pkunit.pkeq(1, len(g))
pkio.unchecked_remove(*g)
fc.sr_run_sim(data, m)
| 2.03125 | 2 |
plugin/python/zui/dlg/fileopen.py | cristivlas/zerobugs | 2 | 12768350 | <reponame>cristivlas/zerobugs
# -------------------------------------------------------------------------
# This file is part of ZeroBugs, Copyright (c) 2010 <NAME>
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# -------------------------------------------------------------------------
import gtk
import os.path
import zero
def set_filename(fileButton, process):
name = process.name()
basename = os.path.basename(name)
btn = fileButton.get_children()[0]
box = btn.get_children()[0]
image = box.get_children()[0]
label = box.get_children()[1]
iconName = "gnome-mime-application-x-executable"
if process.origin() == zero.Process.Origin.Core:
iconName = "gnome-mime-application-x-core"
image.set_from_icon_name(iconName, gtk.ICON_SIZE_MENU)
label.set_text(basename)
class FileOpen(object):
"""
Implements the logic associated with File Chooser Dialog
"""
def __init__(self, fileChooser, filters = ()):
self.__filters = filters
box = gtk.HBox()
try:
fileChooser.set_extra_widget(box)
fileChooser.connect("file-activated", self.on_file_activated)
fileChooser.connect("update-preview", self.on_update_preview, box)
for f in filters:
fileChooser.set_filter(f.filter)
f.connect("update_preview", self.on_update_preview)
except:
fileChooser.connect("response", self.__on_response)
def on_update_preview(self, fileChooser, preview):
#clear the preview:
for w in preview.get_children():
preview.remove(w)
#pass the preview to filters
filename = fileChooser.get_preview_filename()
if filename:
for f in self.__filters:
if f.update_preview(preview, filename):
break
def on_file_activated(self, chooser):
filename = chooser.get_filename()
if filename:
for f in self.__filters:
if f.open(filename):
chooser.hide()
break
def __on_response(self, dlg, respID):
if respID == gtk.RESPONSE_OK:
self.on_file_activated(dlg)
else:
dlg.hide()
| 2 | 2 |