max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
Python_Scripts/addDistricts.py | Vitax/SportsClubForMe | 2 | 12769851 | <gh_stars>1-10
import geocoder
import json
with open('/home/caglar/WebStormProjects/SportsClubForMe/assets/data/SportClubForMe_Districts.json') as file:
data = json.load(file)
for club in data["clubdata"]:
if 'district' in club:
continue
try:
g = geocoder.osm(club["address"] + ", " + club["postcode"])
if (g.error == None):
club["district"] = str(g.city_district)
except:
print(club)
json_string = json.dumps(data)
with open('/home/caglar/WebStormProjects/SportsClubForMe/assets/data/SportClubForMe_Districts2.json', "w") as output:
output.write(json_string) | 2.765625 | 3 |
plugins/data/ssd_plugin/digitsSSD/forms.py | wills2133/digits-ssd | 0 | 12769852 | # Copyright (c) 2016-2017, NVIDIA CORPORATION. All rights reserved.
from __future__ import absolute_import
import os
from digits import utils
from digits.utils import subclass
from flask.ext.wtf import Form
from wtforms import validators
@subclass
class DatasetForm(Form):
    """
    A form used to create an SSD dataset from a PASCAL-VOC style folder.

    NOTE(review): the original docstring said "Sunnybrook dataset", which
    appears to be a copy-paste leftover from another DIGITS plugin.
    """

    def validate_folder_path(form, field):
        # Optional-field validator: an empty value passes; a non-empty
        # value must point to an existing directory on the server.
        if not field.data:
            pass
        else:
            # make sure the filesystem path exists
            if not os.path.exists(field.data) or not os.path.isdir(field.data):
                raise validators.ValidationError(
                    'Folder does not exist or is not reachable')
            else:
                return True

    # Path of the VOC dataset folder. validate_folder_path is currently
    # commented out of the validators list, so only presence is checked.
    voc_folder = utils.forms.StringField(
        u'VOC folder',
        validators=[
            validators.DataRequired(),
            # validate_folder_path,
        ],
        tooltip="Specify the path to the voc folder"
    )

    # IP address of the dataset server (no format validation applied).
    dataset_server_ip = utils.forms.StringField(
        'Dataset server ip',
        validators=[
        ],
        tooltip="Dataset server ip in format 'xxx.xxx.xxx.xxx'."
    )

    # TCP port of the dataset server (kept as a string field).
    dataset_server_port = utils.forms.StringField(
        'Dataset server port',
        validators=[
        ],
        tooltip="Dataset server port in format 'xxxx'."
    )
@subclass
class InferenceForm(Form):
    """
    A form used to perform inference on a text classification dataset
    """

    def validate_file_path(form, field):
        # Optional-field validator: empty passes; otherwise the path must
        # exist. NOTE(review): this uses `and not os.path.isdir`, unlike the
        # `or` in DatasetForm.validate_folder_path, so the error only fires
        # when the path neither exists nor is a directory -- looks like an
        # unintentional loosening; confirm before changing.
        if not field.data:
            pass
        else:
            # make sure the filesystem path exists
            if not os.path.exists(field.data) and not os.path.isdir(field.data):
                raise validators.ValidationError(
                    'File does not exist or is not reachable')
            else:
                return True

    # Server-side path of a single image to run inference on.
    test_image_file = utils.forms.StringField(
        u'Image file',
        validators=[
            validate_file_path,
        ],
        tooltip="Provide the (server) path to an image."
    )

    # One record from the validation set; real choices are filled in at
    # runtime by the plugin.
    validation_record = utils.forms.SelectField(
        'Record from validation set',
        choices=[
            ('none', '- select record -'),
        ],
        default='none',
        tooltip="Test a record from the validation set."
    )
| 2.609375 | 3 |
lstm_dorway/bigram_batch.py | frenzykryger/lstm-dorway | 2 | 12769853 | import itertools
import numpy as np
import string
__all__ = ['BigramGenerator', 'SkipgramGenerator',
'id2bigram', 'vocabulary_size', 'all_bigrams']
letters = sorted(set((string.ascii_letters + string.digits + " ").lower()))
class WhitelistTable(object):
    """Translation table for str.translate that keeps only whitelisted
    characters.

    Whitelisted code points map to themselves; every other code point maps
    to None, which str.translate deletes.
    """

    def __init__(self, letters):
        codepoints = [ord(ch) for ch in letters]
        self._d = dict(zip(codepoints, codepoints))

    def __getitem__(self, key):
        return self._d.get(key)
trans_table = WhitelistTable(letters)
all_bigrams = {x[0] + x[1]: i for i, x in
enumerate(itertools.product(letters, letters))}
inversed_bigrams = {i: x for x, i in all_bigrams.items()}
vocabulary_size = len(all_bigrams)
def id2bigram(i):
    """Map a bigram id back to its two-character string (inverse of the
    module-level `all_bigrams` encoding)."""
    return inversed_bigrams[i]
def text_to_bigram_sequence(text):
    """Encode *text* as a numpy int16 array of bigram ids.

    Characters outside the whitelist (lower-case ascii letters, digits,
    space) are deleted by `trans_table` -- note this includes upper-case
    letters, which are dropped rather than lower-cased.  The filtered text
    is padded with a trailing space to an even length, then split into
    non-overlapping 2-character bigrams and looked up in `all_bigrams`.
    """
    text = text.translate(trans_table)
    if len(text) % 2 != 0:
        text += " "
    sequence = [all_bigrams[text[i:i + 2]] for i in range(0, len(text), 2)]
    return np.array(sequence, dtype=np.int16)
class BatchGenerator(object):
    """Cyclic batch generator over an encoded text sequence.

    The text is conceptually split into `batch_size` equal segments; lane
    *b* of each batch reads sequentially from segment *b*, wrapping around
    at the end of the text.
    """

    def __init__(self, text, batch_size, num_unrollings):
        self._text = text
        self._text_size = len(text)
        self._batch_size = batch_size
        self._num_unrollings = num_unrollings
        segment = self._text_size // batch_size
        # One read cursor per batch lane, spaced one segment apart.
        self._cursor = [lane * segment for lane in range(batch_size)]
        self._last_batch = self._next_batch()

    def _next_batch(self):
        """Generate a single batch from the current cursor position in the data."""
        batch = np.zeros(shape=(self._batch_size), dtype=np.int16)
        for lane, pos in enumerate(self._cursor):
            batch[lane] = self._text[pos]
            self._cursor[lane] = (pos + 1) % self._text_size
        return batch

    def next(self):
        """Generate the next array of batches from the data. The array consists of
        the last batch of the previous array, followed by num_unrollings new ones.
        """
        batches = [self._last_batch] + [self._next_batch()
                                        for _ in range(self._num_unrollings)]
        self._last_batch = batches[-1]
        return batches
def to_skipgrams(batches):
    """ This converts given number of batches to skipgrams
    returns skipgram_batches, skipgram_labels
    """
    count = len(batches)
    # Requires an odd number of batches so there is a unique center batch.
    assert count % 2 != 0
    center = count // 2
    contexts = batches[:center] + batches[center + 1:]
    inputs = [batches[center] for _ in contexts]
    return inputs, contexts
class BigramGenerator(object):
    """Generates batches of bigrams for given text"""

    def __init__(self, text, batch_size, num_unrollings=0):
        # Encode the raw text to bigram ids once, then delegate batching.
        self._bigrams = text_to_bigram_sequence(text)
        self._generator = BatchGenerator(
            self._bigrams, batch_size, num_unrollings)

    def next(self):
        # Returns num_unrollings + 1 batches (see BatchGenerator.next).
        return self._generator.next()
class SkipgramGenerator(object):
    """Generates batches/labels of skipgrams for given text"""

    def __init__(self, text, batch_size, num_skips):
        self._bigrams = text_to_bigram_sequence(text)
        # num_skips * 2 unrollings yields 2*num_skips + 1 batches per
        # next() call -- the odd count required by to_skipgrams.
        self._generator = BatchGenerator(
            self._bigrams, batch_size, num_skips * 2)

    def next(self):
        # Returns (skipgram_inputs, skipgram_labels).
        return to_skipgrams(self._generator.next())
| 3.03125 | 3 |
sdk/python/pulumi_time/__init__.py | tmeckel/pulumi-time | 1 | 12769854 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
from . import _utilities
import typing
# Export this package's modules as members:
from .provider import *
from .time_offset import *
from .time_rotating import *
from .time_static import *
_utilities.register(
resource_modules="""
[
{
"pkg": "time",
"mod": "index/timeOffset",
"fqn": "pulumi_time",
"classes": {
"time:index/timeOffset:TimeOffset": "TimeOffset"
}
},
{
"pkg": "time",
"mod": "index/timeRotating",
"fqn": "pulumi_time",
"classes": {
"time:index/timeRotating:TimeRotating": "TimeRotating"
}
},
{
"pkg": "time",
"mod": "index/timeStatic",
"fqn": "pulumi_time",
"classes": {
"time:index/timeStatic:TimeStatic": "TimeStatic"
}
}
]
""",
resource_packages="""
[
{
"pkg": "time",
"token": "pulumi:providers:time",
"fqn": "pulumi_time",
"class": "Provider"
}
]
"""
)
| 1.453125 | 1 |
pysnmp/NBS-MIB.py | agustinhenze/mibs.snmplabs.com | 11 | 12769855 | <gh_stars>10-100
#
# PySNMP MIB module NBS-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/NBS-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 20:07:17 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
ObjectIdentifier, Integer, OctetString = mibBuilder.importSymbols("ASN1", "ObjectIdentifier", "Integer", "OctetString")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
SingleValueConstraint, ValueRangeConstraint, ValueSizeConstraint, ConstraintsUnion, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "SingleValueConstraint", "ValueRangeConstraint", "ValueSizeConstraint", "ConstraintsUnion", "ConstraintsIntersection")
ModuleCompliance, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "NotificationGroup")
IpAddress, Integer32, MibScalar, MibTable, MibTableRow, MibTableColumn, ObjectIdentity, Unsigned32, enterprises, iso, NotificationType, TimeTicks, Counter32, MibIdentifier, Bits, Counter64, Gauge32, ModuleIdentity = mibBuilder.importSymbols("SNMPv2-SMI", "IpAddress", "Integer32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "ObjectIdentity", "Unsigned32", "enterprises", "iso", "NotificationType", "TimeTicks", "Counter32", "MibIdentifier", "Bits", "Counter64", "Gauge32", "ModuleIdentity")
DisplayString, TextualConvention = mibBuilder.importSymbols("SNMPv2-TC", "DisplayString", "TextualConvention")
nbsMib = ModuleIdentity((1, 3, 6, 1, 4, 1, 629, 250))
if mibBuilder.loadTexts: nbsMib.setLastUpdated('201309170000Z')
if mibBuilder.loadTexts: nbsMib.setOrganization('NBS')
nbs = ObjectIdentity((1, 3, 6, 1, 4, 1, 629))
if mibBuilder.loadTexts: nbs.setStatus('current')
class Unsigned16(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(0, 65535)
class Unsigned64(TextualConvention, Counter64):
status = 'current'
class WritableU64(TextualConvention, OctetString):
status = 'current'
subtypeSpec = OctetString.subtypeSpec + ValueSizeConstraint(8, 8)
fixedLength = 8
class NbsTcTemperature(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(-2147483648, 1000)
class NbsTcMilliVolt(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(-1, 1000000)
class NbsTcMilliAmp(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(-1, 1000000)
class NbsTcMicroAmp(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(-1, 2147483647)
class NbsTcMilliDb(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ValueRangeConstraint(-2147483648, 100000)
class NbsTcMilliWatts(TextualConvention, Integer32):
status = 'current'
class NbsTcMHz(TextualConvention, Unsigned32):
status = 'current'
subtypeSpec = Unsigned32.subtypeSpec + ValueRangeConstraint(1, 4294967295)
class NbsTcStatusSimple(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("notSupported", 1), ("bad", 2), ("good", 3), ("notInstalled", 4))
class NbsTcStatusLevel(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4, 5, 6))
namedValues = NamedValues(("notSupported", 1), ("statusLowError", 2), ("statusLowWarning", 3), ("statusGood", 4), ("statusHighWarning", 5), ("statusHighError", 6))
class NbsTcPartIndex(TextualConvention, Unsigned32):
status = 'current'
class NbsTcStagingCommit(TextualConvention, Integer32):
status = 'current'
subtypeSpec = Integer32.subtypeSpec + ConstraintsUnion(SingleValueConstraint(1, 2, 3, 4))
namedValues = NamedValues(("notSupported", 1), ("supported", 2), ("revertToCommitted", 3), ("apply", 4))
mibBuilder.exportSymbols("NBS-MIB", NbsTcStatusLevel=NbsTcStatusLevel, NbsTcPartIndex=NbsTcPartIndex, NbsTcStagingCommit=NbsTcStagingCommit, NbsTcMilliVolt=NbsTcMilliVolt, NbsTcTemperature=NbsTcTemperature, NbsTcMilliWatts=NbsTcMilliWatts, WritableU64=WritableU64, NbsTcMHz=NbsTcMHz, NbsTcStatusSimple=NbsTcStatusSimple, Unsigned16=Unsigned16, nbsMib=nbsMib, PYSNMP_MODULE_ID=nbsMib, nbs=nbs, NbsTcMilliAmp=NbsTcMilliAmp, NbsTcMicroAmp=NbsTcMicroAmp, Unsigned64=Unsigned64, NbsTcMilliDb=NbsTcMilliDb)
| 1.671875 | 2 |
scripts/display_temperature.py | cyclogenesis-au/opaware | 0 | 12769856 | <reponame>cyclogenesis-au/opaware
import subprocess
import opaware
from rgbmatrix import graphics, RGBMatrix, RGBMatrixOptions
import time
if __name__ == "__main__":
    HOST = "pi@flashpi"
    # Ports are handled in ~/.ssh/config since we use OpenSSH
    COMMAND = 'tail -n 500 ~/data/testjson.json'

    # Pull the most recent sensor records from the remote Pi over SSH.
    ssh = subprocess.Popen(["ssh", "%s" % HOST, COMMAND],
                           shell=False,
                           stdout=subprocess.PIPE,
                           stderr=subprocess.PIPE)
    result = ssh.stdout.readlines()

    # Parse the records and format the latest outdoor temperature reading.
    act_data = opaware.ingests.ingest_ambient(result)
    ff = "T = %s" % (act_data.outside_temperature.values[-1])

    # Configure a 64x32 display (two chained 32x32 panels).
    options = RGBMatrixOptions()
    options.rows = 32
    options.cols = 32
    options.chain_length = 2
    options.hardware_mapping = "adafruit-hat"
    mymat = RGBMatrix(options=options)
    offscreen_canvas = mymat.CreateFrameCanvas()
    font = graphics.Font()
    font.LoadFont("/home/ubuntu/7x13.bdf")
    textColor = graphics.Color(255, 255, 0)
    pos = offscreen_canvas.width
    my_text = ff

    # Scroll the text across the panel forever, wrapping once it has fully
    # run off the left edge.
    while True:
        offscreen_canvas.Clear()
        # BUG FIX: the original bound this to `len`, shadowing the builtin.
        # DrawText returns the rendered text width in pixels.
        text_width = graphics.DrawText(offscreen_canvas, font, pos, 10,
                                       textColor, my_text)
        pos -= 1
        if (pos + text_width < 0):
            pos = offscreen_canvas.width
        time.sleep(0.05)
        offscreen_canvas = mymat.SwapOnVSync(offscreen_canvas)
| 2.171875 | 2 |
servicenow/icon_servicenow/actions/search_incident_attachment/schema.py | emartin-merrill-r7/insightconnect-plugins | 1 | 12769857 | <filename>servicenow/icon_servicenow/actions/search_incident_attachment/schema.py<gh_stars>1-10
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
DESCRIPTION = "Search for attachment files with the given name"
class Input:
NAME = "name"
class Output:
ATTACHMENT_IDS = "attachment_ids"
class SearchIncidentAttachmentInput(komand.Input):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"name": {
"type": "string",
"title": "Name",
"description": "Name of the attachment, i.e. the base file name used to create it",
"order": 1
}
},
"required": [
"name"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
class SearchIncidentAttachmentOutput(komand.Output):
schema = json.loads("""
{
"type": "object",
"title": "Variables",
"properties": {
"attachment_ids": {
"type": "array",
"title": "Attachment IDs",
"description": "List of System IDs of attachment records with the given name",
"items": {
"type": "string"
},
"order": 1
}
},
"required": [
"attachment_ids"
]
}
""")
def __init__(self):
super(self.__class__, self).__init__(self.schema)
| 2.40625 | 2 |
authlib/specs/rfc5849/__init__.py | tk193192/authlib | 2 | 12769858 | <reponame>tk193192/authlib<filename>authlib/specs/rfc5849/__init__.py
# -*- coding: utf-8 -*-
"""
authlib.specs.rfc5849
~~~~~~~~~~~~~~~~~~~~~
This module represents a direct implementation of The OAuth 1.0 Protocol.
https://tools.ietf.org/html/rfc5849
"""
# flake8: noqa
from authlib.oauth1.rfc5849.errors import *
from authlib.oauth1 import (
ClientAuth as Client,
OAuth1Request,
SIGNATURE_HMAC_SHA1,
SIGNATURE_RSA_SHA1,
SIGNATURE_PLAINTEXT,
SIGNATURE_TYPE_HEADER,
SIGNATURE_TYPE_QUERY,
SIGNATURE_TYPE_BODY,
ClientMixin,
TemporaryCredentialMixin,
TokenCredentialMixin,
TemporaryCredential,
AuthorizationServer,
ResourceProtector,
)
| 1.367188 | 1 |
eex/translators/amber/amber_utility.py | dgasmith/EEX | 0 | 12769859 | import numpy as np
import eex
import os
from . import amber_metadata as amd
def get_energies(prmtop=None, crd=None, input_file=None, amb_path=None):
    """Evaluate energies of AMBER files. Based on InterMol

    Args:
        prmtop: path to the AMBER parameter/topology (.prmtop) file
        crd: path to the AMBER coordinate file
        input_file: path to the sander input file
        amb_path: path to the AMBER binaries, or to the sander binary itself

    Returns:
        dict of canonicalized energy terms parsed from the sander output.

    Raises:
        OSError: if a required file or the sander executable is missing.
    """
    if not prmtop:
        raise OSError('Cannot find %s Amber parameter file' % prmtop)
    if not crd:
        # BUG FIX: this branch referenced the undefined name `crdtop`
        # (raising NameError instead of the intended OSError) and
        # mislabeled the file -- `crd` is the coordinate file.
        raise OSError('Cannot find %s Amber coordinate file' % crd)

    # Place all sander artifacts next to the topology file.
    directory, _ = os.path.split(os.path.abspath(prmtop))
    mdout = os.path.join(directory, 'amber.out')
    stdout_path = os.path.join(directory, 'amber_stdout.txt')
    stderr_path = os.path.join(directory, 'amber_stderr.txt')

    # Did they give a path, or the name of the file?
    is_last_bin = os.path.basename(os.path.normpath(amb_path))
    if is_last_bin == 'sander':
        amber_bin = amb_path
    else:
        amber_bin = os.path.join(amb_path, 'sander')
    if not eex.utility.which(amber_bin):
        raise OSError('Unable to find AMBER executable (sander).')

    # Run sander.
    cmd = [amber_bin, '-i', input_file, '-c', crd, '-p', prmtop, '-o', mdout, '-O']
    _ = eex.utility.run_subprocess(cmd, stdout_path, stderr_path)

    ret = _group_energy_terms(mdout)
    # TODO: Unit conversion
    return eex.utility.canonicalize_energy_names(ret, amd.to_canonical)
def _group_energy_terms(mdout):
    """Parse AMBER output file and group the energy terms in a dict.

    Returns a dict mapping each term name (e.g. 'BOND', 'ANGLE') to its
    float value, plus an 'ENERGY' key holding the sum of all terms.
    """
    with open(mdout) as f:
        all_lines = f.readlines()
    # Find where the energy information starts.
    for i, line in enumerate(all_lines):
        if line[0:8] == ' NSTEP':
            startline = i
            break
    else:
        # NOTE(review): `AmberError` is not defined or imported anywhere in
        # this module, so hitting this branch raises NameError instead --
        # likely a leftover from InterMol; confirm and import/define it.
        raise AmberError('Unable to detect where energy info starts in AMBER '
                         'output file: {}'.format(mdout))
    # Strange ranges for amber file data.
    # Each sander energy line packs up to three "NAME = value" terms into
    # fixed-width columns; these are the column spans of the three slots.
    ranges = [[1, 24], [26, 49], [51, 77]]
    e_out = dict()
    potential = 0
    # Energy values begin three lines below the NSTEP header line.
    for line in all_lines[startline+3:]:
        if '=' in line:
            for i in range(3):
                r = ranges[i]
                term = line[r[0]:r[1]]
                if '=' in term:
                    energy_type, energy_value = term.split('=')
                    energy_value = float(energy_value)
                    # Accumulate the total potential across all terms.
                    potential += energy_value
                    energy_type = energy_type.rstrip()
                    e_out[energy_type] = energy_value
        else:
            # The first line without '=' ends the energy block.
            break
    e_out['ENERGY'] = potential
    # eex.utility.canonicalize_energy_names(e_out)
    return e_out
| 2.421875 | 2 |
ExPy/ExPy/module06.py | brad-h/expy | 0 | 12769860 | """Retirement Calculator"""
from datetime import date
def prompt(difference_check):
    """Calculate the year you can retire

    difference_check - a function that takes a number and returns boolean,
    the number provided is the difference between current age and retirement age
    """
    current_age = int(input('What is your age? '))
    target_age = int(input('At what age would you like to retire? '))
    this_year = date.today().year
    years_left = target_age - current_age
    # Guard clause: the caller-supplied predicate decides whether the
    # difference still counts as "years remaining".
    if not difference_check(years_left):
        print('You can retire now!')
        return
    retire_year = this_year + years_left
    print("You have {difference} years left until you can retire."
          .format(difference=years_left))
    print("It's {current_year}, so you can retire in {future_year}."
          .format(current_year=this_year, future_year=retire_year))
def ex6():
    """Exercise 6, basic version: accept any age difference, including
    negative, so "already past retirement age" is not treated specially."""
    prompt(lambda _: True)
def ex6a():
    """Exercise 6 challenge: treat a non-positive difference as already
    retired ("You can retire now!")."""
    prompt(lambda x: x > 0)
| 4.0625 | 4 |
python/lsst/integration_test_reporting/utils/__init__.py | lsst-sitcom/integration_test_reporting | 0 | 12769861 | <reponame>lsst-sitcom/integration_test_reporting
# Developed for the LSST System Integration, Test and Commissioning Team.
# This product includes software developed by the LSST Project
# (http://www.lsst.org).
# See the LICENSE file at the top-level directory of this distribution
# for details of code ownership.
#
# Use of this source code is governed by a 3-clause BSD-style
# license that can be found in the LICENSE file.
from .constants import *
from .csc_cls import *
from .df import *
from .helpers import *
from .parser import *
| 0.835938 | 1 |
src/revops/api/__init__.py | adam-revops/revops-python | 0 | 12769862 | name = "api"
import os
import sys
import requests
import logging
import revops.exceptions
__LOGGING_DEFAULTS__ = {'level': logging.INFO}
__DEFAULT_ENDPOINT__ = 'https://api.revops.io'
logging.basicConfig(**__LOGGING_DEFAULTS__)
class RevOpsAPI(object):
    """
    This is the RevOps API Client

    Attributes:
        api_key (str): API Key used to access RevOps API Access.
        endpoint (str): API Endpoint to access your RevOps instance.
            If not defined, defaults to 'https://api.revops.io'.
    """
    # Class-level placeholder; each instance installs its own header dict
    # in __init__.
    headers = {}

    def __init__(self, api_key=None, endpoint=__DEFAULT_ENDPOINT__):
        """Build a client. Environment variables REVOPS_API_KEY and
        REVOPS_API_IMPORTANT take precedence over constructor arguments.

        Raises:
            Exception: if no API key is available from either source.
        """
        self.api_key = os.environ.get('REVOPS_API_KEY', api_key)
        # Fixed: the original compared with `== None`; use an identity
        # check per PEP 8.
        if self.api_key is None or self.api_key == '':
            raise Exception("REVOPS_API_KEY environment variable is not set.")
        self.api_endpoint = os.environ.get('REVOPS_API_ENDPOINT', endpoint)
        self.headers = {
            'X-RevOps-API-Key': self.api_key,
            'Content-Type': 'application/json',
        }

    def __getattr__(self, name):
        # Lazily import revops.resources.<name> and return its API module
        # bound to this client. Only called for attributes not found via
        # normal lookup.
        resource = __import__(
            "revops.resources.{}".format(name),
            fromlist=["revops.resources"]
        )
        return resource.__api_module__(self)

    def request(self, data, api_resource=None, http_method="GET"):
        """Send `data` to `<endpoint>/<api_resource>` with the given HTTP
        method and return the `requests` response.

        Raises:
            revops.exceptions.AuthenticationException: on HTTP 401.
        """
        url = "{}/{}".format(self.api_endpoint, api_resource)
        response = requests.request(
            http_method,
            url,
            data=data,
            headers=self.headers,
        )
        if response.status_code == 401:
            raise revops.exceptions.AuthenticationException(
                "Unauthorized key, please check credentials provided.",
                api_resource,
                response,
            )
        return response
| 2.484375 | 2 |
setup.py | nava45/flask-routelogger | 1 | 12769863 | <reponame>nava45/flask-routelogger<filename>setup.py
#!/usr/bin/env python
"""
Flask-RouteLogger
-----------
Logs the meta route level information with minimal config for flask application
"""
from setuptools import setup
setup(
name='Flask-RouteLogger',
version='0.1',
url='',
license='BSD',
author='<NAME>',
author_email='<EMAIL>',
description='Logs the meta route level information with minimal config in flask application',
long_description=__doc__,
packages=[
'flask_routelogger',
],
zip_safe=False,
platforms='any',
install_requires=[
'Flask',
'elasticsearch'
],
test_suite='test_cache',
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: BSD License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| 2 | 2 |
src/ebonite/ext/sqlalchemy/__init__.py | koskotG/ebonite | 270 | 12769864 | from .repository import SQLAlchemyMetaRepository
__all__ = ['SQLAlchemyMetaRepository']
| 1.125 | 1 |
CoolPlot/Plot/PsychChart.py | CoolProp/CoolPlot | 9 | 12769865 | # -*- coding: utf-8 -*-
from __future__ import print_function, division, absolute_import
"""
This file implements a psychrometric chart for air at 1 atm
"""
from CoolProp.HumidAirProp import HAPropsSI
from .Plots import InlineLabel
import matplotlib, numpy, textwrap
import_template = (
"""
# This file was auto-generated by the PsychChart.py script in wrappers/Python/CoolProp/Plots
if __name__=='__main__':
import numpy, matplotlib
from CoolProp.HumidAirProp import HAPropsSI
from CoolProp.Plots.Plots import InlineLabel
p = 101325
Tdb = numpy.linspace(-10,60,100)+273.15
# Make the figure and the axes
fig=matplotlib.pyplot.figure(figsize=(10,8))
ax=fig.add_axes((0.1,0.1,0.85,0.85))
"""
)
closure_template = (
"""
matplotlib.pyplot.show()
"""
)
Tdb = numpy.linspace(-10, 60, 100) + 273.15
p = 101325
def indented_segment(s):
    """Dedent *s*, then prefix every line with one level of indentation
    for embedding inside the generated script."""
    dedented = textwrap.dedent(s)
    return '\n'.join(' ' + line for line in dedented.split('\n'))
class PlotFormatting(object):
    """Axis limits and labels for the psychrometric chart.

    plot() acts on a live matplotlib axis; __str__ returns an equivalent
    code fragment for the auto-generated PsychScript.py.
    """

    def plot(self, ax):
        # X axis in deg C, Y axis is humidity ratio W [kg_w / kg_da].
        ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
        ax.set_ylim(0, 0.03)
        ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
        ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")

    def __str__(self):
        # Keep this fragment in sync with plot() above.
        return indented_segment("""
ax.set_xlim(Tdb[0]-273.15,Tdb[-1]-273.15)
ax.set_ylim(0,0.03)
ax.set_xlabel(r"$T_{db}$ [$^{\circ}$C]")
ax.set_ylabel(r"$W$ ($m_{w}/m_{da}$) [-]")
""")
class SaturationLine(object):
    """The 100% relative-humidity (saturation) curve of the chart."""

    def plot(self, ax):
        # Humidity ratio at saturation (R=1.0) for each dry-bulb temperature.
        w = [HAPropsSI('W', 'T', T, 'P', p, 'R', 1.0) for T in Tdb]
        ax.plot(Tdb - 273.15, w, lw=2)

    def __str__(self):
        # Script fragment mirroring plot().
        return indented_segment("""
# Saturation line
w = [HAPropsSI('W','T',T,'P',p,'R',1.0) for T in Tdb]
ax.plot(Tdb-273.15,w,lw=2)
"""
        )
class HumidityLabels(object):
    """Inline percentage labels for the constant-RH curves, each placed
    where its curve crosses a fixed-enthalpy line."""

    def __init__(self, RH_values, h):
        # RH_values: relative humidities (0-1); h: enthalpy [J/kg_da]
        # fixing where each label sits along its curve.
        self.RH_values = RH_values
        self.h = h

    def plot(self, ax):
        xv = Tdb  # [K]
        for RH in self.RH_values:
            yv = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
            y = HAPropsSI('W', 'P', p, 'H', self.h, 'R', RH)
            # InlineLabel returns the on-curve point and text rotation.
            T_K, w, rot = InlineLabel(xv, yv, y=y, axis=ax)
            string = r'$\phi$=' + '{s:0.0f}'.format(s=RH * 100) + '%'
            # Make a temporary label to get its bounding box
            bbox_opts = dict(boxstyle='square,pad=0.0', fc='white', ec='None', alpha=0.5)
            ax.text(T_K - 273.15, w, string, rotation=rot, ha='center', va='center', bbox=bbox_opts)

    def __str__(self):
        # Script fragment mirroring plot(); h and RH_values are baked in.
        # NOTE(review): the loop-body indentation inside this template
        # appears to have been lost; verify the emitted script is valid.
        return indented_segment("""
xv = Tdb #[K]
for RH in {RHValues:s}:
yv = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
y = HAPropsSI('W','P',p,'H',{h:f},'R',RH)
T_K,w,rot = InlineLabel(xv, yv, y=y, axis = ax)
string = r'$\phi$='+{s:s}+'%'
bbox_opts = dict(boxstyle='square,pad=0.0',fc='white',ec='None',alpha = 0.5)
ax.text(T_K-273.15,w,string,rotation = rot,ha ='center',va='center',bbox=bbox_opts)
""".format(h=self.h, RHValues=str(self.RH_values), s="'{s:0.0f}'.format(s=RH*100)")
        )
class HumidityLines(object):
    """Constant relative-humidity curves of the chart."""

    def __init__(self, RH_values):
        # RH_values: relative humidities (0-1), one curve per value.
        self.RH_values = RH_values

    def plot(self, ax):
        for RH in self.RH_values:
            w = [HAPropsSI('W', 'T', T, 'P', p, 'R', RH) for T in Tdb]
            ax.plot(Tdb - 273.15, w, 'r', lw=1)

    def __str__(self):
        # Script fragment mirroring plot().
        # NOTE(review): the loop-body indentation inside this template
        # appears to have been lost; verify the emitted script is valid.
        return indented_segment("""
# Humidity lines
RHValues = {RHValues:s}
for RH in RHValues:
w = [HAPropsSI('W','T',T,'P',p,'R',RH) for T in Tdb]
ax.plot(Tdb-273.15,w,'r',lw=1)
""".format(RHValues=str(self.RH_values))
        )
class EnthalpyLines(object):
    """Constant-enthalpy lines, drawn from saturation down to zero
    humidity ratio."""

    def __init__(self, H_values):
        # H_values: enthalpies [J/kg_da], one straight line per value.
        self.H_values = H_values

    def plot(self, ax):
        for H in self.H_values:
            # Line goes from saturation to zero humidity ratio for this enthalpy
            T1 = HAPropsSI('T', 'H', H, 'P', p, 'R', 1.0) - 273.15
            T0 = HAPropsSI('T', 'H', H, 'P', p, 'R', 0.0) - 273.15
            w1 = HAPropsSI('W', 'H', H, 'P', p, 'R', 1.0)
            w0 = HAPropsSI('W', 'H', H, 'P', p, 'R', 0.0)
            ax.plot(numpy.r_[T1, T0], numpy.r_[w1, w0], 'r', lw=1)

    def __str__(self):
        # Script fragment mirroring plot().
        # NOTE(review): the emitted header comment says "Humidity lines"
        # but these are enthalpy lines; also loop-body indentation inside
        # the template appears lost -- verify the generated script.
        return indented_segment("""
# Humidity lines
for H in {HValues:s}:
#Line goes from saturation to zero humidity ratio for this enthalpy
T1 = HAPropsSI('T','H',H,'P',p,'R',1.0)-273.15
T0 = HAPropsSI('T','H',H,'P',p,'R',0.0)-273.15
w1 = HAPropsSI('W','H',H,'P',p,'R',1.0)
w0 = HAPropsSI('W','H',H,'P',p,'R',0.0)
ax.plot(numpy.r_[T1,T0],numpy.r_[w1,w0],'r',lw=1)
""".format(HValues=str(self.H_values))
        )
if __name__ == '__main__':
    # When True, also render the chart interactively while generating the
    # standalone script; when False only PsychScript.py is produced.
    and_plot = False
    if and_plot:
        fig = matplotlib.pyplot.figure(figsize=(10, 8))
        ax = fig.add_axes((0.1, 0.1, 0.85, 0.85))
        ax.set_xlim(Tdb[0] - 273.15, Tdb[-1] - 273.15)
        ax.set_ylim(0, 0.03)
        ax.set_xlabel(r"Dry bulb temperature [$^{\circ}$C]")
        ax.set_ylabel(r"Humidity ratio ($m_{water}/m_{dry\ air}$) [-]")

    SL = SaturationLine()
    if and_plot: SL.plot(ax)

    RHL = HumidityLines([0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9])
    if and_plot: RHL.plot(ax)

    RHLabels = HumidityLabels([0.05, 0.1, 0.15, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9], h=65000)
    if and_plot: RHLabels.plot(ax)

    HL = EnthalpyLines(range(-20000, 100000, 10000))
    if and_plot: HL.plot(ax)

    PF = PlotFormatting()
    if and_plot: PF.plot(ax)

    if and_plot: matplotlib.pyplot.show()

    # Emit the standalone plotting script by concatenating each element's
    # __str__ fragment between the import/closure templates.
    # NOTE(review): the two lines below are Python-2 only -- on Python 3,
    # writing str.encode('ascii') (bytes) to a text-mode file raises
    # TypeError and execfile() does not exist; confirm the intended
    # interpreter before changing.
    with open('PsychScript.py', 'w') as fp:
        for chunk in [import_template, SL, RHL, HL, PF, RHLabels, closure_template]:
            fp.write(str(chunk).encode('ascii'))

    execfile('PsychScript.py')
| 2.90625 | 3 |
client/connectserver.py | ischeinkman/Server-Wall | 0 | 12769866 | #!/usr/bin/python
import socket
import sys
from Crypto.Cipher import AES
from Crypto import Random
from config import Config
def createMsg(cfg, user, ip=None):
    """Build the hex-encoded, AES-CFB-encrypted authentication message.

    The plaintext is "user;password" plus ";ip" when an ip is supplied
    (note: an empty-string ip still appends a trailing ';').  A fresh
    16-byte random salt is used as the CFB IV and prepended to the
    ciphertext before hex encoding.
    NOTE(review): str.encode('hex') is Python-2 only; on Python 3 use
    binascii.hexlify on bytes instead.
    """
    ipstring = ''
    if ip != None:
        ipstring = ';' + ip
    salt = Random.new().read(16)
    cipher = AES.new(cfg.key, AES.MODE_CFB, salt)
    encodedmsg = (salt + cipher.encrypt(user + ';' + cfg.password + ipstring)).encode('hex')
    return encodedmsg
def authenticate(cfg, user, srcip=None):
    """Connect to the server wall and send the encrypted credential message.

    Args:
        cfg: Config providing ip, port, key and password.
        user: user id to authenticate as.
        srcip: optional source ip to embed in the message.
    """
    print('Creating socket...')
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        print('Connecting...')
        sock.connect((cfg.ip, cfg.port))
        print('Sending...')
        # sendall() retries short writes; the original used send(), which
        # may transmit only part of the message.
        sock.sendall(createMsg(cfg, user, srcip))
        print('Sent.')
    finally:
        # Close the socket even when connect/send fails; the original
        # leaked it on any error.
        sock.close()
def main():
    """Parse command-line flags and run authentication.

    Flags:
        -c <path>  config file (falls back to Config() when absent or on
                   IOError while reading it)
        -s <ip>    source ip to report (defaults to '')
        -u <user>  user id (defaults to cfg.defaultuser)

    NOTE(review): a flag supplied as the last argument makes the
    `sys.argv[index + 1]` lookup raise an unhandled IndexError.  Also,
    the '' default for srcip is passed through to createMsg, where it is
    not None and therefore appends a trailing ';' -- confirm intended.
    """
    try:
        cfg = Config(sys.argv[sys.argv.index('-c') + 1])
    except ValueError:
        # '-c' not present: use the default config.
        cfg = Config()
    except IOError as e:
        print("Got IOError parsing config.")
        print(e)
        print("Using default ...")
        cfg = Config()
    try:
        srcindex = sys.argv.index('-s')
        srcip = sys.argv[srcindex + 1]
    except ValueError:
        srcip = ''
    try:
        userindex = sys.argv.index('-u')
        userid = sys.argv[userindex + 1]
    except ValueError:
        userid = cfg.defaultuser
    authenticate(cfg, userid, srcip)
| 2.8125 | 3 |
gem5-gpu/configs/GPUConfig.py | ayoubg/gem5-graphics_v1 | 1 | 12769867 | # Copyright (c) 2012 <NAME> and <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>, <NAME>
import m5
import os
import re
from m5.objects import *
from m5.util.convert import *
from m5.util import fatal
gpu_core_configs = ['Fermi', 'Maxwell', 'Tegra']
def addGPUOptions(parser):
    """Register every GPU-related command-line option on *parser*.

    Option groups: GPGPU-Sim config overrides, shader-core sizing, cache/TLB
    sizing, GPU memory, system memory, graphics-pipeline knobs, and the copy
    engine.  NOTE: clusters/cores_per_cluster/gpu-core-clock/gpu_warp_size are
    later overridden by values parsed from gpgpusim.config when one is given
    (see parseGpgpusimConfig).
    """
    parser.add_option("--gpgpusim-config", type="string", default=None, help="Path to the gpgpusim.config to use. This overrides the gpgpusim.config template")
    parser.add_option("--access-host-pagetable", action="store_true", default=False)
    parser.add_option("--split", default=False, action="store_true", help="Use split CPU and GPU cache hierarchies instead of fusion")
    parser.add_option("--kernel_stats", default=False, action="store_true", help="Dump statistics on GPU kernel boundaries")
    parser.add_option("--gpgpusim_stats", default=False, action="store_true", help="Dump statistics of GPGPU-Sim on GPU kernel boundaries")
    #gpu cores, note: these 3 configs will be loaded from the gpgpusim config file when specified
    parser.add_option("--clusters", default=16, help="Number of shader core clusters in the gpu that GPGPU-sim is simulating", type="int")
    parser.add_option("--cores_per_cluster", default=1, help="Number of shader cores per cluster in the gpu that GPGPU-sim is simulating", type="int")
    parser.add_option("--gpu-core-clock", default='700MHz', help="The frequency of GPU clusters (note: shaders operate at double this frequency when modeling Fermi)")
    parser.add_option("--ctas_per_shader", default=8, help="Number of simultaneous CTAs that can be scheduled to a single shader", type="int")
    parser.add_option("--gpu_warp_size", type="int", default=32, help="Number of threads per warp, also functional units per shader core/SM")
    parser.add_option("--gpu_threads_per_core", type="int", default=1536, help="Maximum number of threads per GPU core (SM)")
    #caches
    parser.add_option("--sc_l1_size", default="64kB", help="size of l1 cache hooked up to each sc")
    parser.add_option("--sc_l1_assoc", default=4, help="associativity of l1 cache hooked up to each sc", type="int")
    parser.add_option("--gpu_l1_buf_depth", type="int", default=96, help="Number of buffered L1 requests per shader")
    parser.add_option("--sc_tl1_size", default="64kB", help="size of l1 texture cache hooked up to each sc")
    parser.add_option("--sc_tl1_assoc", default=4, help="associativity of l1 texture cache hooked up to each sc", type="int")
    parser.add_option("--gpu_tl1_buf_depth", type="int", default=96, help="Number of buffered L1 requests per shader")
    parser.add_option("--gpu_l1_pagewalkers", type="int", default=32, help="Number of GPU L1 pagewalkers")
    parser.add_option("--sc_zl1_size", default="32kB", help="size of l1 z cache hooked up to each sc")
    parser.add_option("--sc_zl1_assoc", default=4, help="associativity of l1 z cache", type="int")
    parser.add_option("--gpu_zl1_buf_depth", type="int", default=96, help="Number of buffered Z-cache requests")
    parser.add_option("--gpu_tlb_entries", type="int", default=0, help="Number of entries in GPU Data TLB. 0 implies infinite")
    parser.add_option("--gpu_tlb_assoc", type="int", default=0, help="Associativity of the Data L1 TLB. 0 implies infinite")
    parser.add_option("--gpu_ttlb_entries", type="int", default=0, help="Number of entries in GPU Tex TLB. 0 implies infinite")
    parser.add_option("--gpu_ttlb_assoc", type="int", default=0, help="Associativity of the Tex L1 TLB. 0 implies infinite")
    # NOTE(review): no type="int" below, so a user-supplied --gpu_num_l2caches
    # stays a string -- confirm whether that is intended.
    parser.add_option("--gpu_num_l2caches", default=1, help="num of l2 GPU caches")
    parser.add_option("--sc_l2_size", default="1MB", help="size of L2 cache divided by num L2 caches")
    parser.add_option("--sc_l2_assoc", default=16, help="associativity of L2 cache backing SC L1's", type="int")
    parser.add_option("--gpu-l2-resource-stalls", action="store_true", default=False)
    parser.add_option("--pwc_size", default="8kB", help="Capacity of the page walk cache")
    # NOTE(review): --pwc_assoc lacks type="int"; a user-supplied value would
    # remain a string.  --pwc_policy instantiates its default object eagerly at
    # parser-setup time -- verify that is intended.
    parser.add_option("--pwc_assoc", default=16, help="Assoc of the page walk cache")
    parser.add_option("--pwc_policy", default= LRUReplacementPolicy(), help="Replacement policy of the page walk cache")
    parser.add_option("--flush_kernel_end", default=False, action="store_true", help="Flush the L1s at the end of each kernel. (Only VI_hammer)")
    #gpu memory
    parser.add_option("--gpu_core_config", type="choice", choices=gpu_core_configs, default='Fermi', help="configure the GPU cores like %s" % gpu_core_configs)
    parser.add_option("--gpu-mem-size", default='1GB', help="In split hierarchies, amount of GPU memory")
    parser.add_option("--gpu_mem_ctl_latency", type="int", default=-1, help="GPU memory controller latency in cycles")
    parser.add_option("--gpu_mem_freq", type="string", default=None, help="GPU memory controller frequency")
    parser.add_option("--gpu_membus_busy_cycles", type="int", default=-1, help="GPU memory bus busy cycles per data transfer")
    parser.add_option("--gpu_membank_busy_time", type="string", default=None, help="GPU memory bank busy time in ns (CL+tRP+tRCD+CAS)")
    #system memory
    parser.add_option("--total-mem-size", default='2GB', help="Total size of memory in system")
    parser.add_option("--dev-numa-high-bit", type="int", default=0, help="High order address bit to use for device NUMA mapping.")
    parser.add_option("--num-dev-dirs", default=1, help="In split hierarchies, number of device directories", type="int")
    #graphics options
    parser.add_option("--g_depth_shader", type = "int", default=0, help="depth test in shader")
    parser.add_option("--g_blend_shader", type = "int", default=1, help="Blend in shader")
    parser.add_option("--g_start_frame", type = "int", default=-1, help="Simulation start frame")
    parser.add_option("--g_end_frame", type = "int", default=-1, help="Simulation end frame")
    parser.add_option("--g_start_call", type = "int", default=0, help="Simulation start draw call")
    parser.add_option("--g_end_call", type = "int", default=-1, help="Simulation end draw call")
    parser.add_option("--g_raster_th", type = "int", default=32, help="Graphics raster tile height")
    parser.add_option("--g_raster_tw", type = "int", default=32, help="Graphics raster tile width")
    parser.add_option("--g_raster_bh", type = "int", default=128, help="Graphics raster block height")
    parser.add_option("--g_raster_bw", type = "int", default=128, help="Graphics raster block width")
    parser.add_option("--g_cp_start", type = "int", default=-1, help="Graphics checkpoint start frame")
    parser.add_option("--g_cp_end", type = "int", default=-1, help="Graphics checkpoint end frame")
    parser.add_option("--g_cp_period", type = "int", default=5, help="Graphics checkpoint period")
    parser.add_option("--g_skip_cp_frames", type = "int", default=0, help="Graphics skip rendering checkpoint loading frames")
    parser.add_option("--ce_buffering", type="int", default=128, help="Maximum cache lines buffered in the GPU CE. 0 implies infinite")
def configureMemorySpaces(options):
    """Compute the CPU, GPU and total physical address ranges.

    In fusion mode all three ranges are the full memory range.  In split mode
    the GPU segment is carved out of the top of total memory, and
    ``options.total_mem_size`` is shrunk to the CPU-visible portion.  Also
    appends the matching '_split'/'_fusion' suffix to buildEnv['PROTOCOL'].

    Returns a ``(cpu_mem_range, gpu_mem_range, total_mem_range)`` tuple.
    """
    full_range = AddrRange(options.total_mem_size)
    if not options.split:
        # Fused hierarchy: CPU and GPU share the whole address space.
        buildEnv['PROTOCOL'] += '_fusion'
        return (full_range, full_range, full_range)

    buildEnv['PROTOCOL'] += '_split'
    full_size = full_range.size()
    requested_gpu_range = AddrRange(options.gpu_mem_size)
    if requested_gpu_range.size() >= full_size:
        fatal("GPU memory size (%s) won't fit within total memory size (%s)!" % (options.gpu_mem_size, options.total_mem_size))
    # Place the GPU segment at the top of physical memory.
    gpu_segment_base = Addr(full_size - requested_gpu_range.size())
    device_range = AddrRange(gpu_segment_base, size = options.gpu_mem_size)
    # Shrink the CPU-visible memory to everything below the GPU segment.
    options.total_mem_size = long(gpu_segment_base)
    host_range = AddrRange(options.total_mem_size)
    return (host_range, device_range, full_range)
def parseGpgpusimConfig(options):
    """Produce the gpgpusim.config used for this run and return its path.

    Either fills in a per-microarchitecture template with command-line option
    values, or (when --gpgpusim-config is given) reads the user's file and
    instead back-fills options (num_sc, gpu_core_clock, gpu_warp_size) from
    it.  In both cases the interconnect config is also written to outdir, and
    the (deprecated) GPU DRAM clock is scraped from -gpgpu_clock_domains.
    """
    # parse gpgpu config file
    # First check the cwd, and if there is not a gpgpusim.config file there
    # Use the template found in gem5-fusion/configs/gpu_config and fill in
    # the missing information with command line options.
    if options.gpgpusim_config:
        usingTemplate = False
        gpgpusimconfig = os.path.join(os.path.dirname(__file__),'gpu_config/'+options.gpgpusim_config)
        icntconfig = os.path.join(os.path.dirname(__file__),'gpu_config/'+options.icnt_config)
    else:
        usingTemplate = True
        # Pick the template matching the requested core microarchitecture.
        if options.gpu_core_config == 'Fermi':
            gpgpusimconfig = os.path.join(os.path.dirname(__file__), 'gpu_config/gpgpusim.fermi.config.template')
        elif options.gpu_core_config == 'Maxwell':
            gpgpusimconfig = os.path.join(os.path.dirname(__file__), 'gpu_config/gpgpusim.maxwell.config.template')
        elif options.gpu_core_config == 'Tegra':
            gpgpusimconfig = os.path.join(os.path.dirname(__file__), 'gpu_config/gpgpusim.tegra.template')
        else:
            gpgpusimconfig = os.path.join(os.path.dirname(__file__), 'gpu_config/gpgpusim.config.template')
    if not os.path.isfile(gpgpusimconfig):
        fatal("Unable to find gpgpusim config (%s)" % gpgpusimconfig)
    f = open(gpgpusimconfig, 'r')
    config = f.read()
    f.close()
    # fDumpPath = os.path.join(options.outdir, 'gpgpusimFrameDumps')
    # fShadersPath = os.path.join(options.outdir, 'gpgpusimShaders')
    # if not os.path.isdir(fDumpPath)
    #    os.makedirs(fDumpPath)
    # if not os.path.isdir(fShadersPath)
    #    os.makedirs.isdir(fShadersPath)
    # Substitute graphics-related placeholders; these apply to both the
    # template and a user-provided config.
    config = config.replace("%outdir%", m5.options.outdir)
    config = config.replace("%gDepthShader%", str(options.g_depth_shader) +"\n")
    config = config.replace("%gBlendShader%", str(options.g_blend_shader) +"\n")
    config = config.replace("%mem_ctrls%", str(options.mem_channels) +"\n")
    config = config.replace("%gStartFrame%", str(options.g_start_frame) +"\n")
    config = config.replace("%gEndFrame%", str(options.g_end_frame) +"\n")
    config = config.replace("%gStartCall%", str(options.g_start_call) +"\n")
    config = config.replace("%gEndCall%", str(options.g_end_call) +"\n")
    config = config.replace("%gRasterTH%", str(options.g_raster_th) +"\n")
    config = config.replace("%gRasterTW%", str(options.g_raster_tw) +"\n")
    config = config.replace("%gRasterBH%", str(options.g_raster_bh) +"\n")
    config = config.replace("%gRasterBW%", str(options.g_raster_bw) +"\n")
    config = config.replace("%gCpStart%", str(options.g_cp_start) +"\n")
    config = config.replace("%gCpEnd%", str(options.g_cp_end) +"\n")
    config = config.replace("%gCpPeriod%", str(options.g_cp_period) +"\n")
    config = config.replace("%gSkipCpFrames%", str(options.g_skip_cp_frames) +"\n")
    if usingTemplate:
        print "Using template and command line options for gpgpusim.config"
        # Modify the GPGPU-Sim configuration template
        config = config.replace("%clusters%", str(options.clusters))
        config = config.replace("%cores_per_cluster%", str(options.cores_per_cluster))
        config = config.replace("%ctas_per_shader%", str(options.ctas_per_shader))
        icnt_outfile = os.path.join(m5.options.outdir, 'config_fermi_islip.icnt')
        config = config.replace("%icnt_file%", icnt_outfile)
        config = config.replace("%warp_size%", str(options.gpu_warp_size))
        # GPGPU-Sim config expects freq in MHz
        config = config.replace("%freq%", str(toFrequency(options.gpu_core_clock) / 1.0e6))
        config = config.replace("%threads_per_sm%", str(options.gpu_threads_per_core))
        options.num_sc = options.clusters*options.cores_per_cluster
        # Write out the configuration file to the output directory
        f = open(m5.options.outdir + '/gpgpusim.config', 'w')
        f.write(config)
        f.close()
        gpgpusimconfig = m5.options.outdir + '/gpgpusim.config'
        # Read in and modify the interconnect config template
        icnt_template = os.path.join(os.path.dirname(__file__), 'gpu_config/config_fermi_islip.template.icnt')
        f = open(icnt_template)
        icnt_config = f.read()
        f.close()
        # The number of nodes in the GPU network is the number of core clusters,
        # plus the number of GPU memory partitions, plus one extra (it is not
        # clear in GPGPU-Sim what this extra is for). Note: Aiming to remove
        # GPGPU-Sim interconnect completely as it only models parameter memory
        # handling currently (i.e. tiny fraction of accesses). Only model one
        # memory partition currently by default.
        num_icnt_nodes = str(options.clusters + 1 + 1)
        icnt_config = icnt_config.replace("%num_nodes%", num_icnt_nodes)
        # Write out the interconnect config file to the output directory
        f = open(icnt_outfile, 'w')
        f.write(icnt_config)
        f.close()
    else:
        print "Using gpgpusim.config for clusters, cores_per_cluster, Frequency, warp size"
        # Strip '#' comments, then scrape core counts, clock and warp size
        # back out of the user-supplied config so gem5-side options agree.
        config = re.sub(re.compile("#.*?\n"), "", config)
        start = config.find("-gpgpu_n_clusters ") + len("-gpgpu_n_clusters ")
        end = config.find('-', start)
        gpgpu_n_clusters = int(config[start:end])
        start = config.find("-gpgpu_n_cores_per_cluster ") + len("-gpgpu_n_cores_per_cluster ")
        end = config.find('-', start)
        gpgpu_n_cores_per_cluster = int(config[start:end])
        num_sc = gpgpu_n_clusters * gpgpu_n_cores_per_cluster
        options.num_sc = num_sc
        start = config.find("-gpgpu_clock_domains ") + len("-gpgpu_clock_domains ")
        end = config.find(':', start)
        options.gpu_core_clock = config[start:end] + "MHz"
        start = config.find('-gpgpu_shader_core_pipeline ') + len('-gpgpu_shader_core_pipeline ')
        start = config.find(':', start) + 1
        end = config.find('\n', start)
        options.gpu_warp_size = int(config[start:end])
        icnt_outfile = os.path.join(m5.options.outdir, 'config_network.icnt')
        gpgpusimconfig = os.path.join(m5.options.outdir, 'gpgpusim.config')
        config = config.replace("%icnt_file%", icnt_outfile)
        print icnt_outfile
        print gpgpusimconfig
        f = open(gpgpusimconfig, 'w')
        f.write(config)
        f.close()
        # Read in and modify the interconnect config template
        if(options.icnt_config):
            icnt_template = os.path.join(os.path.dirname(__file__), 'gpu_config/'+ options.icnt_config)
        else:
            icnt_template = os.path.join(os.path.dirname(__file__), 'gpu_config/template_icnt.icnt')
        f = open(icnt_template)
        icnt_config = f.read()
        f.close()
        num_icnt_nodes = str(options.clusters + options.mem_channels)
        icnt_config = icnt_config.replace("%num_nodes%", num_icnt_nodes)
        f = open(icnt_outfile, 'w')
        f.write(icnt_config)
        f.close()
    if options.pwc_size == "0":
        # Bypass the shared L1 cache
        options.gpu_tlb_bypass_l1 = True
    else:
        # Do not bypass the page walk cache
        options.gpu_tlb_bypass_l1 = False
    # DEPRECATED: Get the GPU DRAM clock from the config file to be passed to
    # the DRAM component wrapper. This should be removed at a later date!
    config = re.sub(re.compile("#.*?\n"), "", config)
    start = config.find("-gpgpu_clock_domains ")
    end = config.find('\n', start)
    clk_domains = config[start:end].split(':')
    options.gpu_dram_clock = clk_domains[3] + "MHz"
    return gpgpusimconfig
def createGPU(options, gpu_mem_range):
    """Build and return the CudaGPU object with its cores, LSQs, CE and Z-unit.

    Parses/produces the gpgpusim.config first (which may override core counts,
    clock and warp size on *options*), then instantiates the GPU, per-core
    load/store and texture queues, the copy engine and the Z unit, applying
    per-microarchitecture latency settings.
    """
    # DEPRECATED: Set a default GPU DRAM clock to be passed to the wrapper.
    # This must be eliminated when the wrapper can be removed.
    options.gpu_dram_clock = None
    gpgpusimOptions = parseGpgpusimConfig(options)
    # The GPU's clock domain is a source for all of the components within the
    # GPU. By making it a SrcClkDomain, it can be directly referenced to change
    # the GPU clock frequency dynamically.
    gpu = CudaGPU(warp_size = options.gpu_warp_size,
                  manage_gpu_memory = options.split,
                  clk_domain = SrcClockDomain(clock = options.gpu_core_clock,
                                              voltage_domain = VoltageDomain()),
                  gpu_memory_range = gpu_mem_range,
                  system_cacheline_size = options.cacheline_size)
    # Wrappers drive the GPGPU-Sim-side components; the interconnect runs at
    # half the core clock (clk_divider = 2).
    gpu.cores_wrapper = GPGPUSimComponentWrapper(clk_domain = gpu.clk_domain)
    gpu.icnt_wrapper = GPGPUSimComponentWrapper(clk_domain = DerivedClockDomain(
                                                clk_domain = gpu.clk_domain,
                                                clk_divider = 2))
    gpu.l2_wrapper = GPGPUSimComponentWrapper(clk_domain = gpu.clk_domain)
    gpu.dram_wrapper = GPGPUSimComponentWrapper(
                            clk_domain = SrcClockDomain(
                                clock = options.gpu_dram_clock,
                                voltage_domain = gpu.clk_domain.voltage_domain))
    warps_per_core = options.gpu_threads_per_core / options.gpu_warp_size
    gpu.shader_cores = [CudaCore(id = i, warp_contexts = warps_per_core)
                        for i in xrange(options.num_sc)]
    gpu.ce = GPUCopyEngine(driver_delay = 5000000,
                           buffering = options.ce_buffering)
    gpu.zunit = ZUnit()
    for sc in gpu.shader_cores:
        # Each core gets a data LSQ and a texture LSQ.
        sc.lsq = ShaderLSQ()
        sc.tex_lq = ShaderLSQ()
        sc.lsq.data_tlb.entries = options.gpu_tlb_entries
        sc.tex_lq.data_tlb.entries = options.gpu_ttlb_entries
        # Forward flushes only for fused VI_hammer with per-kernel L1 flushing.
        sc.lsq.forward_flush = (buildEnv['PROTOCOL'] == 'VI_hammer_fusion' and options.flush_kernel_end)
        sc.tex_lq.forward_flush = (buildEnv['PROTOCOL'] == 'VI_hammer_fusion' and options.flush_kernel_end)
        sc.lsq.warp_size = options.gpu_warp_size
        sc.tex_lq.warp_size = options.gpu_warp_size
        sc.lsq.cache_line_size = options.cacheline_size
        sc.tex_lq.cache_line_size = options.cacheline_size
        #sc.lsq.request_buffer_depth = options.gpu_l1_buf_depth
        #sc.tex_lq.request_buffer_depth = options.gpu_tl1_buf_depth
        if options.gpu_threads_per_core % options.gpu_warp_size:
            fatal("gpu_warp_size must divide gpu_threads_per_core evenly.")
        sc.lsq.warp_contexts = warps_per_core
        sc.tex_lq.warp_contexts = warps_per_core
        if options.gpu_core_config == 'Fermi':
            # Fermi latency for zero-load independent memory instructions is
            # roughly 19 total cycles with ~4 cycles for tag access
            sc.lsq.l1_tag_cycles = 4
            sc.lsq.latency = 14
        elif options.gpu_core_config == 'Maxwell':
            # Maxwell latency for zero-load independent memory instructions is
            # 8-10 cycles quicker than Fermi, and tag access appears shorter
            sc.lsq.l1_tag_cycles = 1
            sc.lsq.latency = 6
        elif options.gpu_core_config == 'Tegra':
            #for now copy fermi configs
            #FIXME
            sc.lsq.l1_tag_cycles = 1
            sc.lsq.latency = 6
    # This is a stop-gap solution until we implement a better way to register device memory
    if options.access_host_pagetable:
        gpu.access_host_pagetable = True
        for sc in gpu.shader_cores:
            sc.itb.access_host_pagetable = True
            sc.ttb.access_host_pagetable = True
            sc.lsq.data_tlb.access_host_pagetable = True
            sc.tex_lq.data_tlb.access_host_pagetable = True
        gpu.ce.device_dtb.access_host_pagetable = True
        gpu.ce.host_dtb.access_host_pagetable = True
        gpu.zunit.ztb.access_host_pagetable = True
    gpu.config_path = gpgpusimOptions
    gpu.dump_kernel_stats = options.kernel_stats
    gpu.dump_gpgpusim_stats = options.gpgpusim_stats
    return gpu
def connectGPUPorts(system, gpu, ruby, options):
    """Wire the GPU's cores, LSQs, Z-unit and copy engine to Ruby sequencers.

    Sequencer layout (indices into ruby._cpu_ports): CPU ports first, then
    ``mp`` ports per shader core (mp == 2 when a VI fused protocol provides
    separate texture caches, else 1), then the pagewalk/CE/Z ports.
    """
    # for now only VI_fusion has tex and z caches added
    mp = 1
    if(buildEnv['PROTOCOL'].lower().count("vi") and (not options.split)):
        mp = 2
        idx = options.num_cpus+len(gpu.shader_cores)*mp+2
        print "connecting zunit to ", idx
        gpu.zunit.z_port = ruby._cpu_ports[idx].slave
    else:
        #if not VI assert the g_depth_shader option is used
        assert(options.g_depth_shader==1), "No z-cache, g_depth_shader has to be enabled"
    for i,sc in enumerate(gpu.shader_cores):
        # NOTE(review): inst_port and tex_port (and the data LSQ cache_port)
        # all attach to the same sequencer index here; only the texture LSQ
        # uses the +mp-1 offset.  Confirm this sharing is intended.
        sc.inst_port = ruby._cpu_ports[options.num_cpus+i*mp].slave
        sc.tex_port = ruby._cpu_ports[options.num_cpus+i*mp].slave
        # Connect each warp lane of the core to its LSQ lane.
        for j in xrange(options.gpu_warp_size):
            sc.lsq_port[j] = sc.lsq.lane_port[j]
            sc.tex_lq_port[j] = sc.tex_lq.lane_port[j]
        sc.lsq.cache_port = ruby._cpu_ports[options.num_cpus+i*mp].slave
        sc.tex_lq.cache_port = ruby._cpu_ports[options.num_cpus+(i*mp)+mp-1].slave
        sc.lsq_ctrl_port = sc.lsq.control_port
        sc.tex_ctrl_port = sc.tex_lq.control_port
    # The total number of sequencers is equal to the number of CPU cores * 2, plus
    # the number of GPU cores plus any pagewalk caches, copy engine and z
    # caches. Currently, for unified address space architectures, there is one
    # pagewalk cache, one copy engine cache and one z-cache (3 total), and the pagewalk cache
    # is indexed first, then the CE then the Z.
    #For split address space architectures, there are 2 copy
    # engine caches, and the host-side cache is indexed before the device-side.
    try:
        datapathsCount = len(system.datapaths)
    except:
        datapathsCount = 0
    assert(len(ruby._cpu_ports) == options.num_cpus + options.num_sc*mp + mp +1 + datapathsCount)
    # Initialize the MMU, connecting it to either the pagewalk cache port for
    # unified address space, or the copy engine's host-side sequencer port for
    # split address space architectures.
    gpu.shader_mmu.setUpPagewalkers(options.gpu_l1_pagewalkers,
                                    ruby._cpu_ports[options.num_cpus+options.num_sc*mp].slave,
                                    options.gpu_tlb_bypass_l1)
    if options.split:
        # NOTE: In split address space architectures, the MMU only provides the
        # copy engine host-side TLB access to a page walker. This should
        # probably be changed so that the copy engine doesn't manage
        # translations, but only the data handling
        # If inappropriately used, crash to inform MMU config problems to user:
        if options.access_host_pagetable:
            fatal('Cannot access host pagetable from the GPU or the copy ' \
                  'engine\'s GPU-side port\n in split address space. Use ' \
                  'only one of --split or --access-host-pagetable')
        # Tie copy engine ports to appropriate sequencers
        gpu.ce.host_port = \
            ruby._cpu_ports[options.num_cpus+options.num_sc*mp].slave
        gpu.ce.device_port = \
            ruby._cpu_ports[options.num_cpus+options.num_sc*mp+1].slave
        gpu.ce.device_dtb.access_host_pagetable = False
    else:
        # With a unified address space, tie both copy engine ports to the same
        # copy engine controller. NOTE: The copy engine is often unused in the
        # unified address space
        gpu.ce.host_port = \
            ruby._cpu_ports[options.num_cpus+options.num_sc*mp+1].slave
        gpu.ce.device_port = \
            ruby._cpu_ports[options.num_cpus+options.num_sc*mp+1].slave
| 1.382813 | 1 |
tests/roots/test_roots.py | emaballarin/phytorch | 1 | 12769868 | from functools import partial
from math import inf, nan
from typing import Union
import hypothesis.extra.numpy as npst
import torch
from hypothesis import given, strategies as st
from hypothesis.strategies import SearchStrategy
from pytest import fixture, mark
from torch import allclose, as_tensor, isclose, tensor
from phytorch.roots import companion_matrix, roots, sroots, vieta
from tests.common.dtypes import make_dtype_tests, with_default_double
def compare_sets(a, b, **kwargs):
    """True when every entry along the last dim of *a* is close to some entry of *b*.

    Extra keyword arguments (e.g. rtol/atol) are forwarded to ``isclose``.
    """
    candidates = as_tensor(b, dtype=a.dtype)
    # Pairwise closeness matrix: (..., len(a), len(b)) via broadcasting.
    pairwise = isclose(a.unsqueeze(-1), candidates.unsqueeze(-2), **kwargs)
    return pairwise.any(-1).all(-1)
def coeffs_strategy(
        n: Union[int, SearchStrategy[int]] = st.integers(min_value=2, max_value=4),
        dtype=complex,
        elements=st.complex_numbers(min_magnitude=1e-6, max_magnitude=1e6, allow_nan=False, allow_infinity=False)):
    """Strategy producing tuples of 2..n mutually-broadcastable coefficient tensors.

    *n* may be a concrete int (draw between 2 and n coefficients) or a
    hypothesis strategy for ints.  *dtype*/*elements* control the generated
    arrays, which are converted to torch tensors.
    """
    if isinstance(n, int):
        # Bug fix: the recursive call previously dropped any caller-supplied
        # dtype/elements, silently falling back to the defaults.
        return coeffs_strategy(st.integers(min_value=2, max_value=n), dtype, elements)
    return n.flatmap(lambda n: npst.mutually_broadcastable_shapes(num_shapes=n, max_dims=3, max_side=16).flatmap(
        lambda shapes: st.tuples(*(npst.arrays(dtype, shape, elements=elements).map(lambda arr: tensor(arr)) for shape in shapes.input_shapes))
    ))
def test_companion_matrix():
    """The companion matrix of x^3 + x^2 + 2x + 3 has the known fixed layout."""
    expected = tensor([
        [1, 2, 3],
        [1, 0, 0],
        [0, 1, 0]
    ])
    result = companion_matrix(tensor(-1), tensor(-2), tensor(-3))
    assert (result == expected).all()
@given(coeffs_strategy())
def test_companion_matrix_batched(coeffs):
    """Batched companion matrices broadcast the coefficient shapes and append (n, n)."""
    batch_shape = torch.broadcast_shapes(*(c.shape for c in coeffs))
    order = len(coeffs)
    assert companion_matrix(*coeffs).shape == batch_shape + (order, order)
@mark.xfail(reason='flaky', strict=False)
@with_default_double
@given(coeffs_strategy())
def test_vieta(coeffs):
    """Round-trip: coefficients -> roots -> Vieta's formulas recovers the coefficients."""
    recovered = vieta(roots(*coeffs))[1:]
    for original, rebuilt in zip(coeffs, recovered):
        assert allclose(rebuilt, original, rtol=1e-3, atol=1e-3)
@with_default_double
@given(coeffs_strategy())
def test_analytic_vs_numeric(coeffs):
    """Analytic and numeric root solvers must agree as (unordered) sets."""
    analytic = sroots(*coeffs, dim=-1)
    numeric = sroots(*coeffs, dim=-1, force_numeric=True)
    assert compare_sets(analytic, numeric, rtol=1e-3, atol=1e-3).all()
class RootsTest:
    """Root-finder checks against hand-computed roots of special polynomials.

    Subclassed per dtype via make_dtype_tests (see module bottom).
    """

    # Each (coeffs, vals) pair: polynomial coefficients -> expected root set.
    @mark.parametrize('coeffs, vals', (
        ((0, 0), (0, 0)),
        ((1, 0), (-1, 0)),
        ((0, 1), (-1j, 1j)),
        ((0, 0, 0), (0, 0, 0)),
        ((1, 0, 0), (-1, 0, 0)),
        ((0, 1, 0), (0, 1j, -1j)),
        ((0, 0, 1), (-1, (-1)**(1 / 3), -(-1)**(2 / 3))),
        ((1, 1, 0), (0, (-1)**(2 / 3), -(-1)**(1 / 3))),
        ((0, 0, 0, 0), (0, 0, 0, 0)),
        ((1, 0, 0, 0), (0, -1, 0, -1)),
        ((0, 1, 0, 0), (-1j, 0, 1j, 0)),
        ((0, 0, 1, 0), (-1, 0, (-1)**(1 / 3), -(-1)**(2 / 3))),
        ((0, 0, 0, 1), (-(-1)**(1 / 4), (-1)**(3 / 4), (-1)**(1 / 4), -(-1)**(3 / 4))),
        ((1, 1, 0, 0), (-(-1)**(1 / 3), (-1)**(2 / 3), 0, 0)),
        ((0, 1, 0, 1), (-(-1)**(1 / 3), (-1)**(2 / 3), -(-1)**(2 / 3), (-1)**(1 / 3))),
        ((1, 1, 1, 0), (-1, 0, 1j, -1j))
    ))
    def test_special(self, coeffs, vals):
        # Roots are compared as sets since solver ordering is unspecified.
        assert compare_sets(sroots(*coeffs), vals).all()

    @staticmethod
    def test_finite():
        # Any NaN or infinite coefficient should return NaN
        for n in (2, 3, 4):
            assert sroots(*(n-1)*(1,)+(nan,)).isnan().all()
            assert sroots(*(n-1)*(1,)+(inf,)).isnan().all()
class ForceNumericRootsTest(RootsTest):
    """Re-run RootsTest with the analytic solver disabled (force_numeric=True)."""

    @staticmethod
    @fixture(autouse=True, scope='class')
    def _set_force_numeric():
        # Monkeypatch the module-global `roots` to always force the numeric
        # (eigenvalue-based) solver for the duration of this class's tests.
        # see e.g. https://github.com/pytest-dev/pytest/issues/363
        # for why this workaround is needed
        from _pytest.monkeypatch import MonkeyPatch
        mpatch = MonkeyPatch()
        mpatch.setitem(globals(), 'roots', partial(roots, force_numeric=True))
        yield
        mpatch.undo()

    # @mark.xfail(reason='NaN in eig (https://github.com/pytorch/pytorch/issues/61251)', strict=True)
    @mark.skip(reason='segfaults, so cannot recover...')
    def test_finite(self): ...
# Generate dtype-specific variants of the test classes and inject them at
# module level so pytest collects them.
globals().update(make_dtype_tests((RootsTest,), 'Roots'))
globals().update(make_dtype_tests((ForceNumericRootsTest,), 'ForceNumericRoots'))
| 1.914063 | 2 |
tests/test_idempotent.py | rberenguel/motllo | 17 | 12769869 | <filename>tests/test_idempotent.py
from motllo.ops import Folder, File, tree
from motllo.markdown import build_tree, build_markdown
from motllo.build import _process_markdown
import random
from string import ascii_uppercase
import pytest
# Number of parameter values per axis; also scales the letter pool below.
RANGE = 5

@pytest.mark.parametrize("execution_number", range(RANGE))
@pytest.mark.parametrize("depth", range(RANGE))
@pytest.mark.parametrize("width", range(RANGE))
def test_randomised_idempotent(execution_number, depth, width):
    """Round-trip a random file tree through markdown and back; it must be unchanged.

    execution_number only repeats the (randomised) test; depth/width shape
    the generated tree.
    """
    # Pool of single letters consumed (via pop) for file/folder names and contents.
    sl = [l for l in ascii_uppercase * 2 * RANGE * RANGE * RANGE * RANGE]
    random.shuffle(sl)

    def random_tree(depth, width):
        """More complex random tree. We need to make sure there are no duplicate files
        or folders, and that they have content"""

        def purge_repeated_files(results):
            # Deduplicate sibling names by doubling a clashing name until unique.
            def rename_if_needed(fil, nrf):
                if fil.name in nrf:
                    doubled = fil.name + fil.name
                    fil._rename(doubled)
                    rename_if_needed(fil, nrf)

            non_repeated_filenames = []
            cleaned_results = []
            for fil in results:
                rename_if_needed(fil, non_repeated_filenames)
                non_repeated_filenames += [fil.name]
                cleaned_results += [fil]
            return cleaned_results

        if depth == 0:
            # Leaf level: only files, each with non-empty content.
            results = [
                File(sl.pop()).set_contents("contents=" + sl.pop())
                for _ in range(width)
            ]
            return purge_repeated_files(results)
        # Replace a random non-empty subset of the files with subfolders.
        folder_indexes = random.sample(range(width), random.randint(0, width - 1) + 1)
        results = [
            File(sl.pop()).set_contents("contents=" + sl.pop()) for _ in range(width)
        ]
        cleaned_results = purge_repeated_files(results)
        for index in folder_indexes:
            cleaned_results[index] = Folder(
                sl.pop(), contents=random_tree(depth - 1, width)
            )
        cleaned_folders = purge_repeated_files(cleaned_results)
        return cleaned_folders

    randomised = Folder("", contents=random_tree(1 + depth, 1 + width))
    # NOTE(review): all_lines and new_markdown are computed but never asserted
    # on -- presumably smoke checks that the calls don't raise.
    all_lines = [line for line in tree(randomised)]
    markdown = "\n".join(
        build_markdown(
            build_tree(randomised, ignore_globs=None, include_globs=None), max_length=15
        )
    )
    structure = _process_markdown(markdown, replacements=None)
    new_markdown = "\n".join(
        build_markdown(
            build_tree(structure, ignore_globs=None, include_globs=None), max_length=15
        )
    )
    # The reconstructed structure must equal the original tree (idempotence).
    assert structure == randomised
| 2.3125 | 2 |
forms.py | ljj6218/custom_process | 1 | 12769870 | from flask_wtf import FlaskForm
from wtforms import StringField, PasswordField
from wtforms.validators import DataRequired, EqualTo
class RegisterForm(FlaskForm):
    """Registration form: username, password and password confirmation.

    (The original docstring said "login form"; this class clearly models
    registration.)
    """
    username = StringField('用户名', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
    # Bug fix: EqualTo was imported but never applied, so mismatched password
    # confirmations were silently accepted.
    # NOTE(review): the '<PASSWORD>' label looks like an anonymization
    # artifact -- verify the intended label (likely '确认密码') upstream.
    password_2 = PasswordField('<PASSWORD>', validators=[DataRequired(), EqualTo('password')])
class LoginForm(FlaskForm):
    """Login form: username and password, both required."""
    username = StringField('用户名', validators=[DataRequired()])
    # Bug fix: stripped a fused dataset-extraction artifact ("| 3.015625 | 3 |")
    # from the end of this line, which made it a syntax error.
    password = PasswordField('密码', validators=[DataRequired()])
outsorcing.py | Souta-m/sentence-transformers | 0 | 12769871 | <reponame>Souta-m/sentence-transformers<gh_stars>0
from sentence_transformers import SentenceTransformer,util
#import MeCab
#from sklearn.metrics.pairwise import cosine_similarity
import csv
import pandas as pd
# Score each (Japanese, English) translation pair with two sentence-embedding
# models and write the cosine similarities to a CSV.
df = pd.read_csv('data/outsourcing.csv',encoding="shift_jis")
# Multilingual model: compares the Japanese source against the English target.
model1 = SentenceTransformer('paraphrase-multilingual-mpnet-base-v2')
# English-only model: compares the reference ("Correct") against the target.
model2 = SentenceTransformer('paraphrase-MiniLM-L6-v2')
#wakati = MeCab.Tagger("-Owakati -d /usr/lib/x86_64-linux-gnu/mecab/dic/mecab-ipadic-neologd")
#Our sentences we like to encode
with open('data/output/ouc.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerow(['Japanese','English','label','J-Escore','E-Escore'])
    for src,trg,cor,l in zip(df['Japanese'],df['English'],df['Correct'],df['label']):
        #src= wakati.parse(src)
        sentences1=[src]+[trg]
        sentences2=[cor]+[trg]
        #Sentences are encoded by calling model.encode()
        embeddings1 = model1.encode(sentences1)
        embeddings2 = model2.encode(sentences2)
        #Print the embeddings
        print(src,':',trg)
        # Cosine similarity between the two sentences in each pair.
        sim1 = util.pytorch_cos_sim(embeddings1[0],embeddings1[1])
        sim2 = util.pytorch_cos_sim(embeddings2[0],embeddings2[1])
        print(sim1.item())
        print(sim2.item())
        writer.writerow([src,trg,l,sim1.item(),sim2.item()])
| 2.390625 | 2 |
chip_eight/__init__.py | apalmer/computer-emulation | 0 | 12769872 | from .screen import Screen
from .keyboard import Keyboard
from .memory import Memory
from .cpu import Cpu
from .computer import Computer
from .runner import boot, load, run
| 1.171875 | 1 |
archived_NOT_working/WORKING.py | bendevlin18/sholl-analysis-python | 0 | 12769873 | <gh_stars>0
import os
import numpy as np
import pandas as pd
import cv2
import matplotlib.pyplot as plt
from skimage.morphology import skeletonize
from skimage import morphology
from shapely.geometry import Polygon
import matplotlib.pyplot as plt
from skimage import draw
import matplotlib as mpl
from matplotlib.colors import colorConverter
# Input folder containing the per-cell microglia TIFF images.
direc = 'C:\\Users\\Ben\\Dropbox\\bilbo_lab_spr2020\\il34_project\\sample_data_3dmorph\\ind_mgla'
list_of_files = os.listdir(direc)
# Collect only the .tif images.
files = []
for name in list_of_files:
    if '.tif' in name:
        files = np.append(files, name)
print(files)
# NOTE(review): os.mkdir raises FileExistsError on a re-run if the output
# folder already exists -- confirm whether that is intended.
os.mkdir(direc + '\\sholl_output')
# Sholl ring radii in pixels: 20, 30, ..., 210 (previously 21 hand-written
# rad_1..rad_20 constants; collapsed to a single range expression).
rads = list(range(20, 211, 10))
# One column per image: number of process intersections at each ring radius.
df = pd.DataFrame(index = rads)
## cleaning function for images that already have the brightness bumppped (usually hit the auto button 3 times or so for 8bit images)
for file in files:
    img = cv2.imread(direc +'\\' + file)
    # Denoise, then subtract the dominant background intensity.
    ## https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_photo/py_non_local_means/py_non_local_means.html
    dst = cv2.fastNlMeansDenoisingColored(img,None,10,10,7,21)
    ## detect where most of the background is coming from so you can get that out
    num = np.histogram(dst.flatten(), bins = 50)
    ## gets the value where most background is, so that we can subtract it away
    bg_val = num[1][np.argmax(num[0]) + 6]
    new = np.where(dst < bg_val, 0, dst)
    # Skeletonize and drop small speckles, then flatten one channel into a
    # 500x500 binary (0/255) array.
    skeleton = skeletonize(new)
    processed = morphology.remove_small_objects(skeleton.astype(bool), min_size=50, connectivity=25).astype(int)
    processed = np.where(processed > 0, 255, 0)
    new_arr = np.zeros(shape = (500, 500))
    for i in range(len(processed)):
        for j in range(len(processed)):
            new_arr[i][j] = processed[i][j][1]
    # fig, (ax1, ax2, ax3) = plt.subplots(1, 3, figsize = (15, 18))
    # ax1.imshow(new)
    # ax1.set_title('Smoothed Image')
    # ax2.imshow(skeleton)
    # ax2.set_title('Skeleton Image')
    # ax3.imshow(new_arr)
    # ax3.set_title('Cleaned Final Skeleton ' + file)
    # plt.show()
    # Interactive step: user clicks the soma center.
    plt.imshow(new)
    center = plt.ginput(1)
    plt.close()
    # Build one 500x500 ring image per Sholl radius, centered on the click.
    circles = ['1'] * len(rads)
    rr = []
    cc = []
    w = -1
    for rad in rads:
        w += 1
        arr = np.zeros((500, 500))
        rr, cc = draw.circle_perimeter(int(center[0][1]), int(center[0][0]), radius=rad, shape=arr.shape)
        arr[rr, cc] = 255
        circles[w] = arr
    intersects = []
    def calc_intersection(arr):
        # Count pixels where the ring overlaps the skeleton; returns
        # (count, flat [i, j, i, j, ...] coordinate array).
        intersects = []
        for i in range(len(arr)):
            for j in range(len(new_arr)):
                if arr[i][j] == 255.0:
                    if arr[i][j] == new_arr[i][j]:
                        intersects = np.append(intersects, [i, j])
                    elif arr[i][j] != new_arr[i][j]:
                        pass
                elif arr[i][j] != 255.0:
                    pass
        print(len(intersects) / 2)
        return(len(intersects) / 2, intersects)
    z = ['l'] * len(rads)
    intersections = []
    it = -1
    # NOTE(review): calc_intersection is called twice per ring (once for the
    # count, once for the coordinates), doubling the work -- confirm.
    for arr in circles:
        it += 1
        z[it] = calc_intersection(arr)[0]
        intersections = np.append(intersections, calc_intersection(arr)[1])
    x, y = intersections.reshape( int(len(intersections)/2) , 2).T
    # Transparent white-to-black colormap for overlaying the rings.
    color1 = colorConverter.to_rgba('white')
    color2 = colorConverter.to_rgba('black')
    cmap2 = mpl.colors.LinearSegmentedColormap.from_list('my_cmap2',[color1,color2],256)
    cmap2._init() # create the _lut array, with rgba values
    alphas = np.linspace(0, 0.8, cmap2.N+3)
    cmap2._lut[:,-1] = alphas
    # Output figure: results table (left) and annotated skeleton (right).
    fig, (ax2, ax1) = plt.subplots(1, 2, figsize = (15, 30))
    table_vals = list(zip(rads, z))
    col_labels = ['dist_from_soma', '# intersections']
    row_labels = rads
    ax2.table(cellText=table_vals, colWidths = [.2]*3, colLabels=col_labels, cellLoc = 'center', loc = 16, fontsize = 11).scale(1, 4)
    ax1.imshow(new_arr, origin = 'lower')
    # NOTE(review): only the first 10 of the 20 rings are overlaid here.
    ax1.imshow(circles[0], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[1], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[2], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[3], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[4], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[5], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[6], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[7], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[8], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.imshow(circles[9], interpolation='nearest', cmap=cmap2, origin='lower')
    ax1.scatter(center[0][0], center[0][1])
    ax1.scatter(y, x)
    ax1.set_title('Filename: ' + file)
    ax2.imshow(new, origin = 'lower')
    plt.savefig(direc + '\\sholl_output\\' + 'output_image_' + file + '.png')
    plt.show()
    df[file] = z
# Write one row per image with the intersection counts per radius.
# (Bug fix: stripped a fused dataset-extraction artifact "| 2.109375 | 2 |"
# from the end of this line, which made it a syntax error.)
df.T.to_csv(direc + '\\sholl_output\\' + 'output.csv')
common_python/tests/util/test_item_aggregator.py | ScienceStacks/common_python | 1 | 12769874 | import pandas as pd
import unittest
import common_python.constants as cn
from common_python.util.item_aggregator import ItemAggregator
# Set to True to skip individual tests while debugging a single one.
IGNORE_TEST = False
# Number of items generated per batch.
SIZE = 10
# Multiplier relating the second tuple element to the first.
MULT = 5
# Test fixture data: pairs (n, MULT*n) for n in [0, SIZE).
ITEMS = [(n, MULT*n) for n in range(SIZE)]
class TestItemAggregator(unittest.TestCase):
    """Tests for ItemAggregator (aggregates series extracted from items)."""

    def setUp(self):
        # Aggregator that extracts the first element of each item tuple.
        self.aggregator = ItemAggregator(lambda v: v[0])

    def testConstructor(self):
        """A fresh aggregator has no dataframe yet."""
        if IGNORE_TEST:
            return
        self.assertIsNone(self.aggregator._df)

    def testAppend(self):
        """Each append() adds one series of SIZE extracted values."""
        if IGNORE_TEST:
            return
        self.aggregator.append(ITEMS)
        first_series = self.aggregator.sers[0]
        self.assertEqual(len(first_series), SIZE)
        self.aggregator.append(ITEMS)
        self.assertEqual(len(self.aggregator.sers), 2)

    def testDf(self):
        """Means of the v[1] stream equal MULT times means of the v[0] stream."""
        if IGNORE_TEST:
            return
        second_aggregator = ItemAggregator(lambda v: v[1])
        for agg in (self.aggregator, second_aggregator):
            agg.append(ITEMS)
            agg.append(ITEMS)
        scaled_df = MULT*self.aggregator.df
        self.assertTrue(second_aggregator.df[cn.MEAN].equals(scaled_df[cn.MEAN]))
if __name__ == '__main__':
    # Run all tests in this module when executed directly.
    unittest.main()
| 2.796875 | 3 |
tests/test_deployment.py | dh7892/yamgraph | 0 | 12769875 | """
Unit tests for Deployment class
"""
from unittest.mock import MagicMock, Mock
import pytest
from colour import Colour
from deployment import Deployment
# NOTE(review): BLACK is not referenced anywhere in this module -- possibly
# used indirectly via fixtures elsewhere, or dead; confirm before removing.
BLACK = Colour("black")
@pytest.fixture(name="simple_deployment")
def simple_deployment_fixture(simple_box):
    """
    Return a Deployment named "simple" wrapping the ``simple_box`` fixture.

    NOTE(review): the box's dimensions/colour come from the ``simple_box``
    fixture (defined elsewhere, e.g. conftest) -- confirm its values there.
    """
    return Deployment("simple", simple_box)
@pytest.mark.parametrize("name", ["Steve", "Bob"])
def test_create_deployment(name, simple_box):
    """A Deployment stores the name it was constructed with."""
    created = Deployment(name, simple_box)
    assert created.name == name
def test_draw(simple_deployment):
    """
    Drawing a Deployment should delegate to ``draw`` on its box exactly once.
    """
    simple_deployment.box.draw = MagicMock()
    fake_driver = Mock()
    simple_deployment.draw(fake_driver)
    simple_deployment.box.draw.assert_called_once()
| 3.34375 | 3 |
pyprometheus/values.py | Lispython/pyprometheus | 13 | 12769876 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
pyprometheus.values
~~~~~~~~~~~~~~~~~~~
Prometheus instrumentation library for Python applications
:copyright: (c) 2017 by <NAME>.
:license: , see LICENSE for more details.
:github: http://github.com/Lispython/pyprometheus
"""
import time
from pyprometheus.utils import escape_str
from pyprometheus.const import TYPES
from pyprometheus.managers import TimerManager, InprogressTrackerManager, GaugeTimerManager
class MetricValue(object):
    """Base class for one sample of a metric for one label combination.

    A value object is identified by its storage ``key`` (type, metric name,
    postfix, sorted label pairs) and reads/writes its numeric value through
    the owning metric's storage backend.
    """
    TYPE = TYPES.BASE
    POSTFIX = ""

    def __init__(self, metric, label_values={}, value=None):
        """
        :param metric: owning metric (provides ``label_names`` and ``_storage``).
        :param label_values: mapping, or sequence of ``(name, value)`` pairs;
            its size must match the metric's declared label names.
        :param value: optional cached value; when set, ``get()`` returns it
            without touching storage.
        """
        self._metric = metric
        self.validate_labels(metric.label_names, label_values)
        self._labels, self._label_values = self.prepare_labels(label_values)
        self._value = value

    @staticmethod
    def prepare_labels(label_values):
        """Return ``(sorted label tuple, label dict)`` for *label_values*.

        Sorting makes the storage key independent of the caller's label order.
        """
        if isinstance(label_values, (list, tuple)):
            labels = tuple(sorted(label_values, key=lambda x: x[0]))
        elif isinstance(label_values, dict):
            labels = tuple(sorted(label_values.items(), key=lambda x: x[0]))
        else:
            # Previously any other type fell through to an UnboundLocalError;
            # fail with an explicit, descriptive error instead.
            raise TypeError(
                u"label_values must be a dict, list or tuple, got {0!r}".format(
                    type(label_values)))
        return labels, dict(label_values)

    @property
    def metric(self):
        """The metric object this value belongs to."""
        return self._metric

    def set_value(self, value):
        """Cache *value* locally so ``get()`` no longer hits storage."""
        self._value = value

    def __repr__(self):
        return u"<{0}[{1}]: {2} -> {3}>".format(
            self.__class__.__name__, self._metric.name,
            str(self._labels).replace("'", "\""), self.__repr_value__())

    def validate_labels(self, label_names, labels):
        """Raise RuntimeError if the supplied label count does not match the metric."""
        if len(labels) != len(label_names):
            raise RuntimeError(u"Invalid label values size: {0} != {1}".format(
                len(label_names), len(labels)))

    def __repr_value__(self):
        # Human-readable value used by __repr__; subclasses override.
        return self.get()

    @property
    def key(self):
        """Storage key uniquely identifying this sample."""
        return (self.TYPE, self._metric.name, self.POSTFIX, self._labels)

    def inc(self, amount=1):
        """Increment the stored value by *amount* via the storage backend."""
        return self._metric._storage.inc_value(self.key, amount)

    def get(self):
        """Return the locally cached value if set, otherwise read storage."""
        # Do not look up storage when a value was cached with set_value().
        if self._value is not None:
            return self._value
        return self._metric._storage.get_value(self.key)

    @property
    def value(self):
        return self.get()

    @property
    def export_str(self):
        """This sample rendered as one Prometheus text-exposition line.

        Timestamp is the current time in milliseconds, per the format spec.
        """
        return "{name}{postfix}{{{labels}}} {value} {timestamp}".format(
            name=escape_str(self._metric.name), postfix=self.POSTFIX,
            labels=self.export_labels, timestamp=int(time.time() * 1000), value=float(self.value))

    @property
    def export_labels(self):
        """Comma-separated ``name="value"`` pairs for the exposition line."""
        return ", ".join(["{0}=\"{1}\"".format(self.format_export_label(name), self.format_export_value(value))
                          for name, value in self._labels])

    def format_export_label(self, label):
        # Prometheus spells the histogram bucket label "le" (less-or-equal).
        if label == "bucket":
            return "le"
        return escape_str(label)

    def format_export_value(self, value):
        # Infinities are spelled "+Inf"/"-Inf" in the exposition format.
        # NOTE(review): NaN is not special-cased here.
        if value == float("inf"):
            return "+Inf"
        elif value == float("-inf"):
            return "-Inf"
        return escape_str(str(value))
class GaugeValue(MetricValue):
    """A gauge sample: a value that can go up, go down, or be set directly."""

    TYPE = TYPES.GAUGE

    def dec(self, amount=1):
        """Decrease the gauge by *amount* (increment by ``-amount``)."""
        self.inc(-amount)

    def set(self, value):
        """Overwrite the stored value and return it."""
        storage = self._metric._storage
        storage.write_value(self.key, value)
        return value

    @property
    def value(self):
        """Current gauge reading."""
        return self.get()

    def track_in_progress(self):
        """Return an in-progress tracker manager bound to this gauge."""
        return InprogressTrackerManager(self)

    def set_to_current_time(self):
        """Set the gauge to the current unix timestamp and return it."""
        now = time.time()
        return self.set(now)

    def time(self):
        """Return a gauge timer manager bound to this gauge."""
        return GaugeTimerManager(self)
class CounterValue(MetricValue):
    """A counter sample: a value that is only ever incremented."""
    TYPE = TYPES.COUNTER
    @property
    def value(self):
        # Cached value if set_value() was used, otherwise a storage read.
        return self.get()
class SummarySumValue(CounterValue):
    """The ``<basename>_sum`` series of a summary: total of observed values."""
    TYPE = TYPES.SUMMARY_SUM
    POSTFIX = "_sum"
class SummaryCountValue(CounterValue):
    """The ``<basename>_count`` series of a summary: number of observations."""
    TYPE = TYPES.SUMMARY_COUNTER
    POSTFIX = "_count"
class SummaryQuantilyValue(GaugeValue):
    """One ``<basename>{quantile="<q>"}`` series of a summary.

    The quantile is stored as an extra implicit label named "quantile".
    """
    TYPE = TYPES.SUMMARY_QUANTILE
    POSTFIX = "_quantile"

    def __init__(self, metric, label_values={}, quantile=0, value=None):
        """
        :param quantile: the quantile (0 <= q <= 1) this series tracks;
            added to the labels as ``quantile``.
        """
        # dict() already returns a fresh copy; the extra .copy() was redundant.
        label_values = dict(label_values)
        label_values["quantile"] = quantile
        self._quantile = quantile
        super(SummaryQuantilyValue, self).__init__(metric, label_values, value)

    def validate_labels(self, label_names, labels):
        """Expect exactly one extra label (the implicit "quantile" label)."""
        if len(labels) != len(label_names) + 1:
            # Report expected vs. actual sizes (operands were wrong before:
            # the message printed len(label_names) and len(labels) + 1).
            raise RuntimeError(u"Invalid label values size: {0} != {1}".format(
                len(label_names) + 1, len(labels)))

    def __repr_value__(self):
        return u"{0} -> {1}".format(self._quantile, self._value)

    @property
    def key(self):
        """Storage key; uses the class POSTFIX like the base implementation."""
        return (self.TYPE, self._metric.name, self.POSTFIX, self._labels)
class SummaryValue(MetricValue):
    """A summary sample group for one label combination.

    A summary with base name ``<basename>`` exposes several series:

    * streaming quantiles (0 <= q <= 1), as ``<basename>{quantile="<q>"}``
    * the total of all observed values, as ``<basename>_sum``
    * the count of observations, as ``<basename>_count``
    """
    TYPE = TYPES.SUMMARY
    # Component value classes keyed by series postfix.
    SUBTYPES = {
        "_sum": SummarySumValue,
        "_count": SummaryCountValue,
        "_quantile": SummaryQuantilyValue
    }
    def __init__(self, metric, label_values={}, value={}):
        # NOTE(review): ``value`` entries are consumed with pop(), so a
        # caller-supplied dict is mutated; falsy entries fall back (via
        # ``or``) to freshly constructed components. Confirm both are intended.
        super(SummaryValue, self).__init__(metric, label_values=label_values)
        self._sum = value.pop("sum", None) or SummarySumValue(self._metric, label_values=self._label_values)
        self._count = value.pop("count", None) or SummaryCountValue(self._metric, label_values=self._label_values)
        # One quantile series per configured quantile; none when the metric
        # does not declare a list/tuple of quantiles.
        if isinstance(self._metric.quantiles, (list, tuple)):
            self._quantiles = value.pop("quantiles", []) or [SummaryQuantilyValue(self._metric, label_values=self._label_values, quantile=quantile)
                                                             for quantile in self._metric.quantiles]
        else:
            self._quantiles = []
    def __repr_value__(self):
        # Shows sum/count (their ratio, the mean) plus the quantile series.
        return u"sum={sum} / count={count} = {value} [{quantiles}]".format(
            **{
                "sum": self._sum.value,
                "count": self._count.value,
                "value": (self._sum.value / self._count.value) if self._count.value != 0 else "-",
                "quantiles": ", ".join([x.__repr_value__() for x in self._quantiles]) if self._quantiles else "empty"
            }
        )
    def observe(self, amount):
        # Record one observation: add to the sum, bump the count.
        self._sum.inc(amount)
        self._count.inc()
        # TODO: calculate quantiles
        # for quantile, value in self._quantiles:
        #     pass
    @property
    def value(self):
        # All component value objects of this summary.
        return {
            "sum": self._sum,
            "count": self._count,
            "quantiles": self._quantiles}
    @property
    def export_str(self):
        # One exposition line per component series.
        return "\n".join([self._sum.export_str, self._count.export_str] + [quantile.export_str for quantile in self._quantiles])
    def time(self):
        # Context manager that observes elapsed time into this summary.
        return TimerManager(self)
class HistogramCountValue(SummaryCountValue):
    """The ``<basename>_count`` series of a histogram: number of observations."""
    TYPE = TYPES.HISTOGRAM_COUNTER
    POSTFIX = "_count"
class HistogramSumValue(SummarySumValue):
    """The ``<basename>_sum`` series of a histogram: total of observed values."""
    TYPE = TYPES.HISTOGRAM_SUM
    POSTFIX = "_sum"
class HistogramBucketValue(SummaryCountValue):
    """A single histogram bucket series:
    ``<basename>_bucket{le="<upper inclusive bound>"}``.
    """
    POSTFIX = "_bucket"
    TYPE = TYPES.HISTOGRAM_BUCKET

    def __init__(self, metric, label_values={}, bucket=None, value=None):
        """
        :param bucket: upper (inclusive) bound of this bucket; added to the
            labels as "bucket" and exported as ``le``
            (see ``MetricValue.format_export_label``).
        """
        # dict() already returns a fresh copy; the extra .copy() was redundant.
        label_values = dict(label_values)
        label_values["bucket"] = bucket
        self._bucket_threshold = bucket
        super(HistogramBucketValue, self).__init__(metric, label_values, value)

    def __repr_value__(self):
        return u"{0} -> {1}".format(self._bucket_threshold, self._value)

    @property
    def bucket_threshold(self):
        """Upper (inclusive) bound of this bucket."""
        return self._bucket_threshold

    def validate_labels(self, label_names, labels):
        """Expect exactly one extra label (the implicit "bucket" label)."""
        if len(labels) != len(label_names) + 1:
            # Report expected vs. actual sizes (operands were wrong before:
            # the message printed len(label_names) and len(labels) + 1).
            raise RuntimeError(u"Invalid label values size: {0} != {1}".format(
                len(label_names) + 1, len(labels)))
class HistogramValue(MetricValue):
    """A histogram sample group for one label combination.

    Exposes ``<basename>_sum``, ``<basename>_count`` and one
    ``<basename>_bucket{le="..."}`` series per configured bucket bound.
    """
    TYPE = TYPES.HISTOGRAM
    # Component value classes keyed by series postfix.
    SUBTYPES = {
        "_sum": HistogramSumValue,
        "_count": HistogramCountValue,
        "_bucket": HistogramBucketValue
    }

    def __init__(self, metric, label_values={}, value={}):
        """
        :param value: optional pre-built components ("sum", "count",
            "buckets"); entries are consumed with ``pop()``, so a
            caller-supplied dict is mutated.
        """
        # NOTE(review): pre-set before super().__init__(); presumably so
        # __repr_value__ is safe during base initialization -- confirm.
        self._buckets = []
        super(HistogramValue, self).__init__(metric, label_values=label_values)
        self._sum = value.pop("sum", None) or HistogramSumValue(self._metric, label_values=self._label_values)
        self._count = value.pop("count", None) or HistogramCountValue(self._metric, label_values=self._label_values)
        # One bucket series per configured bound, sorted ascending.
        self._buckets = (value.pop("buckets", []) or [HistogramBucketValue(self._metric, label_values=self._label_values, bucket=bucket)
                                                      for bucket in sorted(self._metric.buckets)])

    def __repr_value__(self):
        return u"sum={sum} / count={count} = {value} [{buckets}]".format(
            **{
                "sum": self._sum.__repr_value__(),
                "count": self._count.__repr_value__(),
                "value": (self._sum.value / self._count.value) if self._count.value != 0 else "-",
                "buckets": ", ".join([x.__repr_value__() for x in self._buckets]) if self._buckets else "empty"
            }
        )

    def observe(self, amount):
        """Record one observation of *amount* into sum, count and buckets."""
        self._sum.inc(amount)
        self._count.inc()
        for bucket in self._buckets:
            # Bucket bounds are upper-INCLUSIVE ("le" = less-or-equal), so an
            # observation equal to the bound must be counted (was "<", which
            # dropped boundary observations).
            bucket.inc(int(amount <= bucket.bucket_threshold))

    @property
    def value(self):
        """All component value objects of this histogram."""
        return {
            "sum": self._sum,
            "count": self._count,
            "buckets": self._buckets
        }

    @property
    def export_str(self):
        """One exposition line per component series."""
        return "\n".join([self._sum.export_str, self._count.export_str] + [bucket.export_str for bucket in self._buckets])

    def time(self):
        """Context manager that observes elapsed time into this histogram."""
        return TimerManager(self)
| 2.15625 | 2 |
demo_word_list_topic_model.py | shettyprithvi/scattertext | 1,823 | 12769877 | import scattertext as st
from scattertext import RankDifference
# Load the 2012 US convention speeches sample corpus and parse each text.
convention_df = st.SampleCorpora.ConventionData2012.get_data()
convention_df['parse'] = convention_df['text'].apply(st.whitespace_nlp_with_sentences)
# Build a stoplisted unigram corpus, categorized by party.
unigram_corpus = (st.CorpusFromParsedDocuments(convention_df,
                                               category_col='party',
                                               parsed_col='parse')
                  .build().get_stoplisted_unigram_corpus())
# Derive a word-list topic model seeded from hand-picked terms, scoring
# related terms by rank difference (20 terms per topic).
topic_model = (st.SentencesForTopicModeling(unigram_corpus)
               .get_topics_from_terms(['obama', 'romney', 'democrats', 'republicans',
                                       'health', 'military', 'taxes', 'education',
                                       'olympics', 'auto', 'iraq', 'iran', 'israel'],
                                      scorer=RankDifference(), num_terms_per_topic=20))
topic_feature_builder = st.FeatsFromTopicModel(topic_model)
# Re-build the corpus using topic-model features instead of raw terms.
topic_corpus = st.CorpusFromParsedDocuments(
    convention_df,
    category_col='party',
    parsed_col='parse',
    feats_from_spacy_doc=topic_feature_builder
).build()
# Render the interactive explorer comparing Democratic vs Republican usage.
html = st.produce_scattertext_explorer(
    topic_corpus,
    category='democrat',
    category_name='Democratic',
    not_category_name='Republican',
    width_in_pixels=1000,
    metadata=convention_df['speaker'],
    use_non_text_features=True,
    use_full_doc=True,
    pmi_threshold_coefficient=0,
    topic_model_term_lists=topic_feature_builder.get_top_model_term_lists()
)
# Write the visualization to disk for viewing in a browser.
open('./demo_word_list_topic_model.html', 'wb').write(html.encode('utf-8'))
print('Open ./demo_word_list_topic_model.html in Chrome or Firefox.')
| 2.578125 | 3 |
cinema_environment/server_monolith/server_app/migrations/0007_ticket_code.py | AndrewMalitchuk/cinema-server-monolith | 0 | 12769878 | <reponame>AndrewMalitchuk/cinema-server-monolith
# Generated by Django 3.0.2 on 2020-01-11 18:53
from django.db import migrations, models
import django.utils.timezone
class Migration(migrations.Migration):
    # Auto-generated by Django makemigrations; avoid hand-editing the logic.

    dependencies = [
        ('server_app', '0006_remove_ticket_code'),
    ]

    operations = [
        migrations.AddField(
            model_name='ticket',
            name='code',
            # NOTE(review): default is django.utils.timezone.now (a datetime
            # factory) on a CharField -- likely the one-off default entered
            # interactively during makemigrations; confirm intent.
            field=models.CharField(default=django.utils.timezone.now, max_length=1024, verbose_name='Код'),
            preserve_default=False,
        ),
    ]
| 1.65625 | 2 |
src/Config.py | marcoputon/python-api | 0 | 12769879 | import os
import json
'''
Arquivo de configurações.
Os atributos de conexão são criados dinamicamente. Estão definidos apenas
para não dar erro na IDE, mas não é necessário.
Todo - Criptografar senhas.
'''
class Config:
    """Application configuration loaded from ``$API_HOME/connection.config``.

    Connection attributes (ip, port, user, password, database) are populated
    dynamically from the JSON config file by ``load_config()``.
    """

    def __init__(self):
        # Connection attributes; declared only so IDEs/static tools see them.
        # Real values come from the JSON file in load_config().
        self.ip = None
        self.port = None
        self.user = None
        self.password = None  # TODO: store encrypted, not in plain text
        self.database = None

        # Config paths
        self.CONNECTION_NAME = "/connection.config"
        self.HOME_DIR = self.get_home_dir()
        self.CONNECTION_CONFIG_DIR = self.HOME_DIR + self.CONNECTION_NAME

        # Load
        self.load_config()

    def load_config(self):
        """Load the connection config file and set attributes from its JSON.

        If the file does not exist, a default template is created and the
        application exits so the user can fill it in.
        """
        try:
            with open(self.CONNECTION_CONFIG_DIR) as file:
                data = file.read().replace('\n', '')
        except FileNotFoundError:
            default_config = (
                '{\n' +
                '\t"ip": "localhost", \n' +
                '\t"port": "5432", \n' +
                '\t"user": "postgres", \n' +
                '\t"password": "<PASSWORD>", \n' +
                '\t"database": "teste" \n' +
                '}'
            )
            file = open(self.CONNECTION_CONFIG_DIR, "w+")
            file.write(default_config)
            file.close()
            print("ERROR: \n\t\tArquivo " + self.CONNECTION_CONFIG_DIR + " não encontrado. Arquivo padrão criado.")
            exit(0)
        # Merge into __dict__ instead of replacing it wholesale: assigning
        # self.__dict__ = json.loads(data) dropped CONNECTION_NAME, HOME_DIR
        # and CONNECTION_CONFIG_DIR after loading.
        self.__dict__.update(json.loads(data))

    def get_home_dir(self):
        """Return the application's config directory from $API_HOME, or exit.

        Requires the API_HOME environment variable to be set.
        """
        path = os.environ.get('API_HOME')
        if path is None:
            print("ERROR: \n\t\tA variável de ambiente 'API_HOME' não existe.")
            exit(0)
        return path

    def __str__(self):
        """Debug helper: dump all attributes. Do not use in release builds."""
        s = ""
        for attr, value in self.__dict__.items():
            # str() guards against non-string attribute values (was a
            # TypeError when any value was not already a str).
            s += attr + ": " + str(value) + "\n"
        return s
board.py | Hsu1685/PythonBattleshipGame | 21 | 12769880 | from box import Box
class Board(Box):
def __init__(self):
"""Initialize the board empty."""
self.board = []
self.hitted_not_sunk = [] #Only for CPU
for row in range(10):
line = []
for column in range(10):
element = Box('.', (row, column))
line.append(element)
self.board.append(line)
def print_board(self, person):
"""Print out board."""
print(" A B C D E F G H I J")
index = 1
for line in self.board:
if index != 10:
end = ' '
else:
end = ' '
print(index, end = end)
index += 1
for element in line:
if (element.symbol != '.' and element.is_visible == True) or person == 'own':
print(element.symbol, end = " ")
else:
print(end = ". ")
print('')
print('')
def find_coordinate(self, coordinate):
"""Find the box object corresponding with that coordinate."""
for line in self.board:
for element in line:
if element.place == coordinate:
return element
| 3.546875 | 4 |
sources/VailDEC.py | Vail-Zero/VailDEC3-sources-Linux | 0 | 12769881 |
# Copyright (c) 2020 Vail-Zero. All Rights Resarved.
# 必要なライブラリーインポート
from tkinter import messagebox
from pack import decker
import tkinter
import threading
import os
import sys
from pack import passgen
# from pack import config
# from pack import GUIl
from tkinter import *
from tkinter import ttk
from tkinter import messagebox
import webbrowser
# from pack import regkey
# NOTE(review): `global` at module level is a no-op; txt is assigned below anyway.
global txt
from pack import PassBox
# 画像の後ろの背景色設定
# Background colour behind the image (currently unused; configure() is
# commented out below).
backclr=""
args=sys.argv
# When launched with exactly one file argument (e.g. via file association),
# process that file immediately using the saved password and show the result.
if len(args)==2:
    pb = PassBox.PswdBox()
    pass1=str(pb.pswd)
    if os.path.isfile(args[1])==True:
        r, e = os.path.splitext(args[1])
        file=args[1]
        # ".dec" files are decrypted; anything else is encrypted.
        if e==".dec":
            ns=0
            n=decker.openzip(file,ns,pass1)
        else:
            n=decker.comzip(file,pass1)
        if e==".dec":
            # Transient hidden root so the dialogs can be shown topmost.
            root = tkinter.Tk()
            root.withdraw()
            root.attributes("-topmost", True)
            # Result codes from decker: 0 missing file, 1 success,
            # -2 wrong password, -3 access restricted.
            if n==0:
                messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
            if n==1:
                messagebox.showinfo('確認', '復号化が終了しました!')
            if n==-2:
                messagebox.showerror('エラー', 'パスワードが間違っています')
            if n==-3:
                messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
            root.destroy()
            root.mainloop()
        else:
            root = tkinter.Tk()
            root.withdraw()
            root.attributes("-topmost", True)
            # Result codes: 0 missing file, 1 success, -3 access restricted,
            # -2 possibly unsupported file.
            if n==0:
                messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
            if n==1:
                messagebox.showinfo('確認', '暗号化が終了しました!')
            if n==-3:
                messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
            if n==-2:
                messagebox.showerror('エラー', '対応していないファイルの可能性があります')
            root.destroy()
            root.mainloop()
    else:
        # Argument was not an existing file.
        root = tkinter.Tk()
        root.withdraw()
        root.attributes("-topmost", True)
        messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
        root.destroy()
        root.mainloop()
var = {'Theme': "None", 'online':True,'cash':"None"}
# リソース読み込み関数
def resourcePath(filename):
    """Resolve a bundled resource path.

    When ``sys._MEIPASS`` is present (presumably a PyInstaller onefile
    build -- resources are unpacked there), join against that directory;
    otherwise return the path relative to the working directory.
    """
    bundle_dir = getattr(sys, "_MEIPASS", None)
    if bundle_dir is None:
        return os.path.join(filename)
    return os.path.join(bundle_dir, filename)
# ボタンクリック後の処理
# 暗号化
def btn_click(pass1):
    """Ask the user for a file and encrypt it with decker.comzip.

    Shows a message box describing the result code returned by decker.
    Runs on a worker thread (see btn()).

    :param pass1: password string taken from the GUI password box.
    """
    import tkinter.filedialog  # ensure the filedialog submodule is loaded
    fTyp = [("", "*")]
    iDir = ""
    file = tkinter.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)
    # The dialog returns "" when the user cancels.
    # (A duplicated guard and unused shutil/tempfile imports were removed.)
    if file == "":
        return
    n = decker.comzip(file, pass1)
    # Result codes: 0 missing file, 1 success, -3 access restricted,
    # -2 possibly unsupported file (per the messages below).
    if n == 0:
        messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
    if n == 1:
        messagebox.showinfo('確認', '暗号化が終了しました!')
    if n == -3:
        messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
    if n == -2:
        messagebox.showerror('エラー', '対応していないファイルの可能性があります')
    return
# 復号化関数
def btn2_click(pass1):
    """Ask the user for a .dec file and decrypt it with decker.openzip.

    Shows a message box describing the result code returned by decker.
    Runs on a worker thread (see btn2()).

    :param pass1: password string taken from the GUI password box.
    """
    import tkinter.filedialog  # ensure the filedialog submodule is loaded
    fTyp = [("DEC Files", "*.dec")]
    iDir = ""
    file = tkinter.filedialog.askopenfilename(filetypes=fTyp, initialdir=iDir)
    # The dialog returns "" when the user cancels.
    # (Unused shutil/tempfile imports and dead locals were removed.)
    if file == "":
        return
    ns = 0
    n = decker.openzip(file, ns, pass1)
    # Result codes: 0 missing file, 1 success, -2 wrong password,
    # -3 access restricted, 4 possibly corrupted file.
    if n == 0:
        messagebox.showerror('エラー', '指定されたファイルまたはディレクトリが見つかりません。')
    if n == 1:
        messagebox.showinfo('確認', '復号化が終了しました!')
    if n == -2:
        messagebox.showerror('エラー', 'パスワードが間違っています')
    if n == -3:
        messagebox.showerror('エラー', 'ファイルはアクセスが制限されています')
    if n == 4:
        messagebox.showerror('エラー', 'ファイルが破損している可能性があります。\n元データでもう一度暗号化しなおしてください。\nこのファイルを他人から受け取った場合は、正式なファイルのコピーをもう一度取得してください')
    return
# 以下スレッド化
def btn():
    """Encrypt-button handler: validate the password box, then run
    btn_click() on a worker thread so the GUI stays responsive."""
    pass1=txt.get()
    if pass1=="":
        # Transient hidden root so the error dialog can be shown topmost.
        root = tkinter.Tk()
        root.withdraw()
        root.attributes("-topmost", True)
        messagebox.showerror('エラー', 'パスワードを入力してください')
        root.destroy()
        # NOTE(review): mainloop() after destroy() should return immediately
        # here -- confirm this sequence is intentional.
        root.mainloop()
        return
    thread1 = threading.Thread(target=btn_click,args=([pass1]))
    thread1.start()
    # Clear the password box once the work has been handed off.
    txt.delete(0, tkinter.END)
    return
def btn2():
    """Decrypt-button handler: validate the password box, then run
    btn2_click() on a worker thread so the GUI stays responsive."""
    pass1=txt.get()
    if pass1=="":
        # Transient hidden root so the error dialog can be shown topmost.
        root = tkinter.Tk()
        root.withdraw()
        root.attributes("-topmost", True)
        messagebox.showerror('エラー', 'パスワードを入力してください')
        root.destroy()
        # NOTE(review): mainloop() after destroy() should return immediately
        # here -- confirm this sequence is intentional.
        root.mainloop()
        return
    thread1 = threading.Thread(target=btn2_click,args=([pass1]))
    thread1.start()
    # Clear the password box once the work has been handed off.
    txt.delete(0, tkinter.END)
    return
# ここまで
def btn04():
    """Generate a random password (length 5..20), ask the user whether to
    use it; on confirmation copy it to the clipboard and put it in the
    password box."""
    import pyperclip
    txt.delete(0, tkinter.END)
    import random
    # Random password length between 5 and 20 characters (inclusive).
    cc=random.randint(5,20)
    bpass=passgen.gen(cc)
    b=messagebox.askyesno('確認', bpass+'\n 生成したパスワードをパスワードボックスに入れますか?\n 「OK」をクリックした場合、パスワードはクリップボードにコピーされます。')
    if b==False:
        return
    pyperclip.copy(bpass)
    txt.insert(tkinter.END,bpass)
    return
# ここまで
# 画面初期化
def put(event):
    """Paste the clipboard contents into the password box (event handler)."""
    import pyperclip
    txt.insert(tkinter.END,pyperclip.paste())
    return
# GUIl.delcheck()
# Create the main window.
window = tkinter.Tk()
window.geometry("451x300")
window.title("VailDEC ファイル暗号化ソフト")
#window.configure(bg=backclr)
window.resizable(False, False)
# Background image and icon setup.
iconfile = resourcePath('resources/IMG_8776.ICO')
# window.iconbitmap(iconfile)
window.attributes("-topmost", False)
# Masked password entry field.
txt = tkinter.Entry(font=("",15),show='*')
txt.place(x=130, y=200)
#label2 = ttk.Label(window, text='パスワード')
#label2.place(x=65, y=200)
# Add and lay out the buttons.
btn4 = tkinter.Button(window, text="パスワード生成",command = btn04)
btn4.place(x=300, y=18)
label3 = ttk.Label(window, text='パスワードを下に入力してからボタンを押してください')
label3.place(x=101, y=180)
# NOTE(review): these assignments rebind the handler function names `btn`
# and `btn2` to the Button widgets (command= captured the functions first).
btn = tkinter.Button(window, text="暗号化",command = btn,font=("", 25))
btn.place(x=90, y=100)
btn2 = tkinter.Button(window, text="復号化",command = btn2,font=("", 25))
btn2.place(x=250, y=100)
window.mainloop()
| 2.453125 | 2 |
bridge.py | juangallostra/bridge-the-gaps | 0 | 12769882 | # This is an answer to: https://codegolf.stackexchange.com/questions/189277/bridge-the-gaps
import sys
import os
from PIL import Image
import numpy as np
import scipy.ndimage
def obtain_groups(image, threshold, structuring_el):
    """
    Obtain isles of unconnected pixels via a threshold on the G channel
    (channel index 1; the old docstring said "R" incorrectly).

    Returns scipy's ``(labelled_array, num_groups)`` for the mask of pixels
    whose channel-1 value is below *threshold*, using *structuring_el* to
    define connectivity.
    """
    # np.int was removed in NumPy 1.24 and scipy.ndimage.measurements was
    # removed from modern SciPy; use plain int and the top-level label().
    image_logical = (image[:, :, 1] < threshold).astype(int)
    return scipy.ndimage.label(image_logical, structure=structuring_el)
def swap_colors(image, original_color, new_color):
    """Recolor, in place, every pixel whose RGB equals *original_color* to
    *new_color*, leaving any alpha channel untouched; return the image."""
    old_r, old_g, old_b = original_color
    mask = (
        (image[:, :, 0] == old_r)
        & (image[:, :, 1] == old_g)
        & (image[:, :, 2] == old_b)
    )
    image[:, :, :3][mask] = list(new_color)
    return image
def main(image_path=None):
    """
    For each processed image, we begin by changing the color
    of all the white pixels in an image to red. By doing this,
    it is guaranteed that all the elements (any isle of black
    pixels) are connected.
    Then, we iterate over all the pixels in the image starting
    from the top left corner and moving right and down. For every
    red pixel we find we change its color to white. If after this
    change of color there is still only one element (an element
    being now any isle of black and red pixels), we leave the pixel
    white and move on to the next pixel. However, if after the
    color change from red to white the number of elements is bigger
    than one, we leave the pixel red and move on to the next pixel.
    The connections obtained by only using this method show a regular
    pattern and in some cases, there are unnecessary red pixels.
    This extra red pixels can be easily removed by iterating again over
    the image and performing the same operations as explained above but
    from the bottom right corner to the top left corner. This second
    pass is much faster since the amount of red pixels that have to be
    checked.

    :param image_path: optional single image filename (inside "images/");
        when None, every file in the "images" directory is processed.
    """
    images = os.listdir("images")
    # NOTE(review): this handle is only closed at the end; an exception in
    # the loop leaks it -- a `with` block would be safer.
    f = open("results.txt", "w")
    if image_path is not None:
        images = [image_path]
    for image_name in images:
        im = Image.open("images/"+image_name).convert("RGBA")
        image = np.array(im)
        image = swap_colors(image, (255, 255, 255), (255, 0, 0))
        # create structuring element to determine unconnected groups of pixels in image
        # NOTE(review): scipy.ndimage.morphology is deprecated in modern
        # SciPy; the top-level generate_binary_structure is the replacement.
        s = scipy.ndimage.morphology.generate_binary_structure(2, 2)
        for i in np.ndindex(image.shape[:2]):
            # skip black pixels (RGBA sums to 255 only for [0,0,0,255])
            if sum(image[i[0], i[1]]) == 255:
                continue
            # Tentatively whiten the pixel; revert to red if that splits
            # the image into more than one connected component.
            image[i[0], i[1]] = [255, 255, 255, 255]
            # label the different groups, considering diagonal connections as valid
            groups, num_groups = obtain_groups(image, 255, s)
            if num_groups != 1:
                image[i[0], i[1]] = [255, 0, 0, 255]
            # Show percentage
            print((i[1] + i[0]*im.size[0])/(im.size[0]*im.size[1]))
        # Number of red pixels
        red_p = 0
        # Second pass, bottom-right to top-left, to prune unnecessary reds.
        for i in np.ndindex(image.shape[:2]):
            j = (im.size[1] - i[0] - 1, im.size[0] - i[1] - 1)
            # skip black and white pixels
            if sum(image[j[0], j[1]]) == 255 or sum(image[j[0], j[1]]) == 255*4:
                continue
            image[j[0], j[1]] = [255, 255, 255, 255]
            # label the different groups, considering diagonal connections as valid
            groups, num_groups = obtain_groups(image, 255, s)
            if num_groups != 1:
                image[j[0], j[1]] = [255, 0, 0, 255]
            # Show percentage
            print((j[1] + j[0]*im.size[0])/(im.size[0]*im.size[1]))
            # Red pixels [255,0,0,255] sum to exactly 2*255.
            red_p += (sum(image[j[0], j[1]]) == 255*2)
        print(red_p)
        f.write("r_"+image_name+": "+str(red_p)+"\n")
        im = Image.fromarray(image)
        # im.show()
        im.save("r_"+image_name)
    f.close()
if __name__ == "__main__":
    # Optional single CLI argument: process only that image file.
    if len(sys.argv) == 2:
        main(sys.argv[1])
    else:
        main()
| 4.03125 | 4 |
PreProcess/Step3-ConvertToOnehot.py | CDboyOne/IHGNN | 5 | 12769883 | <filename>PreProcess/Step3-ConvertToOnehot.py
from typing import Type, Any, List, Dict, Set, Tuple, Union, Optional, Iterator, Iterable
import sys, os, argparse
# Runtime setup: skip writing .pyc files, make local modules importable
# from the working directory, and clear the umask so created files get the
# full requested permissions.
sys.dont_write_bytecode = True
sys.path.append('.')
os.umask(0)
from Helpers.PreProcessHelper import PreProcessHelper
from Helpers.IOHelper import IOHelper
from Helpers.SearchLog import RawSearchLog, SearchLog
from Helpers.SearchLogCollection import RawSearchLogCollection, SearchLogCollection
def read_search_ids(filename: str) -> Set[str]:
    """Read a UTF-8 file of search ids (one per line) into a set of
    whitespace-stripped strings."""
    with open(filename, 'r', encoding='utf-8') as fin:
        return {line.strip() for line in fin}
def get_interaction_count(logs: SearchLogCollection) -> Tuple[int, int]:
    """Count (positive, non-positive) interaction flags across all logs.

    A flag counts as positive when it is > 0; everything else is negative.
    """
    positives = 0
    negatives = 0
    for log in logs:
        flags = log.interactions
        hits = sum(1 for flag in flags if flag > 0)
        positives += hits
        negatives += len(flags) - hits
    return positives, negatives
# When True, keep at least one log per user in the training split.
reserve_at_least_one_in_train = True
# Dataset presets: each later assignment overrides the previous one, so only
# the LAST source/result/split triple below actually takes effect (the rest
# are kept as experiment presets).
source_folder = 'E:/DataScienceDataset/AlibabaAir/Intermediate/Complete1Core/'
result_folder = 'E:/DataScienceDataset/AlibabaAir/Complete1Core2/'
split_ratios = [0.66, 0.1, 0.24]
source_folder = 'E:/DataScienceDataset/AlibabaAir/Intermediate/Complete5Core/'
result_folder = 'E:/DataScienceDataset/AlibabaAir/Complete5Core2/'
split_ratios = [0.695, 0.095, 0.21]
source_folder = 'E:/DataScienceDataset/AlibabaAir/Intermediate/Subset01W5Core/'
result_folder = 'E:/DataScienceDataset/AlibabaAir/Subset01W5Core/'
split_ratios = [0.696, 0.094, 0.21]
source_folder = 'E:/DataScienceDataset/Cikm/Intermediate/WithCategoryWithView5Core/'
result_folder = 'E:/DataScienceDataset/Cikm/WithCategoryWithView5Core/'
split_ratios = [0.69, 0.09, 0.22]
# Command-line overrides for the preset paths and split ratios.
args = argparse.ArgumentParser()
args.add_argument('--source', default='', type=str, help='源数据目录')
args.add_argument('--result', default='', type=str, help='存储结果的目录')
args.add_argument('--split', default='', type=str, help='分割比例,可以只指定前两项,如:\'0.7, 0.1\'')
args.add_argument('--presplit', default='', type=str, help='指定预先分割好的 search_id 文件所在的目录')
args = args.parse_args()
source_folder = args.source or source_folder
result_folder = args.result or result_folder
if args.source or args.result:
    # When paths are overridden, ratios come from --split (third ratio is
    # inferred when only two are given) or fall back to 0.7/0.1/0.2.
    if args.split:
        split_ratios = [float(r.strip()) for r in str(args.split).split(',')]
        if len(split_ratios) == 2: split_ratios += [1 - sum(split_ratios)]
    else:
        split_ratios = [0.7, 0.1, 0.2]
# Writing results into the source folder would clobber the inputs.
assert(source_folder != result_folder)
IOHelper.StartLogging(os.path.join(result_folder, 'PreProcess-Step3.txt'))
# --------------------------------------------------
# Load all raw inputs from the source folder.
IOHelper.LogPrint('读取文件...')
item_ids = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'item_ids.txt'))
queries = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'queries.txt'))
item_segments = [line.split() for line in IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'item_title_segments.txt'))]
query_segments = [line.split() for line in IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'query_segments.txt'))]
user_ids = IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'user_ids.txt'))
# Vocabulary is the union of item-title tokens and query tokens.
vocabulary = list(set(IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'vocabulary_item.txt'))) \
                  .union(IOHelper.ReadStringListFromFile(os.path.join(source_folder, 'vocabulary_query.txt'))))
search_logs = RawSearchLogCollection.read(os.path.join(source_folder, 'search_logs_raw.csv'))
# --------------------------------------------------
# Build reverse-lookup dictionaries (string -> index).
IOHelper.LogPrint('读取完毕,开始生成反查字典...')
user_id_rdict = PreProcessHelper.GetReverseLookupDictionary(user_ids)
query_rdict = PreProcessHelper.GetReverseLookupDictionary(queries)
item_id_rdict = PreProcessHelper.GetReverseLookupDictionary(item_ids)
vocabulary_rdict = PreProcessHelper.GetReverseLookupDictionary(vocabulary)
# --------------------------------------------------
IOHelper.LogPrint('\n开始分割数据集并将其转换为 one hot 形式...')
# Sort search logs by user and search time.
search_logs.sort_by_user_and_time()
# Split the search logs, either from pre-split id files or by ratio.
if args.presplit:
    search_idss = [
        read_search_ids(os.path.join(args.presplit, 'search_ids_train.txt')),
        read_search_ids(os.path.join(args.presplit, 'search_ids_valid.txt')),
        read_search_ids(os.path.join(args.presplit, 'search_ids_test.txt')),
    ]
    IOHelper.LogPrint(f'将使用以下目录中的预定义的分割指示文件:{args.presplit}')
else:
    search_idss = None
    IOHelper.LogPrint(f'定下的训练、验证、测试集的比例为:{split_ratios}')
IOHelper.LogPrint(f'对特定 user,至少在训练集中保留一条:{reserve_at_least_one_in_train}')
train_logs, valid_logs, test_logs = search_logs.split_to_train_valid_test(
    *split_ratios,
    presplit_search_ids=search_idss,
    reserve_at_least_one_in_train=reserve_at_least_one_in_train
)
IOHelper.LogPrint(f'search logs 已分割,原共 {len(search_logs)} 条,现训练、验证、测试集各'
                  + f' {len(train_logs)} {len(valid_logs)} {len(test_logs)} 条')
IOHelper.LogPrint(f'三者占比各为:{len(train_logs) / len(search_logs):.3f}'
                  + f' {len(valid_logs) / len(search_logs):.3f} {len(test_logs) / len(search_logs):.3f}')
# For reproducibility, record the search ids of each split.
train_logs.write_search_ids(os.path.join(result_folder, 'search_ids_train.txt'))
valid_logs.write_search_ids(os.path.join(result_folder, 'search_ids_valid.txt'))
test_logs.write_search_ids(os.path.join(result_folder, 'search_ids_test.txt'))
train_logs = train_logs.to_onehot(user_id_rdict, item_id_rdict, query_rdict)
valid_logs = valid_logs.to_onehot(user_id_rdict, item_id_rdict, query_rdict)
test_logs = test_logs.to_onehot(user_id_rdict, item_id_rdict, query_rdict)
# --------------------------------------------------
# Log dataset statistics.
IOHelper.LogPrint('\n制作统计数据...')
IOHelper.LogPrint(f'UserCount QueryCount ItemCount Vocabulary')
IOHelper.LogPrint(f'{len(user_ids):<9} {len(queries):<10} {len(item_ids):<9} {len(vocabulary):<10}')
IOHelper.LogPrint(f'\n各数据集的 search log 数量:')
IOHelper.LogPrint(f'Train Valid Test ')
IOHelper.LogPrint(f'{len(train_logs):<9} {len(valid_logs):<9} {len(test_logs):<9}')
# NOTE(review): node_count sums users + queries + VOCABULARY, but the
# sparsity log message below speaks of users/queries/items -- confirm which
# is intended.
node_count = len(user_ids) + len(queries) + len(vocabulary)
positive_flag_count, negative_flag_count = get_interaction_count(train_logs)
IOHelper.LogPrint(f'\n训练集中正负交互数量:{positive_flag_count} {negative_flag_count}')
IOHelper.LogPrint(f'平均每个正样本有 {negative_flag_count / positive_flag_count:.4f} 个负样本')
IOHelper.LogPrint(f'稀疏度(正交互数 / 用户查询商品数量之和的平方)为:{positive_flag_count / (node_count ** 2)}')
positive_flag_count += get_interaction_count(valid_logs)[0] + get_interaction_count(test_logs)[0]
IOHelper.LogPrint(f'\n全集中正交互数量:{positive_flag_count}')
# --------------------------------------------------
# Write the one-hot encoded splits.
IOHelper.LogPrint(f'\n将 train valid test 数据写入文件...')
train_logs.write(os.path.join(result_folder, 'train_data.csv'))
valid_logs.write(os.path.join(result_folder, 'valid_data.csv'))
test_logs.write(os.path.join(result_folder, 'test_data.csv'))
# Write the merged vocabulary.
IOHelper.WriteListToFile(vocabulary, os.path.join(result_folder, 'vocabulary.txt'))
# Generate item_titles_multihot.txt: token indices per item title.
with open(os.path.join(result_folder, 'item_titles_multihot.txt'), 'w', encoding='utf-8') as fout:
    for segments in item_segments:
        onehots = [vocabulary_rdict[segment] for segment in segments]
        fout.write(' '.join([str(onehot) for onehot in onehots]) + '\n')
# Generate queries_multihot.txt: token indices per query.
with open(os.path.join(result_folder, 'queries_multihot.txt'), 'w', encoding='utf-8') as fout:
    for segments in query_segments:
        onehots = [vocabulary_rdict[segment] for segment in segments]
        fout.write(' '.join([str(onehot) for onehot in onehots]) + '\n')
# Record the sizes of the user / query / item / vocabulary sets.
with open(os.path.join(result_folder, 'graph_info.txt'), 'w', encoding='utf-8') as fout:
    fout.write(f'{len(user_ids)} {len(queries)} {len(item_ids)} {len(vocabulary)}')
IOHelper.EndLogging()
src/106a/src/obstacle_brain.py | 106aRoboCupSim/simatch | 1 | 12769884 | #!/usr/bin/env python
import rospy
import sys
import time
import numpy as np
from realtimepseudoAstar import plan
from globaltorobotcoords import transform
from nubot_common.msg import ActionCmd, VelCmd, OminiVisionInfo, BallInfo, ObstaclesInfo, RobotInfo, BallIsHolding
# Initialize desired x depending on obstacle number (passed as argv[1], 1-based).
ROBOT_NAME = 'rival' + str(sys.argv[1])
possible_x = [-600, -200, 200, 600]
# Two patrol waypoints for this obstacle robot: same x, opposite y sides.
target_1 = np.array([possible_x[int(sys.argv[1]) - 1], -400])
target_2 = np.array([possible_x[int(sys.argv[1]) - 1], 400])
current_target = target_1
# For plotting
# import math
# import matplotlib.pyplot as plt
# Initialize publisher and rate; action commands go to this robot's hwcontroller.
pub = rospy.Publisher('/' + str(ROBOT_NAME)+'/nubotcontrol/actioncmd', ActionCmd, queue_size=1)
rospy.init_node(str(ROBOT_NAME) + '_brain', anonymous=False)
hertz = 10
rate = rospy.Rate(hertz)
def callback(data):
    """Vision callback: patrol this rival robot between two fixed waypoints.

    Reads the robot's pose from the omnivision message, flips the active
    waypoint when the robot gets close to it, converts the waypoint to the
    robot's local frame and publishes an ActionCmd for hwcontroller.
    """
    global current_target
    # Pull this robot's own entry out of the omnivision message.
    info = data.robotinfo[int(sys.argv[1]) - 1]
    position = np.array([info.pos.x, info.pos.y])
    heading = info.heading.theta
    # Alternate between the +y and -y targets once within 50 units.
    if np.linalg.norm(position - current_target) < 50:
        if np.array_equal(current_target, target_1):
            current_target = target_2
        elif np.array_equal(current_target, target_2):
            current_target = target_1
    # hwcontroller expects the goal expressed in the robot's local frame.
    local_goal = transform(current_target[0], current_target[1],
                           position[0], position[1], heading)
    # Build and publish the motion command.
    cmd = ActionCmd()
    cmd.target.x = local_goal[0]
    cmd.target.y = local_goal[1]
    cmd.maxvel = 150
    cmd.handle_enable = 0
    cmd.target_ori = 0
    pub.publish(cmd)
    rate.sleep()
def listener():
    """Subscribe to this robot's omnivision feed and hand control to ROS."""
    topic = "/" + str(ROBOT_NAME) + "/omnivision/OmniVisionInfo"
    rospy.Subscriber(topic, OminiVisionInfo, callback, queue_size=1)
    # Block forever; incoming messages drive the robot via callback().
    rospy.spin()
if __name__ == '__main__':
    try:
        listener()
    # Swallow the interrupt raised by ROS on shutdown (Ctrl-C / node kill).
    except rospy.ROSInterruptException:
        pass
| 2.40625 | 2 |
demo/one_shot/train.py | zhuguiqian/PaddleSlim | 926 | 12769885 | <gh_stars>100-1000
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import ast
import numpy as np
from PIL import Image
import os
import paddle
import paddle.fluid as fluid
from paddle.fluid.optimizer import AdamOptimizer
from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear
from paddle.fluid.dygraph.base import to_variable
from paddleslim.nas.one_shot import SuperMnasnet
from paddleslim.nas.one_shot import OneShotSearch
def parse_args():
    """Parse and return the command-line options for MNIST one-shot training."""
    cli = argparse.ArgumentParser("Training for Mnist.")
    # --use_data_parallel takes a Python literal ("True"/"False") rather than
    # acting as a flag, hence ast.literal_eval as the type converter.
    cli.add_argument(
        "--use_data_parallel",
        type=ast.literal_eval,
        default=False,
        help="The flag indicating whether to use data parallel mode to train the model."
    )
    cli.add_argument("-e", "--epoch", type=int, default=5, help="set epoch")
    cli.add_argument("--ce", action="store_true", help="run ce")
    return cli.parse_args()
class SimpleImgConv(fluid.dygraph.Layer):
    """A thin dygraph wrapper around a single Conv2D layer.

    Bug fix: the constructor accepted ``param_attr`` and ``bias_attr`` but
    always passed literal ``None`` to the underlying Conv2D, silently
    ignoring caller-supplied attributes. They are now forwarded; the default
    behavior (both ``None``) is unchanged, so existing callers are unaffected.
    """

    def __init__(self,
                 num_channels,
                 num_filters,
                 filter_size,
                 conv_stride=1,
                 conv_padding=0,
                 conv_dilation=1,
                 conv_groups=1,
                 act=None,
                 use_cudnn=False,
                 param_attr=None,
                 bias_attr=None):
        super(SimpleImgConv, self).__init__()
        self._conv2d = Conv2D(
            num_channels=num_channels,
            num_filters=num_filters,
            filter_size=filter_size,
            stride=conv_stride,
            padding=conv_padding,
            dilation=conv_dilation,
            groups=conv_groups,
            param_attr=param_attr,  # was hard-coded to None (ignored the arg)
            bias_attr=bias_attr,    # was hard-coded to None (ignored the arg)
            act=act,
            use_cudnn=use_cudnn)

    def forward(self, inputs):
        """Apply the convolution to ``inputs`` and return the feature map."""
        return self._conv2d(inputs)
class MNIST(fluid.dygraph.Layer):
    """MNIST classifier with a SuperMnasnet one-shot search space in the middle.

    Pipeline: conv -> searchable super-net block -> conv -> flatten -> softmax FC.
    """

    def __init__(self):
        super(MNIST, self).__init__()
        self._simple_img_conv_pool_1 = SimpleImgConv(1, 20, 2, act="relu")
        # Searchable block: architecture is selected at forward time via `tokens`.
        self.arch = SuperMnasnet(
            name_scope="super_net", input_channels=20, out_channels=20)
        self._simple_img_conv_pool_2 = SimpleImgConv(20, 50, 2, act="relu")
        # Flattened feature size after the second conv (50 channels of 13x13).
        self.pool_2_shape = 50 * 13 * 13
        SIZE = 10  # number of output classes
        # Normal-init scale for the FC weights.
        scale = (2.0 / (self.pool_2_shape**2 * SIZE))**0.5
        self._fc = Linear(
            self.pool_2_shape,
            10,
            param_attr=fluid.param_attr.ParamAttr(
                initializer=fluid.initializer.NormalInitializer(
                    loc=0.0, scale=scale)),
            act="softmax")

    def forward(self, inputs, label=None, tokens=None):
        """Run the network; if `label` is given also return batch accuracy.

        `tokens` selects a concrete sub-architecture inside the super-net.
        """
        x = self._simple_img_conv_pool_1(inputs)
        x = self.arch(x, tokens=tokens)  # searchable super-net block
        x = self._simple_img_conv_pool_2(x)
        x = fluid.layers.reshape(x, shape=[-1, self.pool_2_shape])
        x = self._fc(x)
        if label is not None:
            acc = fluid.layers.accuracy(input=x, label=label)
            return x, acc
        else:
            return x
def test_mnist(model, tokens=None):
    """Evaluate `model` on the MNIST test set and return the mean accuracy.

    `tokens`, if given, selects a sub-architecture in the super-net block.
    """
    acc_set = []
    avg_loss_set = []
    batch_size = 64
    test_reader = paddle.fluid.io.batch(
        paddle.dataset.mnist.test(), batch_size=batch_size, drop_last=True)
    for batch_id, data in enumerate(test_reader()):
        # Reshape flat pixel vectors to (N, 1, 28, 28) float32 images.
        dy_x_data = np.array([x[0].reshape(1, 28, 28)
                              for x in data]).astype('float32')
        y_data = np.array(
            [x[1] for x in data]).astype('int64').reshape(batch_size, 1)
        img = to_variable(dy_x_data)
        label = to_variable(y_data)
        label.stop_gradient = True  # no gradients through labels
        prediction, acc = model.forward(img, label, tokens=tokens)
        loss = fluid.layers.cross_entropy(input=prediction, label=label)
        avg_loss = fluid.layers.mean(loss)
        acc_set.append(float(acc.numpy()))
        avg_loss_set.append(float(avg_loss.numpy()))
        if batch_id % 100 == 0:
            print("Test - batch_id: {}".format(batch_id))
    # get test acc and loss
    acc_val_mean = np.array(acc_set).mean()
    # NOTE(review): avg_loss_val_mean is computed but never returned/used.
    avg_loss_val_mean = np.array(avg_loss_set).mean()
    return acc_val_mean
def train_mnist(args, model, tokens=None):
    """Train `model` on MNIST with Adam, evaluating after every epoch.

    `args` carries epoch count and the data-parallel flag; `tokens`, if
    given, fixes a sub-architecture in the super-net block during training.
    Saves the final parameters to "save_temp" (rank 0 only in parallel mode).
    """
    epoch_num = args.epoch
    BATCH_SIZE = 64
    adam = AdamOptimizer(
        learning_rate=0.001, parameter_list=model.parameters())
    train_reader = paddle.fluid.io.batch(
        paddle.dataset.mnist.train(), batch_size=BATCH_SIZE, drop_last=True)
    if args.use_data_parallel:
        # Shard the reader across workers in data-parallel mode.
        train_reader = fluid.contrib.reader.distributed_batch_reader(
            train_reader)

    for epoch in range(epoch_num):
        for batch_id, data in enumerate(train_reader()):
            # Reshape flat pixel vectors to (N, 1, 28, 28) float32 images.
            dy_x_data = np.array([x[0].reshape(1, 28, 28)
                                  for x in data]).astype('float32')
            y_data = np.array(
                [x[1] for x in data]).astype('int64').reshape(-1, 1)

            img = to_variable(dy_x_data)
            label = to_variable(y_data)
            label.stop_gradient = True  # no gradients through labels

            cost, acc = model.forward(img, label, tokens=tokens)

            loss = fluid.layers.cross_entropy(cost, label)
            avg_loss = fluid.layers.mean(loss)

            if args.use_data_parallel:
                # Scale loss and all-reduce gradients across workers.
                avg_loss = model.scale_loss(avg_loss)
                avg_loss.backward()
                model.apply_collective_grads()
            else:
                avg_loss.backward()

            adam.minimize(avg_loss)
            # save checkpoint
            model.clear_gradients()
            if batch_id % 1 == 0:
                print("Loss at epoch {} step {}: {:}".format(epoch, batch_id,
                                                             avg_loss.numpy()))

        # Per-epoch evaluation on the test set.
        model.eval()
        test_acc = test_mnist(model, tokens=tokens)
        model.train()
        print("Loss at epoch {} , acc is: {}".format(epoch, test_acc))

    # Only rank 0 saves in data-parallel mode to avoid clobbering.
    save_parameters = (not args.use_data_parallel) or (
        args.use_data_parallel and
        fluid.dygraph.parallel.Env().local_rank == 0)
    if save_parameters:
        fluid.save_dygraph(model.state_dict(), "save_temp")
        print("checkpoint saved")
if __name__ == '__main__':
    args = parse_args()
    place = fluid.CPUPlace()
    with fluid.dygraph.guard(place):
        model = MNIST()
        # step 1: training super net (disabled here; assumes pretrained weights)
        #train_mnist(args, model)
        # step 2: one-shot architecture search using test accuracy as reward
        best_tokens = OneShotSearch(model, test_mnist)
        # step 3: final training of the selected sub-architecture
        # train_mnist(args, model, best_tokens)
| 2.125 | 2 |
examples/python/number/minimum.py | rockacola/neo-smart-contract-examples | 15 | 12769886 | """
Date Created: 2018-03-08
Date Modified: 2018-03-08
Version: 1
Contract Hash: 1aa965c53c373ef9d3be065bdb36b234cdcab66a
Available on NEO TestNet: False
Available on CoZ TestNet: False
Available on MainNet: False
Example:
    Test Invoke: build /path/to/minimum.py test 0202 02 False False 2 5
Expected Result: 2
Operation Count: 51
GAS Consumption: 0.045
"""
def Main(a, b):
    """
    Return the smaller of the two input numbers.

    :param a: First input number of concern
    :param b: Second input number of concern
    :type a: int
    :type b: int
    :return: The smallest value of the 2 input numbers
    :rtype: int
    """
    if a < b:
        return a
    return b
| 2.515625 | 3 |
venv/lib/python3.6/site-packages/ansible_collections/cisco/ios/plugins/module_utils/network/ios/config/route_maps/route_maps.py | usegalaxy-no/usegalaxy | 1 | 12769887 | #
# -*- coding: utf-8 -*-
# Copyright 2021 Red Hat
# GNU General Public License v3.0+
# (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
"""
The cisco.ios_route_maps config file.
It is in this file where the current configuration (as dict)
is compared to the provided configuration (as dict) and the command set
necessary to bring the current configuration to its desired end-state is
created.
"""
from ansible.module_utils.six import iteritems
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.resource_module import (
ResourceModule,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.facts.facts import (
Facts,
)
from ansible_collections.ansible.netcommon.plugins.module_utils.network.common.utils import (
dict_merge,
)
from ansible_collections.cisco.ios.plugins.module_utils.network.ios.rm_templates.route_maps import (
Route_mapsTemplate,
)
class Route_maps(ResourceModule):
    """
    The cisco.ios_route_maps config class.

    Compares the current (have) and desired (want) route-map configuration
    and builds the IOS command list needed to reach the desired state.
    """

    # Attribute parsers handled generically by ResourceModule.compare().
    parsers = ["continue_entry", "description"]

    def __init__(self, module):
        # Wire this resource into the shared ResourceModule machinery.
        super(Route_maps, self).__init__(
            empty_fact_val={},
            facts_module=Facts(module),
            module=module,
            resource="route_maps",
            tmplt=Route_mapsTemplate(),
        )

    def execute_module(self):
        """ Execute the module
        :rtype: A dictionary
        :returns: The result from module execution
        """
        # parsed/gathered are read-only states: no commands are generated.
        if self.state not in ["parsed", "gathered"]:
            self.generate_commands()
            self.run_commands()
        return self.result

    def generate_commands(self):
        """ Generate configuration commands to send based on
            want, have and desired state.
        """
        # Key both sides by route-map name for O(1) matching.
        if self.want:
            wantd = {(entry["route_map"]): entry for entry in self.want}
        else:
            wantd = {}
        if self.have:
            haved = {(entry["route_map"]): entry for entry in self.have}
        else:
            haved = {}

        # Convert each of config list to dict
        for each in wantd, haved:
            self.list_to_dict(each)

        # if state is merged, merge want onto have and then compare
        if self.state == "merged":
            wantd = dict_merge(haved, wantd)

        # if state is deleted, empty out wantd and set haved to wantd
        if self.state == "deleted":
            haved = {
                k: v for k, v in iteritems(haved) if k in wantd or not wantd
            }
            wantd = {}

        # remove superfluous config for overridden and deleted
        if self.state in ["overridden", "deleted"]:
            for k, have in iteritems(haved):
                if k not in wantd:
                    route_map_cmd = "no route-map {route_map}".format(**have)
                    self.commands.append(route_map_cmd)

        # Per-route-map comparison; pop matches from haved as we go.
        for k, want in iteritems(wantd):
            self._compare(want=want, have=haved.pop(k, {}))

    def _compare(self, want, have):
        """Leverages the base class `compare()` method and
        populates the list of commands to be run by comparing
        the `want` and `have` data with the `parsers` defined
        for the Route_maps network resource.
        """
        if want != have and self.state != "deleted":
            self.entries_compare(want, have)

    def entries_compare(self, want, have):
        """Compare the per-sequence entries of one route-map and emit commands.

        Each entry is keyed "action_sequence"; the parent "route-map ..."
        command line is inserted before any generated sub-commands.
        """
        if want.get("entries"):
            # cmd_len marks where this entry's sub-commands begin, so the
            # parent "route-map" line can be inserted in front of them.
            cmd_len = len(self.commands)
            if have.get("entries"):
                for k, v in iteritems(want["entries"]):
                    have_entry = have["entries"].pop(k, {})
                    if have_entry and want["entries"][k] != have_entry:
                        # description gets merged with existing description, so explicit delete is required
                        # replaced and overridden state
                        if (
                            (
                                self.state == "replaced"
                                or self.state == "overridden"
                            )
                            and have_entry.get("description")
                            and have_entry.get("description")
                            != want["entries"][k].get("description")
                        ):
                            self.compare(
                                parsers=["description"],
                                want=dict(),
                                have=have_entry,
                            )
                        self.compare(
                            parsers=self.parsers,
                            want=want["entries"][k],
                            have=have_entry,
                        )
                        # Compare match/set sub-trees separately.
                        have_match = have_entry.get("match")
                        want_match = v.get("match")
                        if have_match and want_match:
                            self.list_type_compare(
                                "match", want=want_match, have=have_match
                            )
                        elif not have_match and want_match:
                            self.list_type_compare(
                                "match", want=want_match, have=dict()
                            )
                        have_set = have_entry.get("set")
                        want_set = v.get("set")
                        if have_set and want_set:
                            self.list_type_compare(
                                "set", want=want_set, have=have_set
                            )
                        elif not have_set and want_set:
                            self.list_type_compare(
                                "set", want=want_set, have=dict()
                            )
                    if cmd_len != len(self.commands):
                        # Sub-commands were generated: prefix them with the
                        # parent route-map header line.
                        route_map_cmd = "route-map {route_map}".format(**want)
                        if want["entries"][k].get("action"):
                            route_map_cmd += " {action}".format(
                                **want["entries"][k]
                            )
                        if want["entries"][k].get("sequence"):
                            route_map_cmd += " {sequence}".format(
                                **want["entries"][k]
                            )
                        self.commands.insert(cmd_len, route_map_cmd)
                        cmd_len = len(self.commands)
            else:
                # No existing entries: everything in want is new config.
                for k, v in iteritems(want["entries"]):
                    self.compare(
                        parsers=self.parsers,
                        want=want["entries"][k],
                        have=dict(),
                    )
                    want_match = v.get("match")
                    if want_match:
                        self.list_type_compare(
                            "match", want=want_match, have=dict()
                        )
                    want_set = v.get("set")
                    if want_set:
                        self.list_type_compare(
                            "set", want=want_set, have=dict()
                        )
                    if cmd_len != len(self.commands):
                        route_map_cmd = "route-map {route_map}".format(**want)
                        if want["entries"][k].get("action"):
                            route_map_cmd += " {action}".format(
                                **want["entries"][k]
                            )
                        if want["entries"][k].get("sequence"):
                            route_map_cmd += " {sequence}".format(
                                **want["entries"][k]
                            )
                        self.commands.insert(cmd_len, route_map_cmd)
                        cmd_len = len(self.commands)
            # Entries left in have were not wanted: negate them entirely.
            if (
                self.state == "replaced" or self.state == "overridden"
            ) and have.get("entries"):
                cmd_len = len(self.commands)
                for k, v in iteritems(have["entries"]):
                    route_map_cmd = "no route-map {route_map}".format(**have)
                    if have["entries"][k].get("action"):
                        route_map_cmd += " {action}".format(**have["entries"][k])
                    if have["entries"][k].get("sequence"):
                        route_map_cmd += " {sequence}".format(**have["entries"][k])
                    self.commands.insert(cmd_len, route_map_cmd)

    def list_type_compare(self, compare_type, want, have):
        """Compare a "match" or "set" sub-tree of one route-map entry.

        `compare_type` is the literal string "match" or "set" and selects
        the parser templates ("match", "match.ip", "match.ipv6", etc.).
        """
        parsers = [
            "{0}".format(compare_type),
            "{0}.ip".format(compare_type),
            "{0}.ipv6".format(compare_type),
        ]
        for k, v in iteritems(want):
            have_v = have.pop(k, {})
            # ip/ipv6 get per-attribute handling below; action/sequence are
            # entry metadata, not match/set attributes.
            if v != have_v and k not in ["ip", "ipv6", "action", "sequence"]:
                if have_v:
                    self.compare(
                        parsers=parsers,
                        want={compare_type: {k: v}},
                        have={compare_type: {k: have_v}},
                    )
                else:
                    self.compare(
                        parsers=parsers,
                        want={compare_type: {k: v}},
                        have=dict(),
                    )
            if k in ["ip", "ipv6"]:
                for key, val in iteritems(v):
                    have_val = have_v.pop(key, {})
                    if val != have_val:
                        if have_val:
                            # replaced/overridden must first negate the old
                            # value before configuring the new one.
                            if (
                                self.state == "overridden"
                                or self.state == "replaced"
                            ):
                                self.compare(
                                    parsers=parsers,
                                    want=dict(),
                                    have={compare_type: {k: {key: have_val}}},
                                )
                            self.compare(
                                parsers=parsers,
                                want={compare_type: {k: {key: val}}},
                                have={compare_type: {k: {key: have_val}}},
                            )
                        else:
                            self.compare(
                                parsers=parsers,
                                want={compare_type: {k: {key: val}}},
                                have=dict(),
                            )
                # Leftover ip/ipv6 attributes only in have get negated.
                if (
                    self.state == "overridden" or self.state == "replaced"
                ) and have_v:
                    for key, val in iteritems(have_v):
                        self.compare(
                            parsers=parsers,
                            want=dict(),
                            have={compare_type: {k: {key: val}}},
                        )
        # Attributes present only in have get negated for replaced/overridden.
        if have and (self.state == "replaced" or self.state == "overridden"):
            for k, v in iteritems(have):
                if k in ["ip", "ipv6"]:
                    for key, val in iteritems(v):
                        if key and val:
                            self.compare(
                                parsers=parsers,
                                want=dict(),
                                have={compare_type: {k: {key: val}}},
                            )
                else:
                    self.compare(
                        parsers=parsers,
                        want=dict(),
                        have={compare_type: {k: v}},
                    )

    def list_to_dict(self, param):
        """Normalize list-valued leaves into stably-keyed dicts, in place.

        Keys entries as "action_sequence" and converts the various
        match/set lists (acls, communities, interfaces, ...) into dicts so
        they can be diffed order-independently.
        """
        if param:

            def convert_to_dict(inner_match, key):
                # Build {"<key>_<value>": value, ...}, sorted by value for a
                # deterministic ordering.
                temp = dict()
                for each in inner_match:
                    temp.update({key + "_" + str(each): each})
                return dict(sorted(temp.items(), key=lambda x: x[1]))

            for key, val in iteritems(param):
                temp_entries = dict()
                if val.get("entries"):
                    for every in val["entries"]:
                        match = every.get("match")
                        if match:
                            if match.get("as_path") and match.get(
                                "as_path"
                            ).get("acls"):
                                match["as_path"]["acls"] = convert_to_dict(
                                    match["as_path"]["acls"], "acl"
                                )
                            if match.get("community") and match.get(
                                "community"
                            ).get("name"):
                                match["community"]["name"] = convert_to_dict(
                                    match["community"]["name"], "name"
                                )
                            if match.get("extcommunity"):
                                match["extcommunity"] = convert_to_dict(
                                    match["extcommunity"], "num"
                                )
                            if match.get("interfaces"):
                                match["interfaces"] = convert_to_dict(
                                    match["interfaces"], "interface"
                                )
                            if match.get("ip"):
                                for each_ip_param in [
                                    "address",
                                    "flowspec",
                                    "next_hop",
                                    "redistribution_source",
                                    "route_source",
                                ]:
                                    if match["ip"].get(each_ip_param):
                                        if match["ip"][each_ip_param].get(
                                            "acls"
                                        ):
                                            match["ip"][each_ip_param][
                                                "acls"
                                            ] = convert_to_dict(
                                                match["ip"][each_ip_param][
                                                    "acls"
                                                ],
                                                "acl",
                                            )
                                        elif match["ip"][each_ip_param].get(
                                            "prefix_lists"
                                        ):
                                            match["ip"][each_ip_param][
                                                "prefix_lists"
                                            ] = convert_to_dict(
                                                match["ip"][each_ip_param][
                                                    "prefix_lists"
                                                ],
                                                "prefix_list",
                                            )
                            if match.get("local_preference") and match.get(
                                "local_preference"
                            ).get("value"):
                                match["local_preference"][
                                    "value"
                                ] = convert_to_dict(
                                    match["local_preference"]["value"], "value"
                                )
                            if match.get("mdt_group") and match.get(
                                "mdt_group"
                            ).get("acls"):
                                match["mdt_group"]["acls"] = convert_to_dict(
                                    match["mdt_group"]["acls"], "acl"
                                )
                            if match.get("policy_lists"):
                                match["policy_lists"] = convert_to_dict(
                                    match["policy_lists"], "policy"
                                )
                            if match.get("security_group"):
                                for each_sg_param in ["source", "destination"]:
                                    if match.get("security_group").get(
                                        each_sg_param
                                    ):
                                        match["security_group"][
                                            each_sg_param
                                        ] = convert_to_dict(
                                            match["security_group"][
                                                each_sg_param
                                            ],
                                            each_sg_param,
                                        )
                        set = every.get("set")
                        if set:
                            if set.get("interfaces"):
                                set["interfaces"] = convert_to_dict(
                                    set["interfaces"], "interface"
                                )
                        action = every.get("action")
                        sequence = every.get("sequence")
                        # Re-key each entry as "action_sequence".
                        temp_entries.update(
                            {action + "_" + str(sequence): every}
                        )
                    val["entries"] = temp_entries
| 2.046875 | 2 |
aroma/features.py | vinferrer/aroma | 0 | 12769888 | <reponame>vinferrer/aroma
"""Functions to calculate ICA-AROMA features for component classification."""
import logging
import os
import numpy as np
from nilearn import image, masking
from nilearn._utils import load_niimg
from . import utils
LGR = logging.getLogger(__name__)
def feature_time_series(mel_mix, mc):
    """Extract maximum motion parameter correlation scores from components.

    This function determines the maximum robust correlation of each component
    time series with a model of 72 realignment parameters.

    Parameters
    ----------
    mel_mix : numpy.ndarray of shape (T, C)
        Mixing matrix in shape T (time) by C (component).
    mc : str or array_like
        Full path of the text file containing the realignment parameters.
        Motion parameters are (time x 6), with the first three columns being
        rotation parameters (in radians) and the final three being translation
        parameters (in mm).

    Returns
    -------
    max_RP_corr : array_like
        Array of the maximum RP correlation feature scores for the components
        of the melodic_mix file.

    Notes
    -----
    Uses unseeded ``np.random.choice`` for the robust subsampling, so scores
    vary slightly between runs unless the caller seeds NumPy's RNG.
    """
    if isinstance(mc, str):
        rp6 = utils.load_motpars(mc, source="auto")
    else:
        rp6 = mc

    if (rp6.ndim != 2) or (rp6.shape[1] != 6):
        raise ValueError(f"Motion parameters must of shape (n_trs, 6), not {rp6.shape}")

    if rp6.shape[0] != mel_mix.shape[0]:
        raise ValueError(
            f"Number of rows in mixing matrix ({mel_mix.shape[0]}) does not match "
            f"number of rows in motion parameters ({rp6.shape[0]})."
        )

    # Determine the derivatives of the RPs (add zeros at time-point zero)
    _, nparams = rp6.shape
    rp6_der = np.vstack((
        np.zeros(nparams),
        np.diff(rp6, axis=0)
    ))

    # Create an RP-model including the RPs and its derivatives
    rp12 = np.hstack((rp6, rp6_der))

    # add the fw and bw shifted versions (72 regressors total: 12 x 3 shifts x 2 squarings)
    rp12_1fw = np.vstack((
        np.zeros(2 * nparams),
        rp12[:-1]
    ))
    rp12_1bw = np.vstack((
        rp12[1:],
        np.zeros(2 * nparams)
    ))
    rp_model = np.hstack((rp12, rp12_1fw, rp12_1bw))

    # Determine the maximum correlation between RPs and IC time-series
    nsplits = 1000
    nmixrows, nmixcols = mel_mix.shape
    nrows_to_choose = int(round(0.9 * nmixrows))

    # Max correlations for multiple splits of the dataset (for a robust
    # estimate)
    max_correls = np.empty((nsplits, nmixcols))
    for i in range(nsplits):
        # Select a random subset of 90% of the dataset rows
        # (*without* replacement)
        chosen_rows = np.random.choice(a=range(nmixrows),
                                       size=nrows_to_choose,
                                       replace=False)

        # Combined correlations between RP and IC time-series, squared and
        # non squared
        correl_nonsquared = utils.cross_correlation(mel_mix[chosen_rows],
                                                    rp_model[chosen_rows])
        correl_squared = utils.cross_correlation(mel_mix[chosen_rows]**2,
                                                 rp_model[chosen_rows]**2)
        correl_both = np.hstack((correl_squared, correl_nonsquared))

        # Maximum absolute temporal correlation for every IC
        max_correls[i] = np.abs(correl_both).max(axis=1)

    # Feature score is the mean of the maximum correlation over all the random
    # splits
    # Avoid propagating occasional nans that arise in artificial test cases
    max_RP_corr = np.nanmean(max_correls, axis=0)
    return max_RP_corr
def feature_frequency(mel_FT_mix: np.ndarray, TR: float):
    """Extract the high-frequency content (HFC) feature scores.

    For every component, find the frequency — expressed as a fraction of the
    usable band between 0.01 Hz and the Nyquist frequency — at which the
    cumulative power spectrum reaches half of the total power.

    Parameters
    ----------
    mel_FT_mix : numpy.ndarray of shape (F, C)
        Power spectra, one column per component, with rows spanning
        0 Hz up to the Nyquist frequency.
    TR : float
        TR (in seconds) of the fMRI data.

    Returns
    -------
    HFC : array_like
        High-frequency content score per component.
    """
    sample_rate = 1 / TR
    nyquist = sample_rate / 2

    # Frequency associated with each spectrum row (rows span (0, Nyquist]).
    n_rows = mel_FT_mix.shape[0]
    freqs = nyquist * np.arange(1, n_rows + 1) / n_rows

    # Discard everything at or below 0.01 Hz.
    keep = np.squeeze(np.array(np.where(freqs > 0.01)))
    spectra = mel_FT_mix[keep, :]
    freqs = freqs[keep]

    # Rescale the retained frequency band to [0, 1].
    norm_freqs = (freqs - 0.01) / (nyquist - 0.01)

    # Fraction of total power accumulated up to each frequency, per component.
    cum_fraction = np.cumsum(spectra, axis=0) / np.sum(spectra, axis=0)

    # Row whose cumulative fraction is closest to one half (first on ties).
    half_power_idx = np.argmin(np.abs(cum_fraction - 0.5), axis=0)

    # The normalized frequencies at those rows are the feature scores.
    return norm_freqs[half_power_idx]
def feature_spatial(mel_IC):
    """Extract the spatial feature scores.

    For each IC, compute the fraction of the mixture-modeled thresholded
    Z-map located within the CSF and at the brain edges, using the
    predefined standardized masks shipped with the package.

    Parameters
    ----------
    mel_IC : str or niimg_like
        Full path of the nii.gz file containing mixture-modeled thresholded
        (p<0.5) Z-maps, registered to the MNI152 2mm template

    Returns
    -------
    edge_fract : array_like
        Edge-fraction feature score per component.
    csf_fract : array_like
        CSF-fraction feature score per component.
    """
    mel_IC_img = load_niimg(mel_IC)
    n_components = mel_IC_img.shape[3]

    # Standardized masks bundled with the package resources.
    resource_dir = utils.get_resource_path()
    csf_mask = os.path.join(resource_dir, "mask_csf.nii.gz")
    edge_mask = os.path.join(resource_dir, "mask_edge.nii.gz")
    out_mask = os.path.join(resource_dir, "mask_out.nii.gz")

    edge_fract = np.zeros(n_components)
    csf_fract = np.zeros(n_components)
    for idx in range(n_components):
        # Work on the absolute Z-values of this component's map.
        component = image.index_img(mel_IC, idx)
        component = image.math_img("np.abs(img)", img=component)

        # Total |Z| over the whole map.
        total_sum = np.sum(component.get_fdata())
        if total_sum == 0:
            LGR.info("\t- The spatial map of component {} is empty. "
                     "Please check!".format(idx + 1))

        # Total |Z| inside each region of interest.
        csf_sum = np.sum(masking.apply_mask(component, csf_mask))
        edge_sum = np.sum(masking.apply_mask(component, edge_mask))
        out_sum = np.sum(masking.apply_mask(component, out_mask))

        # Fractions are only defined for non-empty maps.
        if total_sum != 0:
            edge_fract[idx] = (out_sum + edge_sum) / (total_sum - csf_sum)
            csf_fract[idx] = csf_sum / total_sum
        else:
            edge_fract[idx] = 0
            csf_fract[idx] = 0

    return edge_fract, csf_fract
| 2.4375 | 2 |
scripts/cpsg.py | skyhoshi/concfg | 864 | 12769889 | """
cpsg.py
~~~~~~
Concfg Preset Screenshot Generator
Only works in pure powershell/pwsh session, does not work in terminal like cmder.
Prerequisites:
Python3.4+, Pillow, jinja2, pywin32
"""
import os
import sys
import glob
import time
import shutil
import argparse
import win32gui
import subprocess
import win32process
from PIL import ImageGrab
from jinja2 import Template
# Use legacy Windows PowerShell ("powershell") instead of pwsh when True.
LEGACY_PWSH = False
# Paths are resolved relative to this script's own location.
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
PRESETS_DIR = os.path.join(SCRIPT_DIR, os.pardir, 'presets')
PRESET_EXAMPLES_DIR = os.path.join(SCRIPT_DIR, os.pardir, 'preset_examples')
# Presets that are not color schemes and therefore get no screenshot.
SKIP_LIST = ['basic', 'basic-reset']
def get_hwnds_for_pid(pid):
    """Return handles of all visible, enabled top-level windows owned by *pid*."""
    matches = []

    def _collect(hwnd, acc):
        # Keep visible, enabled windows whose owning process matches *pid*.
        if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
            _, owner_pid = win32process.GetWindowThreadProcessId(hwnd)
            if owner_pid == pid:
                acc.append(hwnd)
        return True  # keep enumerating

    win32gui.EnumWindows(_collect, matches)
    return matches
def get_presets():
    """Return (name, path) pairs for every *.json preset in PRESETS_DIR."""
    paths = glob.glob(os.path.join(PRESETS_DIR, '*.json'))
    # preset pair list [(name, path), (name, path), ...]
    return [(os.path.splitext(os.path.basename(p))[0], p) for p in paths]
def gens_for_preset(preset):
    """Apply a color preset in a fresh console and screenshot the color table.

    *preset* is a (name, path) pair. Spawns two PowerShell consoles: one to
    set the colors, one to print the table; the second window is captured to
    PRESET_EXAMPLES_DIR/<name>.png.
    """
    exe = 'powershell' if LEGACY_PWSH else 'pwsh'
    print("Taking screenshot of preset '{0}'...".format(preset[0]))
    # set color preset
    pwsh = subprocess.Popen(
        '{0} -noprofile -file {1}/setcolors.ps1 -preset {2}'.format(exe, SCRIPT_DIR, preset[1]),
        creationflags=subprocess.CREATE_NEW_CONSOLE
    )
    # waiting for exit (fixed delay; no explicit wait on the process)
    time.sleep(4.0)
    # print out color table then take screenshot
    pwsh = subprocess.Popen(
        '{0} -noprofile -noexit -file {1}/outcolors.ps1'.format(exe, SCRIPT_DIR),
        creationflags=subprocess.CREATE_NEW_CONSOLE
    )
    # waiting for process
    time.sleep(2.0)
    for hwnd in get_hwnds_for_pid(pwsh.pid):
        # Bring the console to the foreground so the grab captures it.
        win32gui.SetForegroundWindow(hwnd)
        bbox = win32gui.GetWindowRect(hwnd)
        # remove window box shadow
        crop_bbox = (bbox[0]+7, bbox[1], bbox[2]-7, bbox[3]-7)
        img = ImageGrab.grab(crop_bbox)
        if not os.path.exists(PRESET_EXAMPLES_DIR):
            os.makedirs(PRESET_EXAMPLES_DIR)
        img.save(os.path.join(PRESET_EXAMPLES_DIR, '{0}.png'.format(preset[0])))
    pwsh.kill()
def img_dict(direntry):
    """Map a screenshot DirEntry to ``{'name': <basename sans .png>, 'path': <filename>}``."""
    filename = direntry.name
    return {'name': filename.replace('.png', ''), 'path': filename}
def is_img(direntry):
    """Return True if *direntry* is a regular file whose name ends in ``.png``.

    Bug fix: the original tested ``direntry.is_file`` without calling it, so
    the bound method was always truthy and directories named ``*.png`` were
    accepted. The method is now actually invoked.
    """
    return direntry.is_file() and direntry.name.endswith('.png')
if __name__ == '__main__':
    # Usage: python -m cpsg [args]
    parser = argparse.ArgumentParser(
        description='Concfg Preset Screenshot Generator')
    parser.add_argument("-a", "--all",
                        help="generate screenshot for all presets",
                        action="store_true")
    parser.add_argument("-l", "--legacy",
                        help="pass this option if you use Windows PowerShell",
                        action="store_true")
    parser.add_argument("-p", "--preset",
                        help="generate screenshot for single preset")
    parser.add_argument("-u", "--update",
                        help="also update the screenshot README",
                        action="store_true")
    args = parser.parse_args()
    # Screenshot generation requires ColorTool and an undisturbed desktop.
    if args.all or args.preset:
        if not shutil.which('colortool.exe'):
            print("Make sure you have 'ColorTool' installed.")
            sys.exit(0)
        input("NOTICE: Do not have other operations while the script runs, "
              "or it will be interrupted when taking screenshots. "
              "Hit Enter to continue: ")
        presets = get_presets()
        if args.legacy:
            # Module-level assignment: switches gens_for_preset to 'powershell'.
            LEGACY_PWSH = True
        if args.all:
            for item in presets:
                # skip non-color presets
                if not item[0] in SKIP_LIST:
                    gens_for_preset(item)
        elif args.preset:
            # skip non-color presets
            if not args.preset in SKIP_LIST:
                match = [item for item in presets if item[0] == args.preset]
                if len(match):
                    gens_for_preset(match[0])
                else:
                    print("No preset named '{0}'.".format(args.preset))
                    sys.exit(0)
    # Regenerate the screenshot gallery README from the jinja2 template.
    if args.update:
        print('Updating screenshots README.md...')
        # Get template
        with open(os.path.join(SCRIPT_DIR, 'readme.jinja2')) as templateData:
            template = Template(templateData.read())
        # Get images
        images = [img_dict(direntry) for direntry in os.scandir(PRESET_EXAMPLES_DIR) if is_img(direntry)]
        images.sort(key=lambda x: x['name'])
        # Generate README
        with open(os.path.join(PRESET_EXAMPLES_DIR, 'README.md'), 'w') as readme:
            readme.write(template.render(images=images))
    else:
        parser.print_help()
        sys.exit(0)
| 1.976563 | 2 |
radiant/static/modules/mdc/MDCList.py | UN-GCPDS/radiant | 5 | 12769890 | """
Brython MDCComponent: MDCList
=============================
"""
from browser import html
from .core import MDCTemplate
########################################################################
class __listItem__(MDCTemplate):
    """One MDC list item, optionally with icon/avatar/meta decorations.

    Fixes applied:
    - ``get()`` compared strings with ``is`` (identity), which only worked by
      CPython string interning and raises SyntaxWarning on Python 3.8+; now
      uses ``==``.
    - The constructor parameter ``placeholder_background_coloiconr`` was a
      typo that never matched the template key ``placeholder_background_color``
      (so the placeholder's color default was never applied); the parameter is
      renamed to the correct key. Positional order is unchanged, and any
      caller still using the old keyword is absorbed by ``**kwargs``.
    """

    MDC_optionals = {
        'meta': '<span class="mdc-list-item__meta">{meta}</span>',
        'icon_meta': '<a href="#" class="mdc-list-item__meta material-icons" onclick="event.preventDefault();" style="text-decoration: none; color: {meta_color};">{icon_meta}</a>',
        # 'fa_icon_meta': '<a href="#" class="mdc-list-item__meta material-icons" onclick="event.preventDefault();" style="text-decoration: none; color: {meta_color};">{icon_meta}</a>',
        'fa_icon_meta': '<i class="mdc-list-item__meta {fa_style_meta} {fa_icon_meta}" onclick="event.preventDefault();" style="text-decoration: none; color: {meta_color};"></i>',
        'icon': '<i class="material-icons mdc-list-item__graphic" aria-hidden="true">{icon}</i>',
        'fa_icon': '<i class="mdc-list-item__graphic {fa_style} {fa_icon}"></i>',
        'avatar': '<span class="mdc-list-item__graphic" style="background-color: {avatar_background_color}; color: {avatar_color};" role="presentation"><i class="material-icons" aria-hidden="true">{avatar}</i></span>',
        'placeholder': '<span class="mdc-list-item__graphic" style="background-color: {placeholder_background_color};"></span>',
    }

    # ----------------------------------------------------------------------
    def __new__(self, text, secondary_text=None, icon=False, icon_meta=False, meta=False, avatar=False, placeholder_background_color='rgba(0,0,0,.38)', avatar_color='white', meta_color='rgba(0,0,0,.38)', avatar_background_color='rgba(0,0,0,.38)', **kwargs):
        """Render the list item and return the resulting DOM element."""
        # A "fa-..." icon name selects the Font Awesome template instead of
        # Material Icons; deleting the local keeps the 'icon' optional unset.
        if icon and icon.startswith('fa'):
            fa_style = icon[:icon.find('-')]
            fa_icon = 'fa' + icon[icon.find('-'):]
            del icon

        if icon_meta and icon_meta.startswith('fa'):
            fa_style_meta = icon_meta[:icon_meta.find('-')]
            fa_icon_meta = 'fa' + icon_meta[icon_meta.find('-'):]
            del icon_meta

        self.element = self.render(locals(), kwargs)
        return self.element

    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the HTML for this item (two-line variant when secondary_text is set)."""
        if context['secondary_text']:
            code = """
                <li class="mdc-list-item">
                  {icon}
                  {fa_icon}
                  {avatar}
                  {placeholder}
                  <span class="mdc-list-item__text">
                      <span class="mdc-list-item__primary-text">{text}</span>
                      <span class="mdc-list-item__secondary-text">{secondary_text}</span>
                  </span>
                  {meta}
                  {icon_meta}
                  {fa_icon_meta}
                </li>

            """

        else:
            code = """
                <li class="mdc-list-item">
                  {icon}
                  {avatar}
                  {placeholder}
                  <span class="mdc-list-item__text">{text}</span>
                  {meta}
                  {icon_meta}
                </li>
            """
        return cls.render_html(code, context)

    # ----------------------------------------------------------------------
    @classmethod
    def get(self, name):
        """Return a named sub-element of the rendered item."""
        if name == 'icon':
            return self.element.select('.mdc-list-item__graphic')[0]
        elif name == 'icon_meta':
            return self.element.select('.mdc-list-item__meta')[0]
        elif name == 'primary_text':
            return self.element.select('.mdc-list-item__primary-text')[0]
########################################################################
class __listChekItem__(MDCTemplate):
    """An MDC list item with a trailing checkbox.

    NOTE(review): the class name misspells "Check"; renaming would break
    importers, so it is kept as-is.
    """

    MDC_optionals = {
        # Rendered into the checkbox input when the item starts checked.
        'checked': 'checked=true',
    }

    # ----------------------------------------------------------------------
    def __new__(self, text, checked=False, **kwargs):
        """Render the checkbox list item and return the resulting DOM element."""
        self.element = self.render(locals(), kwargs)
        return self.element

    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the HTML for the checkbox list item."""
        code = """
            <li class="mdc-list-item checkbox-list-ripple-surface mdc-ripple-upgraded" style="--mdc-ripple-fg-size:360px; --mdc-ripple-fg-scale:1.6997692716423716; --mdc-ripple-fg-translate-start:258px, -163.06666564941406px; --mdc-ripple-fg-translate-end:120px, -156px;">
              <label for="trailing-checkbox-blueberries">{text}</label>
              <span class="mdc-list-item__meta">
                <div class="mdc-checkbox mdc-checkbox--upgraded mdc-ripple-upgraded mdc-ripple-upgraded--unbounded" style="--mdc-ripple-fg-size:24px; --mdc-ripple-fg-scale:1.6666666666666667; --mdc-ripple-left:8px; --mdc-ripple-top:8px;">
                  <input class="mdc-checkbox__native-control" {checked} id="trailing-checkbox-blueberries" type="checkbox">
                  <div class="mdc-checkbox__background">
                    <svg class="mdc-checkbox__checkmark" viewBox="0 0 24 24">
                      <path class="mdc-checkbox__checkmark-path" fill="none" stroke="white" d="M1.73,12.91 8.1,19.28 22.79,4.59"></path>
                    </svg>
                    <div class="mdc-checkbox__mixedmark"></div>
                  </div>
                </div>
              </span>
            </li>
        """
        return cls.render_html(code, context)
########################################################################
class MDCListGroup(MDCTemplate):
    """Container grouping several MDC lists under subheaders."""
    # ----------------------------------------------------------------------
    # def __new__(self, **kwargs):
    #     """"""
    #self.element = self.render(locals(), kwargs)
    #     return self.element
    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the (empty) list-group container template."""
        code = """
        <div class="mdc-list-group">
        </div>
        """
        return cls.render_html(code, context)
    # ----------------------------------------------------------------------
    @classmethod
    def add_list(cls, element, label, list_):
        """Append a subheader with *label* followed by *list_*.

        NOTE(review): this uses ``cls.element``, which is never assigned in
        this class (``__new__`` is commented out) — presumably set by the
        MDCTemplate machinery; confirm before relying on it.
        """
        cls.element <= html.H3(label, Class='mdc-list-group__subheader')
        cls.element <= list_
########################################################################
class MDCList(MDCTemplate):
    """Material Design Components list (``<ul class="mdc-list">``)."""
    NAME = 'list', 'MDCList'
    # Flags from __new__ mapped to the CSS modifier classes injected into
    # the template placeholders below.  Note: 'dense' also applies the
    # two-line modifier.
    MDC_optionals = {
        'two_line': 'mdc-list--two-line',
        'dense': 'mdc-list--two-line mdc-list--dense',
        'avatar': 'mdc-list--avatar-list',
        'non_interactive': 'mdc-list--non-interactive',
    }
    # ----------------------------------------------------------------------
    def __new__(self, two_line=False, dense=False, avatar=False, **kwargs):
        """Render the list container and return the DOM element."""
        self.element = self.render(locals(), kwargs)
        return self.element
    # ----------------------------------------------------------------------
    @classmethod
    def __html__(cls, **context):
        """Return the list container template."""
        code = """
        <ul class="mdc-list {two_line} {dense} {avatar} {non_interactive}">
        </ul>
        """
        return cls.render_html(code, context)
    # ----------------------------------------------------------------------
    @classmethod
    def get(self, name):
        """Placeholder accessor; the body is commented out, so this always
        returns None."""
        # if name is 'actions':
        #     return self.element.select('.mdc-card__actions')[0]
        # elif name is 'action_buttons':
        #     return self.element.select('.mdc-card__action-buttons')[0]
        # elif name is 'action_icons':
        #     return self.element.select('.mdc-card__action-icons')[0]
    # #----------------------------------------------------------------------
    # @classmethod
    # def add_action_button(cls, element, element, mdc, *args, **kwargs):
    #     """"""
    # ----------------------------------------------------------------------
    @classmethod
    def add_item(cls, element, *args, **kwargs):
        """Create a __listItem__ from *args/**kwargs, append it, return it."""
        item = __listItem__(*args, **kwargs)
        cls.element <= item
        return item
    # ----------------------------------------------------------------------
    @classmethod
    def add_check_item(cls, element, *args, **kwargs):
        """Create a __listChekItem__ (checkbox item), append it, return it."""
        item = __listChekItem__(*args, **kwargs)
        cls.element <= item
        return item
    # ----------------------------------------------------------------------
    @classmethod
    def add_divider(cls, element, hr=False, inset=False):
        """Append a divider: an <hr> when *hr* is True, otherwise a
        separator <li>; *inset* adds the inset modifier class."""
        if inset:
            inset = 'mdc-list-divider--inset'
        else:
            inset = ''
        if hr:
            code = '<hr class="mdc-list-divider {inset}">'.format(inset=inset)
        else:
            code = '<li role="separator" class="mdc-list-divider {inset}"></li>'.format(
                inset=inset)
        code = cls.render_str(code)
        cls.element <= code
| 2.109375 | 2 |
Session 1/Skeleton Files/exercise_2_with_functions_skeleton.py | Descent098/schulich-ignite-winter-2021 | 1 | 12769891 | """Exercise 2 skeleton: Create a square that bounces when it reaches any boundary of the canvas
Remember to fill out all the TODO's, you can quickly scan for them by pressing CTRL/CMD + F
"""
import sys
import os  # NOTE(review): imported but unused in this skeleton
import pygame
"""
SETUP section - preparing everything before the main loop runs
"""
pygame.init()
# Canvas dimensions and the drawing surface.
screen_width, screen_height = 1000, 800
screen = pygame.display.set_mode((screen_width, screen_height))
clock = pygame.time.Clock()
FRAME_RATE = 40
BLACK = (0, 0, 0)
# Setup our variables.
RED = (255, 0, 0) # A constant with the color red as a tuple
rect = pygame.Rect(200, 100, 75, 75) # A rectangle object we can manipulate later
speed_x = 5 # The speed we are traveling in the x direction on each frame
speed_y = 5 # The speed we are traveling in the y direction on each frame
def check_collisions():
    """Return the updated (speed_x, speed_y) pair, flipping a component
    whenever the square touches the corresponding edge of the canvas.

    Reads the module-level ``rect``, ``speed_x``, ``speed_y`` and the
    screen dimensions; the globals themselves are not mutated here —
    the caller re-assigns them from the return value.
    """
    new_speed_x, new_speed_y = speed_x, speed_y
    # Bounce horizontally off the left/right walls.
    if rect.left <= 0 or rect.right >= screen_width:
        new_speed_x = -new_speed_x
    # Bounce vertically off the top/bottom walls.
    if rect.top <= 0 or rect.bottom >= screen_height:
        new_speed_y = -new_speed_y
    return new_speed_x, new_speed_y
while True:
    """
    EVENTS section - how the code reacts when users do things
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            pygame.quit()
            sys.exit()
    """
    UPDATE section - manipulate everything on the screen
    """
    # Move the square by its current speed, then flip any speed component
    # for a wall the square has reached.
    rect.x += speed_x
    rect.y += speed_y
    speed_x, speed_y = check_collisions()
    """
    DRAW section - make everything show up on screen
    """
    screen.fill(BLACK) # Fill the screen with one colour
    pygame.draw.rect(screen, RED, rect) # Draw the rectangle
    #### DRAW THINGS BEFORE THIS ####
    pygame.display.flip() # Pygame uses a double-buffer, without this we see half-completed frames
    clock.tick(FRAME_RATE) # Pause the clock to maintain 40 frames per second
vecLib/validator.py | chenjl0710/arcpyTools | 1 | 12769892 | <gh_stars>1-10
import arcpy
import os
import sys
class ToolValidator(object):
    """Class for validating a tool's parameter values and controlling
    the behavior of the tool's dialog."""
    def __init__(self):
        """Setup arcpy and the list of tool parameters."""
        self.params = arcpy.GetParameterInfo()
        # Path of the running script; the SDE connection file is expected
        # next to it.
        self.current_path = sys.argv[0]
        self.sdefile = os.path.join(self.current_path, "vector.sde")
        self.project = os.path.join(self.sdefile, 'SDE.PROJECT')
        # Column order matters below: row[2] is PRJ_ID, row[5] is PRJ_NAME.
        self.fields = ['PRODUCT_TY', 'LOCATION', 'PRJ_ID', 'PRO_YEAR', 'RESOLUTION', 'PRJ_NAME']
    def initializeParameters(self):
        """Refine the properties of a tool's parameters. This method is
        called when the tool is opened."""
        cur = arcpy.da.SearchCursor(self.project, self.fields)
        self.prj_list = []
        for row in cur:
            # Build "PRJ_ID--PRJ_NAME" entries for the first parameter's
            # value-list filter.
            self.prj_id_name = row[2] + "--" + row[5]
            self.prj_list.append(self.prj_id_name)
        self.params[0].filter.list = self.prj_list
        return
    def updateParameters(self):
        """Modify the values and properties of parameters before internal
        validation is performed. This method is called whenever a parameter
        has been changed."""
        return
    def updateMessages(self):
        """Modify the messages created by internal validation for each tool
        parameter. This method is called after internal validation."""
        return
| 2.78125 | 3 |
tests/integration/test_rename.py | ytzhangFTD/organize | 1 | 12769893 | import fs
from conftest import rules_shortcut, make_files, read_files
from organize import core
def test_rename_issue52():
    # test for issue https://github.com/tfeldmann/organize/issues/51
    # NOTE(review): the function name says issue 52 but the URL says 51 —
    # confirm which issue this regression test actually covers.
    files = {
        "files": {
            "19asd_WF_test2.PDF": "",
            "other.pdf": "",
            "18asd_WFX_test2.pdf": "",
        }
    }
    with fs.open_fs("temp://") as mem:
        make_files(mem, files)
        # Only files starting with "19" AND containing "_WF_" should be
        # renamed (extension lower-cased, "_unread" suffix) and copied.
        config = rules_shortcut(
            mem,
            filters="""
            - extension
            - name:
                startswith: "19"
                contains:
                - "_WF_"
            """,
            actions=[
                {"rename": "{path.stem}_unread.{extension.lower()}"},
                {"copy": {"dest": "files/copy/", "filesystem": mem}},
            ],
        )
        core.run(config, simulate=False)
        mem.tree()
        result = read_files(mem)
        # The other two files must be left untouched.
        assert result == {
            "files": {
                "copy": {
                    "19asd_WF_test2_unread.pdf": "",
                },
                "19asd_WF_test2_unread.pdf": "",
                "other.pdf": "",
                "18asd_WFX_test2.pdf": "",
            }
        }
| 2.21875 | 2 |
elit/datasets/cws/multi_criteria_cws/mcws_dataset.py | emorynlp/levi-graph-amr-parser | 9 | 12769894 | <filename>elit/datasets/cws/multi_criteria_cws/mcws_dataset.py
# -*- coding:utf-8 -*-
# Author: hankcs
# Date: 2020-10-21 19:11
import os
from typing import Union, List, Callable, Dict, Iterable
from elit.datasets.tokenization.txt import TextTokenizingDataset
from elit.utils.io_util import get_resource
class MultiCriteriaTextTokenizingDataset(TextTokenizingDataset):
    """Tokenizing dataset that tags every sample with the segmentation
    criterion (corpus) it came from, for multi-criteria CWS training."""
    def __init__(self,
                 data: Union[str, List],
                 transform: Union[Callable, List] = None,
                 cache=None,
                 generate_idx=None,
                 delimiter=None,
                 max_seq_len=None,
                 sent_delimiter=None,
                 char_level=False,
                 hard_constraint=False) -> None:
        # All behavior is inherited; this subclass only changes how files
        # are loaded (see load_file).
        super().__init__(data, transform, cache, generate_idx, delimiter, max_seq_len, sent_delimiter, char_level,
                         hard_constraint)
    def should_load_file(self, data) -> bool:
        # Unlike the base class, data here is a collection of per-criterion
        # paths (tuple of paths/pairs, or a dict of path -> criterion).
        return isinstance(data, (tuple, dict))
    def load_file(self, filepath: Union[Iterable[str], Dict[str, str]]):
        """Load multi-criteria corpora specified in filepath.

        Args:
            filepath: A list of files where filename is its criterion. Or a dict of filename-criterion pairs.

        .. highlight:: bash
        .. code-block:: bash

            $ tree -L 2 .
            .
            ├── cnc
            │   ├── dev.txt
            │   ├── test.txt
            │   ├── train-all.txt
            │   └── train.txt
            ├── ctb
            │   ├── dev.txt
            │   ├── test.txt
            │   ├── train-all.txt
            │   └── train.txt
            ├── sxu
            │   ├── dev.txt
            │   ├── test.txt
            │   ├── train-all.txt
            │   └── train.txt
            ├── udc
            │   ├── dev.txt
            │   ├── test.txt
            │   ├── train-all.txt
            │   └── train.txt
            ├── wtb
            │   ├── dev.txt
            │   ├── test.txt
            │   ├── train-all.txt
            │   └── train.txt
            └── zx
                ├── dev.txt
                ├── test.txt
                ├── train-all.txt
                └── train.txt
            $ head -n 2 ctb/dev.txt
            上海 浦东 开发 与 法制 建设 同步
            新华社 上海 二月 十日 电 ( 记者 谢金虎 、 张持坚 )
        """
        for eachpath in (filepath.items() if isinstance(filepath, dict) else filepath):
            if isinstance(eachpath, tuple):
                # Explicit (criterion, path) pair.
                criteria, eachpath = eachpath
                eachpath = get_resource(eachpath)
            else:
                # Bare path: the criterion is the parent directory name
                # (e.g. ".../ctb/train.txt" -> "ctb").
                eachpath = get_resource(eachpath)
                criteria = os.path.basename(os.path.dirname(eachpath))
            for sample in super().load_file(eachpath):
                sample['criteria'] = criteria
                yield sample
def append_criteria_token(sample: dict, criteria_tokens: Dict[str, int], criteria_token_map: dict) -> dict:
    """Append the criterion-marker token of ``sample['criteria']`` to the
    sample's input ids, assigning an unused marker token on first sight.

    ``criteria_token_map`` (criterion -> token string) is mutated in place;
    ``criteria_tokens`` maps each reserved token string to its vocab id.
    The appended token gets segment id 1, everything before it segment 0.
    """
    criteria = sample['criteria']
    token = criteria_token_map.get(criteria)
    if not token:
        # First time this criterion is seen: hand out the next unused token
        # (dicts preserve insertion order, so assignment is deterministic).
        unused_tokens = list(criteria_tokens)
        size = len(criteria_token_map)
        assert size + 1 < len(unused_tokens), f'No unused token available for criteria {criteria}. ' \
                                              f'Current criteria_token_map = {criteria_token_map}'
        token = criteria_token_map[criteria] = unused_tokens[size]
    input_ids = sample['token_input_ids']
    sample['token_token_type_ids'] = [0] * len(input_ids) + [1]
    sample['token_input_ids'] = input_ids + [criteria_tokens[token]]
    return sample
| 2.28125 | 2 |
sample_data/pytest_examples/calculator_example.py | AyTanase/ILAS- | 0 | 12769895 | def test_calculator_add_returns_correct_result():
result = calc_add(2,2)
assert result == 4
# def calc_add(x,y):
# pass
# return x+y
# if isinstance(x, number_types) and isinstance(y, number_types):
# return x + y
# else:
# raise ValueError("Non-numeric input given")
# def test_calculator_returns_error_message_if_both_args_not_numbers():
# try:
# calc_add("two", "three")
# except ValueError:
# print("Exception caught")
# assert True, "Fail: ValueError exception not caught"
# except:
# assert False, "Fail: Exception other than ValueError caught"
# else:
# assert False, "Fail: No exception caught"
# def test_calculator_returns_error_message_if_both_args_not_numbers():
# with pytest.raises(ValueError):
# calc_add("two", "three")
# def test_calculator_returns_error_message_if_x_arg_not_number():
# with pytest.raises(ValueError):
# calc_add("two", 3)
# def test_calculator_returns_error_message_if_y_arg_not_number( ):
# with pytest.raises(ValueError):
# calc_add(2, "three") | 3.328125 | 3 |
python/2020/day_six_part1.py | caw13/adventofcode | 0 | 12769896 | <reponame>caw13/adventofcode
def getGroupCount(line):
    """Return the number of distinct characters in *line* — i.e. how many
    different questions anyone in the group answered 'yes' to."""
    return len(set(line))
# Advent of Code 2020, day 6 part 1: sum the distinct answers per group,
# where groups are separated by blank lines.
filename = "inputs\\2020\\input-day6.txt"
with open(filename) as f:
    lines = f.readlines()
group = ""
total = 0  # renamed from 'sum' to avoid shadowing the builtin
for line in lines:
    if len(line.strip()) == 0:
        # Blank line ends the current group: count its distinct answers.
        total += getGroupCount(group.strip())
        group = ""
    else:
        group += line.strip()
# Flush the final group (the input may not end with a blank line).
total += getGroupCount(group.strip())
print("Sum: "+str(total))
Code/CarLoanCalculator.py | zachlim98/carloancalc | 0 | 12769897 | import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import pandas as pd
app = dash.Dash(__name__)
# Page layout: title, explanation, three numeric inputs (retail price,
# monthly expenses, interest rate) and the output graph updated by the
# 'graph-car-price' callback below.
app.layout = html.Div([
    html.H2("True Cost of Car (with Interest and Monthly Expenses)", style={'text-align':'center'}),
    html.Hr(),
    html.P("This calculator allows you to estimate the true cost of your car (over 10 years), taking into account loan downpayment, loan term, interest rates, and estimated monthly expenses (on fuel, parking etc.).", style={'text-align':'center'}),
    html.Div([
        dcc.Input(
            id='carprice',
            min=50000,
            value='',
            placeholder="Retail Price",
            type="number",
            style={'text-align':'center'}
        ),
        dcc.Input(
            id='monthexp',
            min=500,
            value='',
            placeholder="Monthly Expenses",
            type="number",
            style={'text-align':'center'}
        )], style=dict(display='flex', justifyContent='center')),
    html.Div([
        dcc.Input(
            id='intrate',
            min=0.01,
            value='',
            placeholder="Interest Rates (%)",
            type="number",
            style={'text-align':'center'}
        )], style=dict(display='flex', justifyContent='center')),
    html.Hr(),
    dcc.Graph(id='graph-car-price')
])
@app.callback(
    Output('graph-car-price', 'figure'),
    [Input('carprice', 'value'),
     Input('monthexp', 'value'),
     Input('intrate', 'value'),
     ])
def update_figure(carprice, monthexp, intrate):
    """Rebuild the total-cost line chart whenever one of the inputs changes.

    One line per loan term (1-7 years), total cost as a function of the
    initial downpayment (30% of retail price up to full price, $200 steps).
    """
    downpayments = list(range(int(carprice * 0.3), int(carprice), 200))
    cost_table = pd.DataFrame({"Downpayment": downpayments})
    # Total cost for a given term: simple interest on the financed amount
    # plus the financed amount itself, the downpayment, and the expense
    # figure.  NOTE(review): monthexp is added once, not per month —
    # confirm that matches the intended model.
    rate = intrate / 100
    for term in range(1, 8):
        cost_table[f"{term} Year"] = [
            rate * term * (carprice - dp) + (carprice - dp) + dp + monthexp
            for dp in downpayments
        ]
    # Long format: one row per (downpayment, term) for plotly express.
    long_form = pd.melt(cost_table, id_vars="Downpayment")
    fig = px.line(long_form, x="Downpayment", y="value", color="variable", labels={
        "Downpayment": "Initial Downpayment",
        "value": "Total Cost of Car",
        "variable": "Loan Term"
    }, color_discrete_sequence=px.colors.qualitative.Bold)
    fig.update_layout({"plot_bgcolor": "white"})
    fig.update_xaxes(showgrid=True, gridwidth=1, gridcolor='lightgrey')
    fig.update_yaxes(showgrid=True, gridwidth=1, gridcolor='lightgrey')
    fig.update_layout(transition_duration=500)
    return fig
# Run the development server when executed directly.
if __name__ == '__main__':
    app.run_server(debug=False)
02.Regression/04.Support-Vector-Regression/support_vector_regression.py | HassanRahim26/Machine-Learning | 0 | 12769898 | <gh_stars>0
# Support Vector Regression (SVR)
# Importing the libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Importing the dataset
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:-1].values
y = dataset.iloc[:, -1].values
print(X)
"""
[[ 1]
[ 2]
[ 3]
[ 4]
[ 5]
[ 6]
[ 7]
[ 8]
[ 9]
[10]]
"""
print(y)
"""
[ 45000 50000 60000 80000 110000 150000 200000 300000 500000
1000000]
"""
y = y.reshape((len(y), 1))
# We've to reshape our dependent variable vector y because we've to scale it.
print(y)
"""
[[ 45000]
[ 50000]
[ 60000]
[ 80000]
[ 110000]
[ 150000]
[ 200000]
[ 300000]
[ 500000]
[1000000]]
"""
"""
# Splitting the dataset into the Training set and Test set
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.2, random_state = 0)
"""
# Feature Scaling
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
sc_y = StandardScaler()
X = sc_X.fit_transform(X)
y = sc_y.fit_transform(y)
print(X)
"""
[[-1.5666989 ]
[-1.21854359]
[-0.87038828]
[-0.52223297]
[-0.17407766]
[ 0.17407766]
[ 0.52223297]
[ 0.87038828]
[ 1.21854359]
[ 1.5666989 ]]
"""
print(y)
"""
[[-0.72004253]
[-0.70243757]
[-0.66722767]
[-0.59680786]
[-0.49117815]
[-0.35033854]
[-0.17428902]
[ 0.17781001]
[ 0.88200808]
[ 2.64250325]]
"""
# Training the SVR model on the whole dataset
from sklearn.svm import SVR
regressor = SVR(kernel = 'rbf')
## Radial Basis Kernel(RBF) is a kernel function that is used in machine learning to find a non-linear classifier or regression line.
regressor.fit(X, y.ravel())
## SVR()
# Predicting a new result
sc_y.inverse_transform(regressor.predict(sc_X.transform([[6.5]])).reshape(-1, 1))
## array([[170370.0204065]])
"""
np.set_printoptions(precision=2)
print(np.concatenate((y_pred.reshape(len(y_pred),1), y_test.reshape(len(y_test),1)),1))
# Evaluating the Model Performance
from sklearn.metrics import r2_score
r2_score(y_test, y_pred)
"""
# Visualising the SVR results
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red')
plt.plot(sc_X.inverse_transform(X), sc_y.inverse_transform(regressor.predict(X).reshape(-1, 1)), color = 'blue')
plt.title('Truth or Bluff(Support Vector Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
# Visualising the SVR results (for higher resolution and smoother curve)
X_grid = np.arange(min(sc_X.inverse_transform(X)), max(sc_X.inverse_transform(X)), 0.1)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(sc_X.inverse_transform(X), sc_y.inverse_transform(y), color = 'red')
plt.plot(X_grid, sc_y.inverse_transform(regressor.predict(sc_X.transform(X_grid)).reshape(-1, 1)), color = 'blue')
plt.title('Truth or Bluff(Support Vector Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show()
| 3.109375 | 3 |
testsuite/tests/apicast/auth/rhsso/test_oidc_rhsso_jwt_client_id.py | dlaso99/3scale-tests | 5 | 12769899 | <gh_stars>1-10
"""
Rewrite of the spec/functional_specs/auth/rhsso/oidc_rhsso_jwt_client_id_spec.rb
and spec/functional_specs/auth/rhsso/oidc_rhsso_jwt_client_id_reject_spec.rb
These two specs were merged into one parametrized test
"""
import pytest
from threescale_api.resources import Service
@pytest.fixture(params=["authorization", "query", "headers"])
def credentials_location(request):
    """Parametrized location of the client credentials in the request."""
    return request.param
@pytest.fixture(params=[("azp", 200), ("foo", 403)], ids=("valid-claim", "invalid-claim"))
def jwt_claim(request):
    """Parametrized (claim name, expected status) pair: the standard 'azp'
    claim should be accepted (200), an unknown claim rejected (403)."""
    return request.param
# an issue seems to be in pytest, rhsso_setup(autouse) isn't applied here,
# therefore explicit dependency required.
# (is it because of parametrisation or some function scoped fixtures?)
# pylint: disable=unused-argument
def test_auth_client_id(rhsso_setup, api_client, service, credentials_location, jwt_claim):
    """Configure the service to extract the client id from a JWT claim and
    check that a request passes (valid claim) or is rejected (invalid claim),
    for each supported credentials location."""
    claim, status_code = jwt_claim
    # Liquid template "{{ <claim> }}" tells the gateway which JWT claim to
    # read the client id from.
    service.proxy.update(params={
        "credentials_location": credentials_location,
        "jwt_claim_with_client_id_type": "liquid",
        "jwt_claim_with_client_id": "{{ %s }}" % claim})
    service.proxy.deploy()
    assert service["backend_version"] == Service.AUTH_OIDC
    response = api_client().get("/get")
    assert response.status_code == status_code
| 1.703125 | 2 |
raipy/Example.py | threemeninaboat3247/raipy | 0 | 12769900 | <reponame>threemeninaboat3247/raipy
# -*- coding: utf-8 -*-
"""
Created on Sat May 13 21:48:22 2017
@author: Yuki
"""
from PyQt5.QtWidgets import QVBoxLayout,QWidget,QHBoxLayout,QTabWidget,QStatusBar,QTextEdit,QApplication,QPushButton,QMenu,QAction
from PyQt5.QtCore import pyqtSignal
EXAMPLE='Examples' #the folder exists in the same folder with __init__.py and contains samples
class MyAction(QAction):
    """QAction that re-emits its own label text when triggered, so menu
    handlers can tell which entry fired."""
    actionName=pyqtSignal(str)
    def __init__(self,*args):
        super().__init__(*args)
        # Forward Qt's argument-less 'triggered' as a signal carrying text.
        self.triggered.connect(self.myEmit)
    def myEmit(self):
        self.actionName.emit(self.text())
class ExampleMenu(QMenu):
    '''Menu listing the example programs found in the package's Examples folder.'''
    def __init__(self,*args):
        super().__init__(*args)
        import raipy
        import os
        # The Examples folder lives next to the installed raipy package.
        folder=os.path.dirname(os.path.abspath(raipy.__file__))+'\\'+EXAMPLE
        files=os.listdir(folder)
        pyfiles=[file for file in files if ('.py' in file)]
        self.setList(pyfiles)
    def setList(self,files):
        #append file names to itself and connect signals
        for file in files:
            action=MyAction(file,self)
            self.addAction(action)
            action.actionName.connect(self.showExample)
    def setFileManager(self,manager):
        # Must be called before any menu entry is clicked: showExample reads
        # self.manager, which is only assigned here.
        self.manager=manager
    def showExample(self,file):
        # Keep a reference so the widget is not garbage-collected.
        self.example=ExampleWidget(file,self.manager)
        self.example.show()
class ExampleWidget(QWidget):
    """Read-only viewer for an example file with Load/Export buttons.

    'Load' asks *manager* to import the file, 'Export' asks it to copy the
    file; both are wired through the class-level signals below.
    """
    load=pyqtSignal(str)
    export=pyqtSignal(str)
    def __init__(self,file,manager):
        super().__init__()
        exe=QPushButton('Load')
        export=QPushButton('Export')
        exe.pressed.connect(self.emitLoad)
        export.pressed.connect(self.emitExport)
        # Route the signals to the file manager's import/copy handlers.
        self.load.connect(manager.importFile)
        self.export.connect(manager.copyFile)
        buttons=QHBoxLayout()
        buttons.addWidget(exe)
        buttons.addWidget(export)
        buttons.addStretch(1)
        self.text=QTextEdit()
        vbox=QVBoxLayout()
        vbox.addLayout(buttons)
        vbox.addWidget(self.text)
        self.setLayout(vbox)
        self.setText(file)
        self.text.setReadOnly(True)
        self.setGeometry(500, 60, 960,900)
    def setText(self,file):
        #show a file in EXAMPLE folder
        import raipy
        import os
        folder=os.path.dirname(os.path.abspath(raipy.__file__))+'\\'+EXAMPLE
        self.path=folder+'\\'+file
        import codecs
        # Use a context manager so the file handle is closed (it was
        # previously opened and never closed).
        with codecs.open(self.path,'r','utf-8') as f:
            text=f.read()
        self.text.setText(text)
    def emitLoad(self):
        self.load.emit(self.path)
    def emitExport(self):
        self.export.emit(self.path)
# main
if __name__ == '__main__':
    import sys
    app = QApplication(sys.argv)
    # NOTE(review): ExampleWidget.__init__ requires (file, manager), so this
    # zero-argument call raises TypeError — this demo block looks stale.
    ex=ExampleWidget()
    ex.show()
    import raipy
    import os
    root=os.path.dirname(os.path.abspath(raipy.__file__))
    path=root+'\\Examples\\Demo.py'
    print(path)
    ex.setText(root+'\\Examples\\Demo.py')
    # NOTE(review): ExampleList is not defined in this module — confirm
    # where it is supposed to come from (NameError as written).
    ll=ExampleList()
    print(ll.getExamples())
    sys.exit(app.exec_())
sys.exit(app.exec_()) | 2.390625 | 2 |
django_mfa/middleware.py | juwaini/django-mfa | 0 | 12769901 | from django.urls import reverse
from django.shortcuts import resolve_url
from django.contrib.auth import REDIRECT_FIELD_NAME as redirect_field_name
from .models import is_mfa_enabled, is_u2f_enabled
from .views import verify_rmb_cookie
# Compatibility shim: MiddlewareMixin only exists in Django >= 1.10; on
# older versions the plain new-style middleware protocol (object) suffices.
try:
    from django.utils.deprecation import MiddlewareMixin
except ImportError:  # Django < 1.10
    # Works perfectly for everyone using MIDDLEWARE_CLASSES
    MiddlewareMixin = object
class MfaMiddleware(MiddlewareMixin):
    def process_request(self, request):
        """Redirect authenticated users to second-factor verification when
        MFA/U2F is enabled and the session has not yet been verified.

        Returns a redirect response, or None to let the request proceed.
        """
        # Second factor is required when U2F is enabled, or when OTP MFA is
        # enabled and no valid remember-me cookie is present.
        if request.user.is_authenticated and ((not verify_rmb_cookie(request) and is_mfa_enabled(request.user)) or (is_u2f_enabled(request.user))):
            # NOTE(review): the session keys are spelled 'verfied_*' (sic);
            # they must match whatever the verification views set.
            if not request.session.get('verfied_otp') and not request.session.get('verfied_u2f'):
                current_path = request.path
                # Never redirect away from the verification pages themselves,
                # otherwise the user could never complete verification.
                paths = [reverse("mfa:verify_second_factor"), reverse(
                    "mfa:verify_second_factor_u2f"), reverse("mfa:verify_second_factor_totp")]
                if current_path not in paths:
                    path = request.get_full_path()
                    resolved_login_url = resolve_url(
                        reverse("mfa:verify_second_factor"))
                    # Imported here (not at module level) — presumably to
                    # avoid an import cycle; confirm before moving it.
                    from django.contrib.auth.views import redirect_to_login
                    return redirect_to_login(path, resolved_login_url, redirect_field_name)
        return None
| 1.984375 | 2 |
src/zope/mimetype/__init__.py | zopefoundation/zope.mimetype | 1 | 12769902 | from zope.mimetype import mtypes
types = mtypes # alternate spelling/backwards (1.3) compatible export
| 1.0625 | 1 |
pypd/tests/testutils.py | andyr0id/PyPD | 0 | 12769903 | <gh_stars>0
import os
def getPatchesDir():
    """Return the absolute path of the top-level ``pypd/patches/`` folder.

    Assumes this file lives under a ``.../pypd/pypd/...`` directory tree;
    ``str.index`` raises ValueError otherwise.
    # NOTE(review): fragile — breaks if the package is installed under a
    # path that does not contain 'pypd/pypd/'.
    """
    d = os.path.dirname(os.path.realpath(__file__))
    return d[:d.index('pypd/pypd/')] + 'pypd/patches/'
neuralnetwork/Backpropagation.py | Anselmoo/python-neuralnetwork | 0 | 12769904 | <filename>neuralnetwork/Backpropagation.py
import logging
import numpy as np
class Backpropagation:
"""Backpropagation
The Backpropagation class calculates the minimum value of the error function in relation to the training-set and the activation function.
The technique for achieving this goal is called the delta rule or gradient descent.
"""
nodeDeltas = np.array([])
gradients = np.array([])
biasGradients = np.array([])
learningRate = np.array([])
eta = np.array([])
weightUpdates = np.array([])
biasWeightUpdates = np.array([])
minimumError = ""
maxNumEpochs = ""
numEpochs = ""
network = np.array([])
delta = np.float64
networkLayers = []
error = 0.
def __init__(
self, network, learningRate, eta, minimumError=0.005, maxNumEpochs=2000
):
"""
__init__ [summary]
[extended_summary]
Parameters
----------
network : class
class of FeedForward-Routine
learningRate : float
Learning rate of the MLP
eta : float
Error correction factor
minimumError : float, optional
Minimal error to stop the training, by default 0.005
maxNumEpochs : int, optional
Maxinum numbers of epochs before stopping the training, by default 2000
"""
self.network = network
self.learningRate = learningRate
self.eta = eta
self.minimumError = minimumError
self.maxNumEpochs = maxNumEpochs
self.initialise()
def initialise(self):
"""initialise MLP.
The intiale procedure includes:
1. network
2. node deltas
3. gradients of values
4. gradients of bias
5. Update matrices for:
a. weight of values
b. weight of bias
c. gradients of values
d. gradients of bias
"""
self.network.initialise()
self.nodeDeltas = np.array([])
self.gradients = np.array([])
self.biasGradients = np.array([])
self.totalNumNodes = self.network.getTotalNumNodes()
self.dtype = self.network.getDtype()
self.networkLayers = self.network.getNetworkLayers()
# initiale the weight, bias, and gradients matrices
self.weightUpdates = np.zeros(
(self.totalNumNodes, self.totalNumNodes), dtype=self.dtype
)
self.biasWeightUpdates = np.zeros(
(self.totalNumNodes, self.totalNumNodes), dtype=self.dtype
)
self.gradients = np.zeros(
(self.totalNumNodes, self.totalNumNodes), dtype=self.dtype
)
self.biasGradients = np.zeros(
(self.totalNumNodes, self.totalNumNodes), dtype=self.dtype
)
self.initialiseValues()
def initialiseValues(self):
"""
initialiseValues inital the values array
"""
self.nodeDeltas = np.zeros(self.totalNumNodes, dtype=self.dtype)
def train(self, trainingSets, rprint=False):
"""train the mlp-network.
Training of the mlp-network for a given `trainingSets` for maximum number of epchos.
Parameters
----------
trainingSets : array
The training set is provided as float-array where X- and y-values are keeped together.
rprint : bool, optional
print the current progress with global error, by default False
Returns
-------
: bool
Return a bool for indicating successful (True) or failed (False) learning.
"""
self.numEpochs = 1
if rprint:
logging.basicConfig(level=logging.INFO)
# Have to change to a for-if slope
while True:
if self.numEpochs > self.maxNumEpochs:
return False
sumNetworkError = 0
for i in range(len(trainingSets)):
# Switching to FeedForworad.py
self.network.activate(trainingSets[i])
outputs = self.network.getOutputs()
# Come back to Backpropagation.py
self.calculateNodeDeltas(trainingSets[i])
self.calculateGradients()
self.calculateWeightUpdates()
self.applyWeightChanges()
sumNetworkError += self.calculateNetworkError(trainingSets[i])
globalError = sumNetworkError / len(trainingSets)
logging.info("--------------------------------")
logging.info("Num Epochs: {}".format(self.numEpochs))
logging.info("Global Error: {}".format(globalError))
self.error = globalError
self.numEpochs = self.numEpochs + 1
if globalError < self.minimumError:
break
return True
def calculateNodeDeltas(self, trainingSet):
"""calculateNodeDeltas, error of each node.
Parameters
----------
trainingSets : array
The training set is provided as float-array where X- and y-values are keeped together.
"""
idealOutputs = trainingSet[
-1 * self.networkLayers[len(self.networkLayers) - 1]["num_nodes"]
]
# Initial phase
actl_node = [
self.networkLayers[len(self.networkLayers) - 1]["start_node"],
self.networkLayers[len(self.networkLayers) - 1]["end_node"] + 1,
]
activation = self.network.getActivation()
error = self.network.values[actl_node[0] : actl_node[1]] - idealOutputs
self.nodeDeltas[actl_node[0] : actl_node[1]] = np.multiply(
-error,
activation.getDerivative(self.network.net[actl_node[0] : actl_node[1]]),
dtype=self.dtype,
)
for k in range(len(self.networkLayers) - 2, 0, -1):
actl_node = [
self.networkLayers[k]["start_node"],
self.networkLayers[k]["end_node"] + 1,
]
connectNode = len(self.network.getWeight())
# Calculating the node deltas
self.nodeDeltas[actl_node[0] : actl_node[1]] = np.multiply(
np.dot(
self.network.weights[actl_node[0] : actl_node[1]],
self.nodeDeltas[:connectNode],
),
activation.getDerivative(self.network.net[actl_node[0] : actl_node[1]]),
dtype=self.dtype,
)
def calculateGradients(self):
"""calculateGradients, gradient of each value and bias.
"""
for num, layer in enumerate(self.networkLayers[:-1]):
prev_index = [layer["start_node"], layer["end_node"] + 1]
# similiar to i
actl_index = [
self.networkLayers[num + 1]["start_node"],
self.networkLayers[num + 1]["end_node"] + 1,
] # similiar to j
# Value-Gradient
self.gradients[
prev_index[0] : prev_index[1], actl_index[0] : actl_index[1]
] = np.outer(
self.network.values[prev_index[0] : prev_index[1]],
self.nodeDeltas[actl_index[0] : actl_index[1]],
# dtype=self.dtype,
)
# Bias-Gradient
self.biasGradients[num, actl_index[0] : actl_index[1]] = self.nodeDeltas[
actl_index[0] : actl_index[1]
]
def calculateWeightUpdates(self):
"""calculateWeightUpdates of the 'new' weights and bias-weights.
"""
for num, layer in enumerate(self.networkLayers[:-1]):
prev_index = [layer["start_node"], layer["end_node"] + 1]
# similiar to i
actl_index = [
self.networkLayers[num + 1]["start_node"],
self.networkLayers[num + 1]["end_node"] + 1,
] # similiar to j
# Updating the weights
self.weightUpdates[
prev_index[0] : prev_index[1], actl_index[0] : actl_index[1]
] = np.add(
np.multiply(
self.learningRate,
self.gradients[
prev_index[0] : prev_index[1], actl_index[0] : actl_index[1]
],
dtype=self.dtype,
),
np.multiply(
self.eta,
self.weightUpdates[
prev_index[0] : prev_index[1], actl_index[0] : actl_index[1]
],
dtype=self.dtype,
),
dtype=self.dtype,
)
# Updating the bias-weights
self.biasWeightUpdates[num, actl_index[0] : actl_index[1]] = np.add(
np.multiply(
self.learningRate,
self.biasGradients[num, actl_index[0] : actl_index[1]],
dtype=self.dtype,
),
np.multiply(
self.eta,
self.biasWeightUpdates[num, actl_index[0] : actl_index[1]],
dtype=self.dtype,
),
dtype=self.dtype,
)
def applyWeightChanges(self):
"""applyWeightChanges of the gradient correction to the layers.
"""
for num, layer in enumerate(self.networkLayers[:-1]):
prev_index = [layer["start_node"], layer["end_node"] + 1]
# similiar to i
actl_index = [
self.networkLayers[num + 1]["start_node"],
self.networkLayers[num + 1]["end_node"] + 1,
] # similiar to j
self.network.updateWeight(
prev_index,
actl_index,
self.weightUpdates[
prev_index[0] : prev_index[1], actl_index[0] : actl_index[1]
],
)
self.network.updateBiasWeight(
num,
actl_index,
self.biasWeightUpdates[num, actl_index[0] : actl_index[1]],
)
def calculateNetworkError(self, trainingSet):
"""calculateNetworkError based on the the mean squared error.
calculateNetworkError is using the mean squared error (MSE) for measuring the average of the squares of the errors.
In this context, the average squared difference between the predicted values and the real values (training set).
Parameters
----------
trainingSet : array
The training-set with X,y for validation of the optimization-cycle
Returns
-------
globalError : float
Global Error as a non-negative floating point value (the best value is 0.0); defined as MSE
"""
idealOutputs = trainingSet[
-1 * self.networkLayers[len(self.networkLayers) - 1]["num_nodes"]
]
startNode = self.networkLayers[len(self.networkLayers) - 1]["start_node"]
endNode = self.networkLayers[len(self.networkLayers) - 1]["end_node"]
numNodes = self.networkLayers[len(self.networkLayers) - 1]["num_nodes"]
globalError = np.mean(
np.square(
np.subtract(
idealOutputs,
self.network.values[startNode : endNode + 1],
dtype=self.dtype,
),
dtype=self.dtype,
),
dtype=self.dtype,
)
return globalError
    def getGlobalError(self):
        """Return the most recently stored global (MSE-based) error.

        Returns
        -------
        error : float
            Value of ``self.error``; set elsewhere in the class (not in
            this visible excerpt).
        """
        return self.error
| 4.0625 | 4 |
dependencies/ui/tab3/resume_folder_viewer.py | statisticalbiotechnology/quandenser-pipeline | 8 | 12769905 | <filename>dependencies/ui/tab3/resume_folder_viewer.py
import sys
from PySide2.QtWidgets import QLineEdit
from PySide2.QtGui import QColor
import os
from custom_config_parser import custom_config_parser
class resume_folder_viewer(QLineEdit):
    """Line edit that validates a resume-directory path as the user types.

    Valid directories are shown in green and written to the nextflow
    settings file; invalid paths are shown in red and the stored value is
    cleared.
    """
    def __init__(self, nf_settings_path, id=0):
        super(resume_folder_viewer,self).__init__(parent = None)
        # NOTE(review): this stores the *builtin* ``type`` — likely a leftover;
        # confirm nothing reads self.type before removing.
        self.type = type
        self.id = id
        # Parser over the nextflow settings file given by nf_settings_path.
        self.nf_settings_parser = custom_config_parser()
        self.nf_settings_parser.load(nf_settings_path)
        # Re-validate on every keystroke.
        self.textChanged.connect(self.check_text)
    def check_text(self):
        # Block signals so the setText() inside pick_color() does not
        # re-trigger this slot recursively.
        self.blockSignals(True)
        all_txt = self.text()  # copy of the current text before clearing
        self.clear()
        self.pick_color(all_txt)
        self.blockSignals(False)
    def pick_color(self, txt):
        """Color the text by validity and persist the directory setting."""
        if os.path.isdir(txt):
            # Existing directory: green text, persist the path.
            self.setStyleSheet("color: rgb(0, 255, 150);")
            self.setText(txt)
            self.nf_settings_parser.write('params.resume_directory', self.text())
        else:
            # Not a directory: red text, clear the stored setting.
            self.setStyleSheet("color: red;")
            self.setText(txt)
            self.nf_settings_parser.write('params.resume_directory', '')
| 2.46875 | 2 |
envelopes/utils.py | audiolion/Envelopes-api | 0 | 12769906 | # Third Party Library Imports
from django.conf import settings
from hashids import Hashids
# Module-wide Hashids encoder. The custom alphabet is uppercase+digits with
# several letters omitted (B, M, N, P) — presumably to avoid ambiguous or
# undesirable codes; confirm before changing, since altering the alphabet
# changes every generated id.
hashid = Hashids(
    min_length=8,
    salt=getattr(settings, 'HASHIDS_SALT', ''),
    alphabet='0123456789ACDEFGHIJKLOQRSTUVWXYZ',
)
def encode(num):
    """Encode an integer into an obfuscated, at-least-8-char hashid string."""
    return hashid.encode(num)
| 1.921875 | 2 |
6KYU/up_array.py | yaznasivasai/python_codewars | 4 | 12769907 | <filename>6KYU/up_array.py
def up_array(arr):
    """Treat *arr* as the digits of a number and return that number + 1.

    Returns the incremented value as a new list of digits, or None when the
    input is empty or contains anything that is not a single decimal digit
    (0-9). Leading zeros collapse, e.g. [0, 4] -> [5].
    """
    if not arr:
        return None
    digit_chars = []
    for item in arr:
        text = str(item)
        if not text.isdigit() or item > 9:
            return None
        digit_chars.append(text)
    incremented = int("".join(digit_chars)) + 1
    return [int(ch) for ch in str(incremented)]
apps/amcm/migrations/0003_auto_20211024_0256.py | agsneutron/asociacion_mexicana_cuarto_milla | 0 | 12769908 | # Generated by Django 3.2.8 on 2021-10-24 02:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration; normally edited only via makemigrations.
    dependencies = [
        ('amcm', '0002_auto_20211024_0134'),
    ]
    operations = [
        # Rename the admin-facing labels of TipoFecha.
        migrations.AlterModelOptions(
            name='tipofecha',
            options={'verbose_name': 'Tipo de Fecha del Evento', 'verbose_name_plural': 'Tipos de Fechas del Evento'},
        ),
        # Link each CuotaEvento to an Evento; default=1 backfills existing
        # rows with event pk 1, then the default is dropped.
        migrations.AddField(
            model_name='cuotaevento',
            name='evento',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.CASCADE, to='amcm.evento'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='fechasevento',
            name='fecha',
            field=models.DateField(verbose_name='Fecha de Vencimiento'),
        ),
    ]
__init__.py | nujabes0321456/Handphone-Ecm-Simulation | 0 | 12769909 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 12 10:39:07 2020
@author: BK
"""
| 0.992188 | 1 |
deep_rl/double_dqn_agent.py | JonasVervloet/RL-Coverage-Planner | 12 | 12769910 | <reponame>JonasVervloet/RL-Coverage-Planner
import torch
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from collections import deque
import random
import math
from deep_rl.trainer import Transition as Transition
class DoubleDeepQAgent:
    """Double DQN agent: epsilon-greedy policy net + periodically synced target net.

    The policy network selects the best next action; the target network
    evaluates it (the classic Double-DQN decoupling in observe_transition).
    """
    QUEUE_LENGTH = 5000     # replay-buffer capacity
    GAMMA = 0.9             # discount factor
    EPSILON_START = 0.9     # initial exploration rate
    EPSILON_END = 0.05      # asymptotic exploration rate
    EPSILON_DECAY = 2000    # exponential decay constant (in episodes)
    TARGET_UPDATE = 1000    # sync target net every N optimizer steps
    BATCH_SIZE = 32
    def __init__(self, network_generator, optim_class, nb_actions):
        """network_generator must expose generate_network() -> torch module."""
        self.policy_net = network_generator.generate_network()
        self.target_net = network_generator.generate_network()
        self.nb_actions = nb_actions
        self.replay_buffer = deque(maxlen=DoubleDeepQAgent.QUEUE_LENGTH)
        self.epsilon = DoubleDeepQAgent.EPSILON_START
        self.optimizer = optim_class(self.policy_net.parameters())
        self.batch_size = DoubleDeepQAgent.BATCH_SIZE
        self.step_counter = 1
        # Start target net as an exact copy of the policy net; eval() since
        # it is never trained directly.
        self.target_net.load_state_dict(self.policy_net.state_dict())
        self.target_net.eval()
    def select_action(self, state_tensor, soft_max=False):
        """Pick an action: epsilon-greedy by default, softmax-sampled otherwise."""
        if not soft_max:
            if random.random() > self.epsilon:
                # Greedy: argmax of the policy net's Q-values.
                with torch.no_grad():
                    state_eval = self.policy_net(torch.unsqueeze(state_tensor, 0))[0]
                return torch.argmax(state_eval).item()
            # Explore: uniform random action.
            return random.randrange(self.nb_actions)
        # Softmax sampling over Q-values (Boltzmann-style exploration).
        with torch.no_grad():
            state_eval = self.policy_net(torch.unsqueeze(state_tensor, 0))[0]
        probs = F.softmax(state_eval, dim=0)
        distribution = Categorical(probs=probs)
        return distribution.sample().item()
    def update_epsilon(self, episode_nb):
        """Exponentially anneal epsilon from EPSILON_START towards EPSILON_END."""
        diff = DoubleDeepQAgent.EPSILON_START - DoubleDeepQAgent.EPSILON_END
        self.epsilon = DoubleDeepQAgent.EPSILON_END + diff * math.exp(-1 * episode_nb / DoubleDeepQAgent.EPSILON_DECAY)
    def save(self, path, episode_nb):
        """Save the policy net's weights to <path>deep_q_agent_<episode>.pt."""
        torch.save(self.policy_net.state_dict(), path + f"deep_q_agent_{episode_nb}.pt")
    def load(self, path, episode_nb, device=None):
        """Load policy-net weights (optionally remapped to device) and sync the target net."""
        if device is not None:
            self.policy_net.load_state_dict(torch.load(path + f"deep_q_agent_{episode_nb}.pt", map_location=device))
        else:
            self.policy_net.load_state_dict(torch.load(path + f"deep_q_agent_{episode_nb}.pt"))
        self.target_net.load_state_dict(self.policy_net.state_dict())
    def evaluate(self):
        """Switch to pure exploitation (epsilon = 0)."""
        self.epsilon = 0.0
    def observe_transition(self, transition, device):
        """Store a transition and, once the buffer is warm, run one Double-DQN update."""
        self.replay_buffer.append(transition)
        # Warm-up: do not train until the buffer holds 10 batches.
        if len(self.replay_buffer) <= self.batch_size * 10:
            return
        transitions = random.sample(self.replay_buffer, self.batch_size)
        # Transpose list-of-Transitions into a Transition of batched fields.
        mini_batch = Transition(*zip(*transitions))
        state_batch = torch.stack(mini_batch.state)
        action_batch = torch.stack(mini_batch.action)
        action_batch = action_batch.unsqueeze(1)
        reward_batch = torch.stack(mini_batch.reward)
        next_state_batch = torch.stack(mini_batch.next_state)
        # Mask of transitions whose episode did NOT end (done expected boolean).
        non_final_mask = ~torch.stack(mini_batch.done)
        # Q(s, a) for the actions actually taken.
        state_action_values = self.policy_net(state_batch).gather(1, action_batch)
        next_state_values = torch.zeros(self.batch_size, device=device)
        # Double DQN: policy net chooses the action, target net scores it.
        next_state_actions = torch.argmax(self.policy_net(next_state_batch), dim=1)
        next_state_values[non_final_mask] = self.target_net(next_state_batch)[
            torch.arange(self.batch_size), next_state_actions
        ][non_final_mask]
        expected_values = (next_state_values * DoubleDeepQAgent.GAMMA) + reward_batch
        # Huber loss between predicted and bootstrapped Q-values.
        loss = F.smooth_l1_loss(state_action_values, expected_values.unsqueeze(1))
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        # Hard-sync the target net every TARGET_UPDATE optimizer steps.
        if self.step_counter % DoubleDeepQAgent.TARGET_UPDATE == 0:
            self.target_net.load_state_dict(self.policy_net.state_dict())
        self.step_counter += 1
examples/autopipegen2/plugins/cpstuff.py | microns-ariadne/ariadne-pipeline-test-harness | 2 | 12769911 | <filename>examples/autopipegen2/plugins/cpstuff.py
# generated by shell2pipe on 2016-03-17
import os
import plugin
# Name of the plugin class exported to the ariadne plugin loader.
plugin_class='cpstuff'
class cpstuff(plugin.AriadneOp):
    """Auto-generated (shell2pipe) op that copies fixture files around."""
    name='cpstuff'
    def run(self, args):
        # NOTE(review): os.system runs through the shell and ignores the exit
        # status; paths are hard-coded by the generator, so injection risk is
        # low, but subprocess.run with a list would be the safer idiom.
        os.system('cp stuff/contents stuff/otherstuff')
        os.system('cp stuff/contents morestuff/')
| 2 | 2 |
tests/core/test_page_id_cache.py | uktrade/directory-cms | 6 | 12769912 | <reponame>uktrade/directory-cms
import pytest
from core.cache import PageIDCache
@pytest.mark.django_db
def test_get_populate_and_delete():
    """Exercise the full PageIDCache lifecycle: empty -> populate -> clear."""
    # the cache should be empty
    assert PageIDCache.get() is None
    # when the page is populated
    result = PageIDCache.populate()
    # then get returns something useful
    assert PageIDCache.get() == result
    # when the cache is cleared
    PageIDCache.clear()
    # then the cache is empty again
    assert PageIDCache.get() is None
    # but we can use populate_if_cold to trigger population
    new_result = PageIDCache.get(populate_if_cold=True)
    # and the result should look very much like what we had before
    assert new_result == result
    # let's clear it again so we don't leak state into other tests
    PageIDCache.clear()
| 2.171875 | 2 |
trains/utilities/wizard/user_input.py | AdrianoKF/trains | 1 | 12769913 | from typing import Optional
def get_input(
    key,  # type: str
    description="",  # type: str
    question="Enter",  # type: str
    required=False,  # type: bool
    default=None,  # type: Optional[str]
    new_line=False,  # type: bool
):
    # type: (...) -> Optional[str]
    """Prompt for a free-form value.

    Blank (whitespace-only) answers return ``default`` unless ``required``
    is set, in which case the prompt repeats. Non-blank answers are
    returned verbatim (not stripped).
    """
    if new_line:
        print()
    while True:
        answer = input("{} {} {}: ".format(question, key, description))
        if answer.strip():
            return answer
        if required:
            print("{} is required".format(key))
        else:
            return default
def input_int(
    key,  # type: str
    description="",  # type: str
    required=False,  # type: bool
    default=None,  # type: Optional[int]
    new_line=False,  # type: bool
):
    # type: (...) -> Optional[int]
    """Prompt the user for an integer value.

    Re-prompts until the entered text parses as an integer. If the field is
    not required and the user enters nothing, ``default`` is returned
    (``None`` stays ``None``).
    """
    while True:
        value = get_input(
            key,
            description,
            required=required,
            default=default,
            new_line=new_line,
        )
        if value is None:
            # Fix: the user accepted an absent default. The original code
            # called int(None), which raised an uncaught TypeError instead
            # of honoring the (None) default.
            return None
        try:
            return int(value)
        except ValueError:
            print(
                "Invalid input: {} should be a number. Please enter an integer".format(
                    key
                )
            )
def input_bool(question, default=False):
    # type: (str, bool) -> bool
    """Ask a yes/no question on stdin.

    :param question: string to display
    :param default: value returned on an empty answer
    :return: True for answers starting with y/t, False for n/f
             (case-insensitive); anything else re-prompts.
    """
    while True:
        answer = input("{}: ".format(question)).lower()
        if not answer:
            return default
        first = answer[0]
        if first in ("y", "t"):
            return True
        if first in ("n", "f"):
            return False
        print("Invalid input: please enter 'yes' or 'no'")
| 4.03125 | 4 |
examples/team-fullmetal/dockerComp/src/server/app/__init__.py | connectthefuture/docker-hacks | 0 | 12769914 | import os
import sys
from flask import Flask, render_template
from flask.ext.mongoengine import MongoEngine
# Flask application object; base settings come from the top-level config module.
app = Flask(__name__)
app.config.from_object('config')
# MongoDB connection settings: credentials are read from the environment
# (U_DB / U_USER / U_PASS); host/port are fixed to a local mongod instance.
app.config["MONGODB_SETTINGS"] = {
    "DB": os.environ.get("U_DB"),
    "USERNAME": os.environ.get("U_USER"),
    "PASSWORD": os.environ.get("U_PASS"),
    "HOST": "127.0.0.1",
    "PORT": 27017 }
# MongoEngine ODM bound to the configured app.
db = MongoEngine(app)
@app.errorhandler(404)
def not_found(error):
    """Render the custom 404 page for unknown routes."""
    return render_template('404.html'), 404
| 2.421875 | 2 |
wavepy.py | gregbad/WavePy | 1 | 12769915 | <filename>wavepy.py
# WavePy is a Wave Optics Simulation for Atmospheric Optics Modeling
# Authors: <NAME>, <NAME>, <NAME>
# Michigan Technological University (c) 2016
# WavePy 0.1 Initial Release
# Contact: <EMAIL>
# Released under BSD attribution license please reference
# Maintained at www.github.com/jpbos/WavePy
# Developed under Python 2.7
import numpy as np
from math import pi, gamma, cos, sin
import matplotlib.pyplot as plt
class wavepy:
    def __init__(self,simOption=0,N=256,SideLen=1.0,NumScr=10,DRx=0.1,dx=5e-3,
                 wvl=1e-6,PropDist=10e3,Cn2=1e-16,loon=1,aniso=1.0,Rdx=5e-3):
        """Set up grid geometry, the source field, phase-screen placement and
        derived turbulence statistics for a split-step wave-optics run.

        Parameters
        ----------
        simOption : int
            Source type: 0 plane wave, 1 Schmidt point source,
            2 collimated Gaussian, 3 Flatte point source.
        N : int
            Grid points per side.
        SideLen : float
            Physical side length of the square grid [m].
        NumScr : int
            Number of phase screens along the path.
        DRx, dx, Rdx, wvl, PropDist, Cn2 : float
            Receiver aperture [m], source/receiver sampling [m],
            wavelength [m], path length [m], structure constant.
        loon : int
            1 to include sub-harmonic compensation, 0 to disable.
        aniso : float
            Anisotropy magnitude.
        """
        self.N = N # number of grid points per side
        self.SideLen = SideLen # Length of one side of square phase secreen [m]
        self.dx = dx # Sampling interval at source plane
        self.Rdx = Rdx # Sampling interval at receiver plane
        self.L0 = 1e3 # Turbulence outer scale [m]
        self.l0 = 1e-3 # Turbulence inner scale [m]
        self.NumScr = NumScr # Number of screens Turn into input variable
        self.DRx = DRx # Diameter of aperture [m]
        self.wvl = wvl # Wavelength [m]
        self.PropDist = PropDist # Propagation distance(Path Length) [m]
        self.Cn2 = Cn2
        self.simOption = simOption # Simulation type (i.e. spherical, plane)
        self.theta = 0 # Angle of anisotropy [deg]
        self.aniso = aniso # Anisotropy magnitude
        self.alpha = 22.0 # Power Law exponent 22 = 11/3 (Kolmogorov)
        self.k = 2*pi / self.wvl # Optical wavenumber [rad/m]
        self.NumSubHarmonics = 5 # Number of subharmonics
        self.DTx = 0.1 # Transmitting aperture size for Gauss [m]
        self.w0 = (np.exp(-1) * self.DTx)
        # Include sub-harmonic compensation?
        self.loon = loon
        # Simulation output
        self.Output = np.zeros((N,N))
        # Place holders for geometry/source variables
        self.Source = None
        self.r1 = None
        self.x1 = None
        self.y1 = None
        self.rR = None
        self.xR = None
        self.yR = None
        self.Uout = None
        # Source-plane coordinate grids (x1, y1) and radial map r1.
        x = np.linspace(-self.N/2, (self.N/2)-1, self.N) * self.dx
        y = np.linspace(-self.N/2, (self.N/2)-1, self.N) * self.dx
        self.x1, self.y1 = np.meshgrid(x, y)
        self.r1 = np.sqrt(self.x1**2 + self.y1**2)
        if simOption == 0:
            # Plane Wave source (default)
            self.Source = self.PlaneSource()
        elif simOption == 1:
            # Spherical Wave Source
            self.Source = self.PointSource()
        elif simOption == 2:
            #Collimated Gaussian Source
            self.Source = self.CollimatedGaussian()
        elif simOption == 3:
            #Flatte Point Source
            self.Source = self.FlattePointSource()
        # Receiver-plane coordinate grids.
        x = np.linspace(-self.N/2, (self.N/2)-1, self.N) * self.Rdx
        y = np.linspace(-self.N/2, (self.N/2)-1, self.N) * self.Rdx
        self.xR, self.yR = np.meshgrid(x, y)
        self.rR = np.sqrt(self.xR**2 + self.yR**2)
        # Set Propagation Geometry / Screen placement: half-steps at each end,
        # full steps between screens.
        self.dzProps = np.ones(self.NumScr+2)*(self.PropDist/self.NumScr)
        self.dzProps[0:2] = 0.5*(self.PropDist/self.NumScr)
        self.dzProps[self.NumScr:self.NumScr+2] = 0.5*(self.PropDist/self.NumScr)
        self.PropLocs = np.zeros(self.NumScr+3)
        for zval in range(0,self.NumScr+2):
            self.PropLocs[ zval+1 ] = self.PropLocs[zval]+self.dzProps[zval]
        self.ScrnLoc = np.concatenate((self.PropLocs[1:self.NumScr],
                            np.array([self.PropLocs[self.NumScr+1]])),axis=0)
        self.FracPropDist = self.PropLocs/self.PropDist
        # Grid spacing interpolated linearly from dx (source) to Rdx (receiver).
        self.PropSampling = (self.Rdx - self.dx)*self.FracPropDist + self.dx
        self.SamplingRatioBetweenScreen = \
            self.PropSampling[1:len(self.PropSampling)] \
            /self.PropSampling[0:len(self.PropSampling)-1]
        # Set derived values (Fried parameter, per-screen r0, variances, ...).
        self.r0 = (0.423 * (self.k)**2 * self.Cn2 * self.PropDist)**(-3.0/5.0)
        self.r0scrn = (0.423 * ((self.k)**2) * self.Cn2 * (self.PropDist/self.NumScr))**(-3.0/5.0)
        self.log_ampl_var = 0.3075 * ((self.k)**2) * ((self.PropDist)**(11.0/6.0)) * self.Cn2
        self.phase_var = 0.78*(self.Cn2)*(self.k**2)*self.PropDist*(self.L0**(-5.0/3.0))
        self.rho_0 = (1.46 * self.Cn2 * self.k**2 * self.PropDist)**(-5.0/3.0)
        # NOTE(review): 7/6 and 11/6 are INTEGER division (== 1) under the
        # Python 2.7 this file targets, which would make rytovNum wrong there;
        # under Python 3 they are true division. Compare with SetCn2Rytov,
        # which correctly uses 7.0/6.0 — confirm intended interpreter.
        self.rytovNum = np.sqrt(1.23 * self.Cn2 * (self.k**(7/6)) * (self.PropDist**(11/6)) )
        self.rytovVar = self.rytovNum**2
self.rytovVar = self.rytovNum**2
def PlaneSource(self):
#Uniform plane wave
plane = np.ones([self.N,self.N])
return plane
    def PointSource(self):
        """Return a Schmidt-style sinc-apodized point source on the grid.

        The model follows Schmidt's bandwidth-limited point source: a
        spherical-wave phase over r1 with sinc lobes sized so the central
        lobe spans the observation region of interest.
        """
        #Schmidt Point Source
        DROI = 4.0 *self.DRx #Observation plane region [m]
        D1 = self.wvl * self.PropDist / DROI #Central Lobe width [m]
        R = self.PropDist #Radius of curvature at wavefront [m]
        # Spherical-wave quadratic phase, normalized by D1^2.
        temp = np.exp(-1j*self.k/(2*R) * (self.r1**2)) / (D1**2)
        # Separable sinc apodization plus a Gaussian taper to limit bandwidth.
        pt = temp * np.sinc((self.x1/D1)) * np.sinc((self.y1/D1)) * np.exp(-(self.r1/(4.0 * D1))**2)
        return pt
def FlattePointSource(self):
fpt = np.exp(-(self.r1**2) / (10*( self.dx**2)) ) \
* np.cos(-(self.r1**2) / (10*(self.dx**2)) )
return fpt
    def CollimatedGaussian(self):
        """Return a collimated Gaussian beam truncated by the transmit pupil.

        Gaussian waist w0 is set in __init__ from the transmit aperture DTx;
        the hard aperture of radius DTx is applied via MakePupil.
        """
        source = np.exp(-(self.r1**2 / self.w0**2))
        source = source * self.MakePupil(self.DTx)
        #Source return
        return source
def MakeSGB(self):
#Construction of Super Gaussian Boundary
rad = self.r1*(self.N);
w = 0.55*self.N
sg = np.exp(- ((rad / w)**16.0) )
return sg
def MakePupil(self,D_eval):
#Target pupil creation
boundary1 = -(self.SideLen / 2) #sets negative coord of sidelength
boundary2 = self.SideLen / 2 #sets positive coord of sidelength
A = np.linspace(boundary1, boundary2, self.N) #creates a series of numbers evenly spaced between
#positive and negative boundary
A = np.array([A] * self.N) #horizontal distance map created
base = np.linspace(boundary1, boundary2, self.N) #creates another linspace of numbers
set_ones = np.ones(self.N) #builds array of length N filled with ones
B = np.array([set_ones] * self.N)
for i in range(0, len(base)):
B[i] = B[i] * base[i] #vertical distance map created
A = A.reshape(self.N,self.N)
B = B.reshape(self.N,self.N) #arrays reshaped into matrices
x_coord = A**2
y_coord = B**2
rad_dist = np.sqrt(x_coord + y_coord) #now radial distance has been defined
mask = []
for row in rad_dist:
for val in row:
if val < D_eval:
mask.append(1.0)
elif val > D_eval:
mask.append(0.0)
elif val == D_eval:
mask.append(0.5)
mask = np.array([mask])
mask = mask.reshape(self.N,self.N) #mask created and reshaped into a matrix
return mask #returns the pupil mask as the whole function's output
    def PhaseScreen(self):
        """Generate one FFT-based random phase screen [rad].

        Samples a generalized (anisotropic, non-Kolmogorov) turbulence PSD
        with inner/outer-scale rolloff, normalizes its total power to the
        equivalent Kolmogorov (alpha = 11/3) PSD, filters circular complex
        Gaussian noise by it and inverse-FFTs to real space. Low spatial
        frequencies are under-sampled here; see SubHarmonicComp.
        """
        #Generate phase screens
        #potentially change generation to be 1 screen/1 km
        b = self.aniso
        c = 1.0
        thetar = (pi/180.0)*self.theta
        delta = self.dx #Spatial sampling rate
        del_f = 1.0/(self.N * delta) #Frequency grid spacing(1/m)
        cen = np.floor(self.N/2)
        na = self.alpha/6.0 #Normalized alpha value
        Bnum = gamma(na/2.0)
        Bdenom = 2.0**(2.0-na)*pi*na*gamma(-na/2.0)
        #c1 Striblings Consistency parameter. Evaluates to 6.88 in Kolmogorov turb.
        cone = (2.0* (8.0/(na-2.0) *gamma(2.0/(na-2.0)))**((na-2.0)/2.0))
        #Charnotskii/Bos generalized phase consistency parameter
        Bfac = (2.0*pi)**(2.0-na) * (Bnum/Bdenom)
        a = gamma(na-1.0)*cos(na*pi/2.0)/(4.0*pi**2.0)
        # Toselli's inner-scale intertial range consistency parameter
        c_a = (gamma(0.5*(5.0-na))*a*2.0*pi/3.0)**(1.0/(na-5.0))
        fm = c_a/self.l0 # Inner scale frequency(1/m)
        # Set up parameters for the reference Kolmogorov PSD (alpha = 11/3)
        # used only for the power normalization below.
        nae = 22/6.0 #Normalized alpha value
        Bnume = gamma(nae/2.0)
        Bdenome = 2.0**(2.0-nae)*pi*nae*gamma(-nae/2.0)
        conee = (2.0* (8.0/(nae-2.0) *gamma(2.0/(nae-2.0)))**((nae-2.0)/2.0))
        Bface = (2.0*pi)**(2.0-nae) * (Bnume/Bdenome)
        ae = gamma(nae-1.0)*cos(nae*pi/2.0)/(4.0*pi**2.0)
        c_ae = (gamma(0.5*(5.0-nae))*ae*2.0*pi/3.0)**(1.0/(nae-5.0))
        fme = c_ae/self.l0 # Inner scale frequency(1/m)
        f0 = 1.0/self.L0 # Outer scale frequency
        # Create frequency sample grid
        fx = np.arange(-self.N/2.0, self.N/2.0) * del_f
        fx, fy = np.meshgrid(fx,-1*fx)
        # Apply affine transform (rotation by theta for anisotropy axes)
        tx = fx*cos(thetar) + fy*sin(thetar)
        ty = -1.0*fx*sin(thetar) + fy*cos(thetar)
        # Scalar frequency grid (elliptical for b != c)
        f = np.sqrt((tx**2.0)/(b**2.0) + (ty**2.0)/(c**2.0))
        # Sample Turbulence PSD
        PSD_phi = (cone * Bfac * ((b*c)**(-na/2.0)) * (self.r0scrn**(2.0-na)) * np.exp(-(f/fm)**2.0) \
                   /((f**2.0 + f0**2.0)**(na/2.0)))
        tot_NOK = np.sum(PSD_phi)
        # Kolmogorov equivalent and enforce isotropy
        # Sample Turbulence PSD
        PSD_phie = (conee * Bface * (self.r0scrn**(2.0-nae)) * np.exp(-(f/fme)**2.0) \
                    /((f**2.0 + f0**2.0)**(nae/2.0)))
        tot_OK = np.sum(PSD_phie)
        # Rescale so total power matches the Kolmogorov reference.
        PSD_phi = (tot_OK/tot_NOK) * PSD_phi
        #PSD_phi = cone*Bfac* (r0**(2-na)) * f**(-na/2) # Kolmogorov PSD
        # Zero the DC bin (piston has no physical meaning here).
        PSD_phi[np.int(cen),np.int(cen)]=0.0
        # Create a random field that is circular complex Guassian
        cn = (np.random.randn(self.N,self.N) + 1j*np.random.randn(self.N,self.N) )
        # Filter by turbulence PSD
        cn = cn * np.sqrt(PSD_phi)*del_f
        # Inverse FFT
        phz_temp = np.fft.ifft2(np.fft.fftshift(cn))*((self.N)**2)
        # Phase screens: keep the real part only.
        phz1 = np.real(phz_temp)
        return phz1
    def SubHarmonicComp(self,nsub):
        """Generate the low-frequency (sub-harmonic) phase-screen correction.

        FFT screens under-sample spatial frequencies below the grid's
        fundamental; this adds ``nsub`` levels of 6x6 sub-harmonic components
        (each level 3x finer in frequency) following the Lane/Frehlich
        approach, and returns the zero-mean correction to add to an FFT screen.

        Parameters
        ----------
        nsub : int
            Number of sub-harmonic levels to accumulate.
        """
        #Sub-Harmonic Phase screen production
        dq = 1/self.SideLen
        na = self.alpha/6.0
        Bnum = gamma(na/2.0)
        Bdenom = (2**(2-na)) * pi * na * gamma(-na/2)
        Bfac = (2*pi)**(2-na) * (Bnum/Bdenom)
        # c1 Striblings Consistency parameter. Evaluates to 6.88 in Kolmogorov turb.
        cone = (2* (8/(na-2) * gamma(2/(na-2)))**((na-2)/2))
        #Anisotropy factors
        b = self.aniso
        c=1
        f0 = 1/self.L0
        lof_phz = np.zeros((self.N,self.N))
        # Normalized spatial sample indices across the grid.
        temp_m = np.linspace(-0.5,0.5,self.N)
        m_indices, n_indices = np.meshgrid(temp_m, -1*np.transpose(temp_m))
        # 6x6 sub-harmonic frequency indices per level.
        temp_mp = np.linspace(-2.5,2.5,6)
        m_prime_indices,n_prime_indices = np.meshgrid(temp_mp,-1*np.transpose(temp_mp))
        for Np in range(1,nsub+1):
            temp_phz = np.zeros((self.N,self.N))
            #Subharmonic frequency spacing for this level (3x finer each time)
            dqp = dq/(3.0**Np)
            #Set samples
            f_x = 3**(-Np)*m_prime_indices*dq
            f_y = 3**(-Np)*n_prime_indices*dq
            f = np.sqrt((f_x**2)/(b**2) + (f_y**2)/(c**2))
            #Sample PSD
            PSD_fi = cone*Bfac*((b*c)**(-na/2))*(self.r0scrn)**(2-na)*(f**2 + f0**2)**(-na/2)
            #Generate normal circ complex RV
            w = np.random.randn(6,6) + 1j*np.random.randn(6,6)
            #Covariances
            cv = w * np.sqrt(PSD_fi)*dqp
            #Sum over subharmonic components
            temp_shape = np.shape(cv)
            for n in range(0, temp_shape[0]):
                for m in range(0,temp_shape[1]):
                    # Complex exponential basis function for this component.
                    indexMap = ( m_prime_indices[n][m]*m_indices +
                                 n_prime_indices[n][m]*n_indices )
                    temp_phz = temp_phz + cv[m][n] * np.exp(1j*2*pi*(3**(-Np))*indexMap)
            #Accumulate components to phase screen
            lof_phz = lof_phz + temp_phz
        # Keep the real part and remove piston (zero mean).
        lof_phz = np.real(lof_phz) - np.mean(np.real(lof_phz))
        return lof_phz
    def VacuumProp(self):
        """Propagate the source field through vacuum (no turbulence).

        Included for validating the source models and the split-step
        machinery: identical geometry to SplitStepProp but with no phase
        screens applied.
        """
        # Vacuum propagation (included for source validation)
        sg = self.MakeSGB() # absorbing super-Gaussian boundary
        SamplingRatio = self.SamplingRatioBetweenScreen
        # NOTE(review): self.N/2 is a float under Python 3, which makes
        # range(-a, a) raise TypeError — this code targets Python 2.7;
        # use self.N // 2 for a py3 port.
        a = self.N/2
        nx, ny = np.meshgrid(range(-a,a), range(-a, a))
        # Initial Propagation from source plane to first screen location
        P0 = np.exp(1j* (self.k/ (2*self.dzProps[0]) ) * (self.r1**2) * (1-SamplingRatio[0]) )
        Uin = P0 * self.Source
        for pcount in range(1,len(self.PropLocs)-2):
            # Angular-spectrum step: FFT, apply Fresnel kernel, inverse FFT.
            UinSpec = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(Uin)))
            #Set spatial frequencies at propagation plane
            deltaf = 1/(self.N * self.PropSampling[pcount])
            fX = nx * deltaf
            fY = ny * deltaf
            fsq = fX**2 + fY**2
            #Quadratic Phase Factor
            QuadPhaseFac = np.exp( -1j * np.pi * self.wvl * self.dzProps[pcount] \
                           * SamplingRatio[pcount] * fsq)
            Uin = np.fft.ifftshift(np.fft.ifft2( \
                np.fft.ifftshift(UinSpec * QuadPhaseFac)) )
            # Absorb energy near the grid edge to suppress wrap-around.
            Uin = Uin * sg
        # Final receiver-plane quadratic phase correction.
        PF = np.exp(1j* ( self.k/ (2*self.dzProps[-1]) ) * (self.rR**2) * (SamplingRatio[-1]))
        Uout = PF * Uin
        return Uout
    def SplitStepProp(self,Uin,PhaseScreenStack):
        """Split-step (Fresnel) propagation through a stack of phase screens.

        Parameters
        ----------
        Uin : ndarray
            Input field. NOTE(review): this argument is immediately
            overwritten below — the propagation always starts from
            ``self.Source``; confirm intent before relying on Uin.
        PhaseScreenStack : ndarray
            (N, N, NumScr) stack of per-screen phase values [rad]; each
            screen is applied as exp(1j * screen). NOTE(review): TurbSim
            passes np.exp(1j*phz) here, which gets exponentiated AGAIN —
            looks like a double application; verify against the intended
            physics.
        """
        #Propagation/Fresnel Diffraction Integral
        sg = self.MakeSGB() # absorbing super-Gaussian boundary
        SamplingRatio = self.SamplingRatioBetweenScreen
        # NOTE(review): self.N/2 is float under Python 3 (range() would fail);
        # code targets Python 2.7.
        a = self.N/2
        nx, ny = np.meshgrid(range(-a,a), range(-a, a))
        # Initial Propagation from source plane to first screen location
        P0 = np.exp(1j* (self.k/ (2*self.dzProps[0]) ) * (self.r1**2) * (1-SamplingRatio[0]) )
        Uin = P0 * self.Source * np.exp(1j * PhaseScreenStack[:,:,0])
        for pcount in range(1,len(self.PropLocs)-2):
            # Angular-spectrum step: FFT, apply Fresnel kernel, inverse FFT.
            UinSpec = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(Uin)))
            #Set spatial frequencies at propagation plane
            deltaf = 1/(self.N * self.PropSampling[pcount])
            fX = nx * deltaf
            fY = ny * deltaf
            fsq = fX**2 + fY**2
            #Quadratic Phase Factor
            QuadPhaseFac = np.exp( -1j * np.pi * self.wvl * self.dzProps[pcount] \
                           * SamplingRatio[pcount] * fsq)
            Uin = np.fft.ifftshift(np.fft.ifft2( \
                np.fft.ifftshift(UinSpec * QuadPhaseFac)) )
            # Apply the boundary window and the next phase screen.
            Uin = Uin * sg * np.exp(1j * PhaseScreenStack[:,:,pcount-1])
        # Final receiver-plane quadratic phase correction.
        PF = np.exp(1j* ( self.k/ (2*self.dzProps[-1]) ) * (self.rR**2) * (SamplingRatio[-1]))
        Uout = PF * Uin
        return Uout
    def TurbSim(self):
        """Run a full turbulence simulation and store the field in self.Output.

        Builds NumScr phase screens (FFT-based high-frequency part plus,
        when self.loon is set, sub-harmonic low-frequency compensation) and
        split-step propagates the source through them.
        """
        #initialize phase screen array
        phz = np.zeros(shape=(self.N,self.N,self.NumScr))
        phz_lo = np.zeros(shape=(self.N,self.N,self.NumScr))
        phz_hi = np.zeros(shape=(self.N,self.N,self.NumScr))
        for idxscr in range(0,self.NumScr,1):
            phz_hi[:,:,idxscr] = self.PhaseScreen()
            #FFT-based phase screens
            phz_lo[:,:,idxscr] = self.SubHarmonicComp(self.NumSubHarmonics)
            #sub harmonics
            phz[:,:,idxscr] = self.loon * phz_lo[:,:,idxscr] + phz_hi[:,:,idxscr]
            #subharmonic compensated phase screens
        #Simulating propagation
        # NOTE(review): SplitStepProp itself computes exp(1j * stack), so
        # passing np.exp(1j*phz) applies the complex exponential twice —
        # likely the intent was to pass ``phz`` directly; confirm.
        self.Output = self.SplitStepProp(self.Source, np.exp(1j*phz))
    def SetCn2Rytov(self,UserRytov):
        """Set turbulence strength from a user-specified Rytov number.

        Solves the spherical/plane Rytov relation for Cn2 and refreshes all
        Cn2-derived quantities (r0, per-screen r0, variances, rho_0).
        """
        # Change rytov number and variance to user specified value
        self.rytovNum = UserRytov
        self.rytov = self.rytovNum**2
        rytov_denom = 1.23*(self.k)**(7.0/6.0)*(self.PropDist)**(11.0/6.0)
        # Find Cn2 by inverting rytov = 1.23 * Cn2 * k^(7/6) * L^(11/6).
        self.Cn2 = self.rytov/rytov_denom
        # Set derived values (must stay in sync with __init__).
        self.r0 = (0.423 * (self.k)**2 * self.Cn2 * self.PropDist)**(-3.0/5.0)
        self.r0scrn = (0.423 * ((self.k)**2) * self.Cn2 * (self.PropDist/self.NumScr))**(-3.0/5.0)
        self.log_ampl_var = 0.3075 * ((self.k)**2) * ((self.PropDist)**(11.0/6.0)) * self.Cn2
        self.phase_var = 0.78*(self.Cn2)*(self.k**2)*self.PropDist*(self.L0**(-5.0/3.0))
        self.rho_0 = (1.46 * self.Cn2 * self.k**2 * self.PropDist)**(-5.0/3.0)
def EvalSI(self):
temp_s = (np.abs(self.Output)**2) * self.makePupil(self.DRx)
temp_s = temp_s.ravel()[np.flatnonzero(temp_s)]
s_i = (np.mean( temp_s**2 )/ (np.mean(temp_s)**2) ) - 1
return s_i
    def StructFunc(self,ph):
        """Estimate the phase structure function D(r) of screen ``ph``.

        Uses the FFT-based windowed estimator: the screen is masked by a
        pupil of radius SideLen/4 and D is recovered from the spectra of the
        masked phase, its square, and the window itself.
        """
        # Define mask construction
        mask = self.MakePupil(self.SideLen/4)
        delta = self.SideLen/self.N
        N_size = np.shape(ph) #Make sure to reference 0th element later
        ph = ph*mask
        # Spectra of the masked phase (P), masked phase squared (S) and
        # of the window (W); delta^2 converts the DFT to a continuous FT.
        P = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(ph)))*(delta**2)
        S = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(ph**2)))*(delta**2)
        W = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(mask)))*(delta**2)
        delta_f = 1/(N_size[0]*delta)
        fft_size_a = np.shape(W*np.conjugate(W))
        # Window autocorrelation (normalization term).
        w2 = np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift(W*np.conjugate(W))))*((fft_size_a[0]*delta_f)**2)
        fft_size_b = np.shape(np.real(S*np.conjugate(W))-np.abs(P)**2)
        # Structure function numerator: D = 2 * IFT{ Re(S W*) - |P|^2 }.
        D = 2 * ((np.fft.ifftshift(np.fft.ifft2(np.fft.ifftshift(np.real(S*np.conjugate(W))-np.abs(P)**2)))) * ((fft_size_b[0]*delta_f)**2) )
        D = D/w2
        # Keep magnitude and restrict to the valid (in-window) region.
        D = np.abs(D) * mask
        return D
    def Validate(self,nruns):
        """Compare simulated structure functions against 6.88*(r/r0)^(5/3).

        Averages ``nruns`` FT-only and sub-harmonic-compensated phase screens,
        computes their structure functions, and plots a central slice against
        the Kolmogorov theory curve (blue=theory, green=FT, red=SH).

        NOTE(review): mutates instance state (self.r0scrn, self.N) and relies
        on Python 2 integer division — ``self.N/2`` is used as an array size
        and slice bound, which fails on Python 3 where it is a float.
        """
        self.r0scrn = 0.5*self.SideLen/20
        self.N = 512
        phz_FT = np.zeros((self.N,self.N))
        phz_FT_temp = phz_FT
        phz_SH = np.zeros((self.N,self.N))
        phz_SH_temp = phz_SH
        #Generating multiple phase screens
        for j in range(0,nruns):
            phz_FT_temp = self.PhaseScreen()
            #using phase screens from ^ so that time isn't wasted generating
            #screens for the SubHarmonic case
            phz_SH_temp = self.SubHarmonicComp(1) + phz_FT_temp
            phz_FT_temp = self.StructFunc(phz_FT_temp)
            phz_SH_temp = self.StructFunc(phz_SH_temp)
            phz_FT = phz_FT + phz_FT_temp
            phz_SH = phz_SH + phz_SH_temp
        #Averaging the runs and correct bin size
        phz_FT = phz_FT/nruns
        phz_SH = phz_SH/nruns
        m,n = np.shape(phz_FT)
        centerX = round(m/2)+1
        # Extract the central column and keep the half-slice from centre outwards.
        phz_FT_disp = np.ones(self.N/2)
        phz_FT_disp = phz_FT[:,centerX]
        phz_SH_disp = np.ones(self.N/2)
        phz_SH_disp = phz_SH[:,centerX]
        phz_FT_disp = phz_FT_disp[0:(self.N/2)]
        phz_FT_disp = phz_FT_disp[::-1]
        phz_SH_disp = phz_SH_disp[0:(self.N/2)]
        phz_SH_disp = phz_SH_disp[::-1]
        #array of values for normalized r to plot x-axis
        cent_dist = np.zeros(self.N/2)
        r_size = (0.5*self.SideLen)/(0.5*self.N)
        for i in range(0,(self.N/2)):
            cent_dist[i] = (i*r_size)/(self.r0scrn)
        #Defining theoretical equation
        theory_val = np.zeros(self.N/2)
        theory_val = 6.88*(cent_dist)**(5.0/3.0)
        #Plotting 3 options, with blue=theory, green=FT, and red=SH in current order
        plt.plot(cent_dist,theory_val)
        plt.plot(cent_dist,phz_FT_disp)
        plt.plot(cent_dist,phz_SH_disp)
        plt.xlim((0,10))
        plt.ylim((0,400))
| 2.78125 | 3 |
CCF/CSP/2020/20061.py | cnsteven/online-judge | 1 | 12769916 | <reponame>cnsteven/online-judge
from collections import defaultdict
# Read n labelled 2-D points, then answer m queries: does the line
# t0 + t1*x + t2*y = 0 separate every class of points onto a single side?
n, m = map(int, input().split())
points = defaultdict(list)  # label -> list of (x, y)
for _ in range(n):
    x, y, t = input().split()
    points[t].append((int(x), int(y)))
def test(t0, t1, t2):
    """Return True iff, for every label, all its points share one sign of
    t0 + t1*x + t2*y (i.e. each class lies strictly on one side)."""
    for t in points:
        off = 0  # sign (-1/+1) of the first point seen for this label
        for x, y in points[t]:
            v = t0 + t1 * x + t2 * y
            if off == 0:
                # NOTE(review): v == 0 (point exactly on the line) raises
                # ZeroDivisionError here; the problem presumably guarantees
                # no point lies on a query line.
                off = v // abs(v)
            elif off * v < 0:
                return False
    return True
for _ in range(m):
    t0, t1, t2 = map(int, input().split())
    print('Yes' if test(t0, t1, t2) else 'No')
| 3.265625 | 3 |
archives/admin.py | kingsdigitallab/sdo-django | 0 | 12769917 | from django.contrib import admin
from archives.models import *
from archives.forms import DocumentForm
# Inline editors embedded on their parent model's admin page.
class AddressInline(admin.StackedInline):
    model = Address
    extra = 1
class ContainerStatementInline(admin.TabularInline):
    model = ContainerStatements
    extra = 1
class DocumentStatementInline(admin.TabularInline):
    model = DocumentStatements
    extra = 1
class CollectionStatementInline(admin.TabularInline):
    model = CollectionStatements
    extra = 1
# NOTE(review): AddressAdmin is defined but never registered below —
# confirm whether Address should appear as a standalone admin page.
class AddressAdmin(admin.ModelAdmin):
    search_fields = ['address1']
class RepositoryAdmin(admin.ModelAdmin):
    list_display = ('name', 'identifier', 'description')
    inlines = [AddressInline]
    save_on_top = True
    search_fields = ['name']
class ContainerAdmin(admin.ModelAdmin):
    # get_collection_full_name is a model method rendered as a column.
    list_display = ('__str__', 'get_collection_full_name',
                    'box', 'folder', 'series', 'description', 'content_type')
    list_filter = ('content_type', 'collection')
    inlines = [ContainerStatementInline]
    search_fields = ['collection__name', 'collection__identifier',
                     'box', 'folder', 'series', 'description']
    fieldsets = [
        (None, {'fields': ['collection', 'content_type']}),
        ('Container Identifiers', {'fields': ['series', 'box', 'folder']}),
    ]
    save_on_top = True
class DocumentAdmin(admin.ModelAdmin):
    list_display = ('__str__', 'id_supplied', 'get_container_full_label',
                    'get_container_content_type', 'coverage_start', 'coverage_end', 'description')
    list_filter = ('container',)
    inlines = [DocumentStatementInline]
    form = DocumentForm  # custom form with document-specific validation
    save_on_top = True
class CollectionAdmin(admin.ModelAdmin):
    list_display = ('name', 'name_supplied', 'identifier',
                    'repository', 'description')
    inlines = [CollectionStatementInline]
    search_fields = ['name', 'identifier', 'description']
    save_on_top = True
# Wire the admin classes to their models.
admin.site.register(Collection, CollectionAdmin)
admin.site.register(Repository, RepositoryAdmin)
admin.site.register(Container, ContainerAdmin)
admin.site.register(Document, DocumentAdmin)
| 1.882813 | 2 |
Cnet/SummaryCallback.py | chriamue/Cnet | 0 | 12769918 | import numpy as np
from keras.callbacks import Callback
from keras import backend as K
import tensorflow as tf
class SummaryCallback(Callback):
    """Keras callback that mirrors per-batch losses and image/mask/prediction
    snapshots into the trainer's summary writer (TensorBoard-style).

    It hijacks the Keras train function's ``fetches`` so that the current
    input image, target mask and model output are copied into tf.Variables
    on every batch, then evaluated and logged every ``summarysteps`` batches.
    """
    def __init__(self, trainer, validation=False):
        # NOTE(review): super(...) is created but never called —
        # Callback.__init__ is skipped; should be super(...).__init__().
        super(SummaryCallback, self)
        self.trainer = trainer
        self.summarysteps = trainer.config['summarysteps']
        self.validation = validation
        # Scratch variables that receive the batch tensors via tf.assign.
        self.image = tf.Variable(0., validate_shape=False)
        self.mask = tf.Variable(0., validate_shape=False)
        self.predicted = tf.Variable(0., validate_shape=False)
        model = self.trainer.model.model
        self.fetches = [tf.assign(self.image, model.inputs[0], validate_shape=False),
                        tf.assign(self.mask, model.targets[0], validate_shape=False),
                        tf.assign(self.predicted, model.outputs[0], validate_shape=False)]
        model._function_kwargs = {'fetches': self.fetches}
    def on_train_begin(self, logs={}):
        # NOTE(review): mutable default argument (logs={}) mirrors the Keras
        # API of the time; it is never mutated here.
        self.losses = []
        model = self.trainer.model.model
        # Re-install the fetches in case the train function was rebuilt.
        self.fetches = [tf.assign(self.image, model.inputs[0], validate_shape=False),
                        tf.assign(self.mask, model.targets[0], validate_shape=False),
                        tf.assign(self.predicted, model.outputs[0], validate_shape=False)]
        model._function_kwargs = {'fetches': self.fetches}
    def on_train_end(self, logs={}):
        # Detach the fetches so later predict/evaluate calls are unaffected.
        model = self.trainer.model.model
        model._function_kwargs = {'fetches': []}
    def on_batch_end(self, batch, logs={}):
        loss = logs.get('loss')
        self.losses.append(loss)
        if self.validation is False:
            self.trainer.global_step += 1
            self.trainer.loss += loss
            # Log scalars/images only every summarysteps batches.
            if batch % self.summarysteps == 0:
                if self.trainer.summarywriter:
                    self.trainer.summarywriter.add_scalar(
                        self.trainer.name+'loss', loss, global_step=self.trainer.global_step)
                    image = K.eval(self.image)
                    # Guard: the variable still holds its scalar initializer
                    # until the first fetch has run.
                    if not type(image) is np.float32:
                        # Convert HWC -> CHW for the summary writer.
                        image = image[0]
                        image = np.rollaxis(image, axis=2, start=0)
                        mask = K.eval(self.mask)[0]
                        mask = np.rollaxis(mask, axis=2, start=0)[1]
                        predicted = K.eval(self.predicted)[0]
                        predicted = np.rollaxis(predicted, axis=2, start=0)[1]
                        self.trainer.summarywriter.add_image(
                            self.trainer.name+'image',image/255.0, global_step=self.trainer.global_step)
                        self.trainer.summarywriter.add_image(
                            self.trainer.name+'mask', mask.astype(np.float32), global_step=self.trainer.global_step)
                        # +0.0001 avoids division by zero on all-zero outputs.
                        self.trainer.summarywriter.add_image(
                            self.trainer.name+'predicted', predicted/(predicted.max()+0.0001), global_step=self.trainer.global_step)
        else:
            # Validation pass: same logging under 'val_' prefixed tags.
            if self.trainer.summarywriter:
                self.trainer.summarywriter.add_scalar(
                    self.trainer.name+'val_loss', loss, global_step=self.trainer.global_step)
                image = K.eval(self.image)
                if not type(image) is np.float32:
                    image = image[0]
                    image = np.rollaxis(image, axis=2, start=0)
                    mask = K.eval(self.mask)[0]
                    mask = np.rollaxis(mask, axis=2, start=0)[1]
                    predicted = K.eval(self.predicted)[0]
                    predicted = np.rollaxis(predicted, axis=2, start=0)[1]
                    self.trainer.summarywriter.add_image(
                        self.trainer.name+'val_image',image/255.0, global_step=self.trainer.global_step)
                    self.trainer.summarywriter.add_image(
                        self.trainer.name+'val_mask', mask, global_step=self.trainer.global_step)
                    self.trainer.summarywriter.add_image(
                        self.trainer.name+'val_predicted', predicted/(predicted.max()+0.0001), global_step=self.trainer.global_step)
| 2.421875 | 2 |
app/tasks.py | Spin14/wolf-backend | 2 | 12769919 | import logging
logger = logging.getLogger(__name__)
async def do(param: str) -> None:
    """Placeholder task; intentionally a no-op (excluded from coverage)."""
    pass  # pragma: no cover
async def log_something() -> None:
    """Emit one message at every log level (used to exercise log handling)."""
    logger.debug("debug log")
    logger.info("info log")
    logger.warning("warning log")
    # NOTE(review): the messages below say "debug" but are emitted at
    # error/critical level — possibly copy-paste leftovers; confirm no
    # test asserts on these exact strings before renaming them.
    logger.error("debug log")
    logger.critical("critical log")
    logger.debug("debug log 2")
    logger.error("debug error 2")
| 2.640625 | 3 |
tests/test_remove_stress.py | absentabyss/ithkuil | 20 | 12769920 | import pytest
from ithkuil.morphology.words import remove_stress
txts_to_test = [
('a', 'a'),
('o', 'o'),
('áu', 'au'),
('ái', 'ai'),
('aú', 'aù'),
('aé', 'ae'),
('á', 'a')
]
@pytest.mark.parametrize('txt, expected', txts_to_test)
def test_word(txt, expected):
assert remove_stress(txt) == expected | 3.171875 | 3 |
openmdao.lib/src/openmdao/lib/drivers/test/test_newton.py | MrShoks/OpenMDAO-Framework | 1 | 12769921 | <filename>openmdao.lib/src/openmdao/lib/drivers/test/test_newton.py
"""
Test the Newton solver
"""
import unittest
import numpy
# pylint: disable=F0401,E0611
from openmdao.lib.drivers.newton_solver import NewtonSolver
from openmdao.lib.optproblems.scalable import Discipline
from openmdao.lib.optproblems.sellar import Discipline1_WithDerivatives, \
Discipline2_WithDerivatives, \
Discipline1, Discipline2
from openmdao.main.api import Assembly, Component, set_as_top, Driver
from openmdao.main.hasparameters import HasParameters
from openmdao.main.interfaces import IHasParameters, implements
from openmdao.main.test.simpledriver import SimpleDriver
from openmdao.main.datatypes.api import Float
from openmdao.test.execcomp import ExecComp, ExecCompWithDerivatives
from openmdao.util.testutil import assert_rel_error
from openmdao.util.decorators import add_delegate
class Sellar_MDA(Assembly):
    """Sellar two-discipline MDA: forward coupling d1.y1 -> d2.y1 is an explicit
    connection; the feedback on y2 is closed by a Newton parameter/constraint pair."""

    def configure(self):
        self.add('d1', Discipline1_WithDerivatives())
        self.add('d2', Discipline2_WithDerivatives())

        # Identical starting guesses for the shared states of both disciplines.
        for comp in (self.d1, self.d2):
            comp.y1 = 1.0
            comp.y2 = 1.0
            comp.z1 = 5.0
            comp.z2 = 2.0
        self.d1.x1 = 1.0

        # Only the forward coupling is connected; d2.y2 -> d1.y2 is left open
        # and enforced by the solver constraint below instead.
        self.connect('d1.y1', 'd2.y1')

        self.add('driver', NewtonSolver())
        self.driver.workflow.add(['d1', 'd2'])
        self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
        self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_subbed(Assembly):
    """Same Sellar coupling, but the Newton solver sits one level down
    ('subdriver') inside the top driver's workflow."""

    def configure(self):
        self.add('d1', Discipline1_WithDerivatives())
        self.add('d2', Discipline2_WithDerivatives())

        for comp in (self.d1, self.d2):
            comp.y1 = 1.0
            comp.y2 = 1.0
            comp.z1 = 5.0
            comp.z2 = 2.0
        self.d1.x1 = 1.0

        self.connect('d1.y1', 'd2.y1')

        self.add('subdriver', NewtonSolver())
        self.driver.workflow.add(['subdriver'])
        self.subdriver.workflow.add(['d1', 'd2'])

        # NOTE(review): the parameter/constraint are registered on the top
        # driver rather than on the Newton subdriver -- preserved as-is from
        # the original; confirm this is intentional.
        self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
        self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_Mixed(Assembly):
    """Sellar MDA with mixed derivative support: d1 is finite-differenced,
    d2 provides analytic derivatives."""

    def configure(self):
        self.add('d1', Discipline1())
        self.add('d2', Discipline2_WithDerivatives())

        for comp in (self.d1, self.d2):
            comp.y1 = 1.0
            comp.y2 = 1.0
            comp.z1 = 5.0
            comp.z2 = 2.0
        self.d1.x1 = 1.0

        # Feedback (d2.y2 -> d1.y2) is left open; Newton enforces it below.
        self.connect('d1.y1', 'd2.y1')

        self.add('driver', NewtonSolver())
        self.driver.workflow.add(['d1', 'd2'])
        self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
        self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_Mixed_Flipped(Assembly):
    """Mirror of Sellar_MDA_Mixed: here d1 has analytic derivatives and d2 is
    finite-differenced."""

    def configure(self):
        self.add('d1', Discipline1_WithDerivatives())
        self.add('d2', Discipline2())

        for comp in (self.d1, self.d2):
            comp.y1 = 1.0
            comp.y2 = 1.0
            comp.z1 = 5.0
            comp.z2 = 2.0
        self.d1.x1 = 1.0

        # Feedback (d2.y2 -> d1.y2) is left open; Newton enforces it below.
        self.connect('d1.y1', 'd2.y1')

        self.add('driver', NewtonSolver())
        self.driver.workflow.add(['d1', 'd2'])
        self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
        self.driver.add_constraint('d1.y2 = d2.y2')
class Sellar_MDA_None(Assembly):
    """Sellar MDA where neither discipline supplies analytic derivatives, so
    the solver must rely entirely on finite differencing."""

    def configure(self):
        self.add('d1', Discipline1())
        self.add('d2', Discipline2())

        for comp in (self.d1, self.d2):
            comp.y1 = 1.0
            comp.y2 = 1.0
            comp.z1 = 5.0
            comp.z2 = 2.0
        self.d1.x1 = 1.0

        # Feedback (d2.y2 -> d1.y2) is left open; Newton enforces it below.
        self.connect('d1.y1', 'd2.y1')

        self.add('driver', NewtonSolver())
        self.driver.workflow.add(['d1', 'd2'])
        self.driver.add_parameter('d1.y2', low=-1e99, high=1e99)
        self.driver.add_constraint('d1.y2 = d2.y2')
class Scalable_MDA(Assembly):
    """Scalable two-discipline MDA with array-valued coupling, closed by
    Newton through an array parameter/constraint pair."""

    def configure(self):
        self.add('d1', Discipline(prob_size=2))
        self.add('d2', Discipline(prob_size=2))

        # Forward coupling only; the reverse path is enforced by the solver.
        self.connect('d1.y_out', 'd2.y_in')

        self.add('driver', NewtonSolver())
        self.driver.workflow.add(['d1', 'd2'])
        self.driver.add_parameter('d1.y_in', low=-1e99, high=1e99)
        self.driver.add_constraint('d2.y_out = d1.y_in')
class Newton_SolverTestCase(unittest.TestCase):
"""test the Newton Solver component"""
def setUp(self):
self.top = set_as_top(Sellar_MDA())
def tearDown(self):
self.top = None
def test_newton(self):
print self.top.d1.y1, self.top.d2.y1, self.top.d1.y2, self.top.d2.y2
self.top.run()
print self.top.d1.y1, self.top.d2.y1, self.top.d1.y2, self.top.d2.y2
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_flip_constraint(self):
self.top.driver.clear_constraints()
self.top.driver.add_constraint('d2.y2 = d1.y2')
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_mixed(self):
self.top = set_as_top(Sellar_MDA_Mixed())
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_mixed_flipped(self):
self.top = set_as_top(Sellar_MDA_Mixed_Flipped())
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_newton_none(self):
self.top = set_as_top(Sellar_MDA_None())
self.top.run()
assert_rel_error(self, self.top.d1.y1,
self.top.d2.y1,
1.0e-4)
assert_rel_error(self, self.top.d1.y2,
self.top.d2.y2,
1.0e-4)
def test_scalable_newton(self):
# This verifies that it works for arrays
self.top = set_as_top(Scalable_MDA())
self.top.d1.x = self.top.d2.x = numpy.array([[3.0], [-1.5]])
self.top.d1.z = self.top.d2.z = numpy.array([[-1.3], [2.45]])
self.top.d1.C_y = numpy.array([[1.1, 1.3], [1.05, 1.13]])
self.top.d2.C_y = numpy.array([[0.95, 0.98], [0.97, 0.95]])
self.top.run()
assert_rel_error(self, self.top.d1.y_out[0],
self.top.d2.y_in[0],
1.0e-4)
assert_rel_error(self, self.top.d1.y_out[1],
self.top.d2.y_in[1],
1.0e-4)
assert_rel_error(self, self.top.d2.y_out[0],
self.top.d1.y_in[0],
1.0e-4)
assert_rel_error(self, self.top.d2.y_out[1],
self.top.d1.y_in[1],
1.0e-4)
def test_general_solver(self):
a = set_as_top(Assembly())
comp = a.add('comp', ExecComp(exprs=["f=a * x**n + b * x - c"]))
comp.n = 77.0/27.0
comp.a = 1.0
comp.b = 1.0
comp.c = 10.0
comp.x = 0.0
driver = a.add('driver', NewtonSolver())
driver.workflow.add('comp')
driver.add_parameter('comp.x', 0, 100)
driver.add_constraint('comp.f=0')
self.top.driver.gradient_options.fd_step = 0.01
self.top.driver.gradient_options.fd_step_type = 'relative'
a.run()
assert_rel_error(self, a.comp.x, 2.06720359226, .0001)
assert_rel_error(self, a.comp.f, 0, .0001)
# The following test generates warnings due to nans and infs in u and df
# vectors in the newton backtracking. The test doesn't actually check
# anything except apparently that we don't raise an exception, so it's
# not really a good test.
#def test_initial_run(self):
#class MyComp(Component):
#x = Float(0.0, iotype='in')
#xx = Float(0.0, iotype='in', low=-100000, high=100000)
#f_x = Float(iotype='out')
#y = Float(iotype='out')
#def execute(self):
#if self.xx != 1.0:
#self.raise_exception("Lazy", RuntimeError)
#self.f_x = 2.0*self.x
#self.y = self.x
#@add_delegate(HasParameters)
#class SpecialDriver(Driver):
#implements(IHasParameters)
#def execute(self):
#self.set_parameters([1.0])
#top = set_as_top(Assembly())
#top.add('comp', MyComp())
#top.add('driver', NewtonSolver())
#top.add('subdriver', SpecialDriver())
#top.driver.workflow.add('subdriver')
#top.subdriver.workflow.add('comp')
#top.subdriver.add_parameter('comp.xx')
#top.driver.add_parameter('comp.x')
#top.driver.add_constraint('comp.y = 1.0')
#top.driver.max_iteration = 2
#top.run()
def test_newton_nested(self):
# Make sure derivatives across the newton-solved system are correct.
top = set_as_top(Assembly())
top.add('driver', SimpleDriver())
top.add('d1', Discipline1_WithDerivatives())
top.d1.x1 = 1.0
top.d1.y1 = 1.0
top.d1.y2 = 1.0
top.d1.z1 = 5.0
top.d1.z2 = 2.0
top.add('d2', Discipline2_WithDerivatives())
top.d2.y1 = 1.0
top.d2.y2 = 1.0
top.d2.z1 = 5.0
top.d2.z2 = 2.0
top.connect('d1.y1', 'd2.y1')
top.add('solver', NewtonSolver())
top.solver.atol = 1e-9
top.solver.workflow.add(['d1', 'd2'])
top.solver.add_parameter('d1.y2', low=-1e99, high=1e99)
top.solver.add_constraint('d1.y2 = d2.y2')
top.driver.workflow.add(['solver'])
top.driver.add_parameter('d1.z1', low=-100, high=100)
top.driver.add_objective('d1.y1 + d1.y2')
top.run()
J = top.driver.workflow.calc_gradient(mode='forward')
print J
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
J = top.driver.workflow.calc_gradient(mode='adjoint')
print J
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
top.driver.gradient_options.fd_step = 1e-7
top.driver.gradient_options.fd_form = 'central'
J = top.driver.workflow.calc_gradient(mode='fd')
print J
assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
def test_equation(self):
top = set_as_top(Assembly())
top.add('precomp', ExecCompWithDerivatives(['y=x'],
['dy_dx = 1']))
top.precomp.x = 1.0
expr = ['y = 3.0*x*x -4.0*x']
deriv = ['dy_dx = 6.0*x -4.0']
top.add('comp', ExecCompWithDerivatives(expr, deriv))
top.driver.workflow.add(['comp'])
top.add('driver', NewtonSolver())
top.driver.add_parameter('comp.x')
top.driver.add_constraint('precomp.y - comp.y = 1.0 - 2.0')
top.run()
print top.comp.x, top.comp.y
assert_rel_error(self, top.comp.x, -0.38742588, 1e-4)
class Sellar_MDA_Cycles(Assembly):
    """Sellar MDA posed as a true cycle: both couplings are explicit
    connections, so Newton resolves the resulting implicit loop directly
    (no parameter/constraint pair is needed)."""

    def configure(self):
        self.add('d1', Discipline1_WithDerivatives())
        self.add('d2', Discipline2_WithDerivatives())

        for comp in (self.d1, self.d2):
            comp.y1 = 1.0
            comp.y2 = 1.0
            comp.z1 = 5.0
            comp.z2 = 2.0
        self.d1.x1 = 1.0

        # Both directions connected -> a data cycle in the workflow graph.
        self.connect('d1.y1', 'd2.y1')
        self.connect('d2.y2', 'd1.y2')

        self.add('driver', NewtonSolver())
        self.driver.workflow.add(['d1', 'd2'])
class Newton_SolverTestCase_with_Cycles(unittest.TestCase):
    """test the Newton Solver component with cycles"""

    def setUp(self):
        # Cyclic variant: both couplings are explicit connections.
        self.top = set_as_top(Sellar_MDA_Cycles())

    def tearDown(self):
        self.top = None

    def test_newton(self):
        # Converging the cycle should equalize the shared states.
        self.top.run()

        assert_rel_error(self, self.top.d1.y1,
                               self.top.d2.y1,
                               1.0e-4)
        assert_rel_error(self, self.top.d1.y2,
                               self.top.d2.y2,
                               1.0e-4)

    def test_newton_nested(self):
        # Make sure derivatives across the newton-solved system are correct.
        top = set_as_top(Assembly())

        top.add('driver', SimpleDriver())

        top.add('d1', Discipline1_WithDerivatives())
        top.d1.x1 = 1.0
        top.d1.y1 = 1.0
        top.d1.y2 = 1.0
        top.d1.z1 = 5.0
        top.d1.z2 = 2.0

        top.add('d2', Discipline2_WithDerivatives())
        top.d2.y1 = 1.0
        top.d2.y2 = 1.0
        top.d2.z1 = 5.0
        top.d2.z2 = 2.0

        # Full cycle: both couplings connected, no parameter/constraint pair.
        top.connect('d1.y1', 'd2.y1')
        top.connect('d2.y2', 'd1.y2')

        top.add('solver', NewtonSolver())
        top.solver.atol = 1e-9
        top.solver.workflow.add(['d1', 'd2'])
        top.driver.workflow.add(['solver'])

        top.driver.add_parameter('d1.z1', low=-100, high=100)
        top.driver.add_objective('d1.y1 + d1.y2')

        top.run()

        # Forward, adjoint and finite-difference modes must agree on dJ/dz1.
        J = top.driver.workflow.calc_gradient(mode='forward')
        print J
        assert_rel_error(self, J[0][0], 10.77542099, 1e-5)

        J = top.driver.workflow.calc_gradient(mode='adjoint')
        print J
        assert_rel_error(self, J[0][0], 10.77542099, 1e-5)

        top.driver.gradient_options.fd_step = 1e-7
        top.driver.gradient_options.fd_form = 'central'
        J = top.driver.workflow.calc_gradient(mode='fd')
        print J
        assert_rel_error(self, J[0][0], 10.77542099, 1e-5)
# Run the test suite when this file is executed directly.
if __name__ == "__main__":
    unittest.main()
| 2.171875 | 2 |
model/__init__.py | mfacar/controltac | 3 | 12769922 | from .GloVeModel import GloVeModel
from .Word2vecGoogleModel import Word2vecGoogleModel
| 1.070313 | 1 |
openpifpaf/network/factory.py | adujardin/openpifpaf | 0 | 12769923 | import argparse
import logging
import os
from typing import Tuple
import warnings
import torch
import torchvision
from .. import headmeta
from . import basenetworks, heads, nets
# generate hash values with: shasum -a 256 filename.pkl

# Sentinel: the model name is known but its pretrained weights are not
# published (from_checkpoint raises a helpful error for these).
PRETRAINED_UNAVAILABLE = object()

# Dataset cocokp is implied. All other datasets need to be explicit.
# Use http instead of https to avoid SSL certificate issues on Windows.
CHECKPOINT_URLS = {
    'mobilenetv2': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
                    'v0.12a5/mobilenetv2-201112-193315-cocokp-1728a9f5.pkl'),
    'resnet18': PRETRAINED_UNAVAILABLE,
    'resnet50': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
                 'v0.12a7/resnet50-201123-175351-cocokp-o10s-127f7fdf.pkl'),
    'resnet50-crowdpose': (
        'http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
        'v0.12a7/resnet50-201005-100758-crowdpose-d978a89f.pkl'
    ),
    'resnet101': PRETRAINED_UNAVAILABLE,
    'resnet152': PRETRAINED_UNAVAILABLE,
    'shufflenetv2x1': PRETRAINED_UNAVAILABLE,
    'shufflenetv2x2': PRETRAINED_UNAVAILABLE,
    'shufflenetv2k16': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
                        'v0.12b4/shufflenetv2k16-210214-123448-cocokp-o10s-e2ae3708.pkl'),
    'shufflenetv2k16-withdense': (
        'http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
        'v0.12b4/shufflenetv2k16-210221-131426-cocokp-o10s-627d901e.pkl'
    ),
    'shufflenetv2k30': ('http://github.com/vita-epfl/openpifpaf-torchhub/releases/download/'
                        'v0.12b4/shufflenetv2k30-210217-075056-cocokp-o10s-6f9daa84.pkl'),
    'shufflenetv2k44': PRETRAINED_UNAVAILABLE,
}

# Base-network classes exposing cli()/configure() hooks used by Factory.cli.
BASE_TYPES = set([
    basenetworks.MobileNetV2,
    basenetworks.Resnet,
    basenetworks.ShuffleNetV2,
    basenetworks.ShuffleNetV2K,
    basenetworks.SqueezeNet,
])

# Name -> zero-argument constructor for each supported base network.
BASE_FACTORIES = {
    'mobilenetv2': lambda: basenetworks.MobileNetV2('mobilenetv2', torchvision.models.mobilenet_v2),
    'resnet18': lambda: basenetworks.Resnet('resnet18', torchvision.models.resnet18, 512),
    'resnet50': lambda: basenetworks.Resnet('resnet50', torchvision.models.resnet50),
    'resnet101': lambda: basenetworks.Resnet('resnet101', torchvision.models.resnet101),
    'resnet152': lambda: basenetworks.Resnet('resnet152', torchvision.models.resnet152),
    'resnext50': lambda: basenetworks.Resnet('resnext50', torchvision.models.resnext50_32x4d),
    'resnext101': lambda: basenetworks.Resnet('resnext101', torchvision.models.resnext101_32x8d),
    'shufflenetv2x1': lambda: basenetworks.ShuffleNetV2(
        'shufflenetv2x1', torchvision.models.shufflenet_v2_x1_0, 1024),
    'shufflenetv2x2': lambda: basenetworks.ShuffleNetV2(
        # defined in torchvision as [4, 8, 4], [24, 244, 488, 976, 2048]
        'shufflenetv2x2', torchvision.models.shufflenet_v2_x2_0),
    'shufflenetv2k16': lambda: basenetworks.ShuffleNetV2K(
        'shufflenetv2k16', [4, 8, 4], [24, 348, 696, 1392, 1392]),
    'shufflenetv2k20': lambda: basenetworks.ShuffleNetV2K(
        'shufflenetv2k20', [5, 10, 5], [32, 512, 1024, 2048, 2048]),
    'shufflenetv2kx5': lambda: basenetworks.ShuffleNetV2K(
        'shufflenetv2kx5', [6, 13, 6], [42, 640, 1280, 2560, 2560]),
    'shufflenetv2k30': lambda: basenetworks.ShuffleNetV2K(
        'shufflenetv2k30', [8, 16, 6], [32, 512, 1024, 2048, 2048]),
    'shufflenetv2k44': lambda: basenetworks.ShuffleNetV2K(
        'shufflenetv2k44', [12, 24, 8], [32, 512, 1024, 2048, 2048]),
    'squeezenet': lambda: basenetworks.SqueezeNet('squeezenet', torchvision.models.squeezenet1_1),
}

#: headmeta class to head class
HEADS = {
    headmeta.Cif: heads.CompositeField3,
    headmeta.Caf: heads.CompositeField3,
    headmeta.CifDet: heads.CompositeField3,
}

LOG = logging.getLogger(__name__)
def local_checkpoint_path(checkpoint):
    """Resolve *checkpoint* to an existing local file path, or None.

    Accepts either a filesystem path (returned as-is when it exists) or one of
    the known pretrained-model names, in which case the torch-hub download
    cache is searched for the corresponding file.
    """
    if os.path.exists(checkpoint):
        return checkpoint

    if checkpoint not in CHECKPOINT_URLS:
        return None

    url = CHECKPOINT_URLS[checkpoint]
    if hasattr(torch, 'hub') and hasattr(torch.hub, 'get_dir'):
        # torch.hub.get_dir() is new in pytorch 1.6.0
        cache_root = torch.hub.get_dir()
    elif os.getenv('TORCH_HOME'):
        cache_root = os.getenv('TORCH_HOME')
    elif os.getenv('XDG_CACHE_HOME'):
        cache_root = os.path.join(os.getenv('XDG_CACHE_HOME'), 'torch')
    else:
        cache_root = os.path.expanduser(os.path.join('~', '.cache', 'torch'))

    candidate = os.path.join(cache_root, 'checkpoints', os.path.basename(url))
    return candidate if os.path.exists(candidate) else None
class Factory:
    """Builds a Shell network either from scratch (``base_name``) or from a
    checkpoint (``checkpoint``), and reconciles the network's head nets with a
    datamodule's head metas. The two construction modes are mutually exclusive.
    """

    # Class-level defaults; overridable per instance via **kwargs or globally
    # via the CLI (cli()/configure()).
    base_name = None
    checkpoint = None
    cross_talk = 0.0
    download_progress = True
    head_consolidation = 'filter_and_extend'

    def __init__(self, **kwargs):
        # base_name (build from scratch) and checkpoint (load) are exclusive.
        if self.base_name is not None:
            assert self.checkpoint is None
        if self.checkpoint is not None:
            assert self.base_name is None

        # use kwargs to set instance attributes to overwrite class attributes
        for key, value in kwargs.items():
            assert hasattr(self, key), key
            setattr(self, key, value)

    @classmethod
    def cli(cls, parser: argparse.ArgumentParser):
        """Register base-net, head and network-configuration CLI options."""
        for bn in BASE_TYPES:
            bn.cli(parser)
        for hn in set(HEADS.values()):
            hn.cli(parser)

        group = parser.add_argument_group('network configuration')
        # Only advertise checkpoints that actually have published weights.
        available_checkpoints = ['"{}"'.format(n) for n, url in CHECKPOINT_URLS.items()
                                 if url is not PRETRAINED_UNAVAILABLE]
        group.add_argument(
            '--checkpoint', default=cls.checkpoint,
            help=(
                'Path to a local checkpoint. '
                'Or provide one of the following to download a pretrained model: {}'
                ''.format(', '.join(available_checkpoints))
            )
        )
        group.add_argument('--basenet', default=cls.base_name,
                           help='base network, e.g. resnet50')
        group.add_argument('--cross-talk', default=cls.cross_talk, type=float,
                           help='[experimental]')
        assert cls.download_progress
        group.add_argument('--no-download-progress', dest='download_progress',
                           default=True, action='store_false',
                           help='suppress model download progress bar')
        group.add_argument('--head-consolidation',
                           choices=('keep', 'create', 'filter_and_extend'),
                           default=cls.head_consolidation,
                           help=('consolidation strategy for a checkpoint\'s head '
                                 'networks and the heads specified by the datamodule'))

    @classmethod
    def configure(cls, args: argparse.Namespace):
        """Copy parsed CLI options back onto the class-level defaults."""
        for bn in BASE_TYPES:
            bn.configure(args)
        for hn in set(HEADS.values()):
            hn.configure(args)

        cls.base_name = args.basenet
        cls.checkpoint = args.checkpoint
        cls.cross_talk = args.cross_talk
        cls.download_progress = args.download_progress
        cls.head_consolidation = args.head_consolidation

    def factory(self, *, head_metas=None) -> Tuple[nets.Shell, int]:
        """Return ``(network, epoch)``.

        When ``base_name`` is set, builds a fresh network (epoch 0); otherwise
        loads from ``checkpoint`` and, if *head_metas* is given, consolidates
        the loaded heads against them.
        """
        if self.base_name:
            assert head_metas
            assert self.checkpoint is None
            net_cpu: nets.Shell = self.from_scratch(head_metas)
            net_cpu = self.init_net(net_cpu)
            epoch = 0
            return net_cpu, epoch

        net_cpu, epoch = self.from_checkpoint()
        if head_metas is not None:
            self.consolidate_heads(net_cpu, head_metas)

        net_cpu = self.init_net(net_cpu)
        return net_cpu, epoch

    def consolidate_heads(self, net_cpu, head_metas):
        """Reconcile the checkpoint's head nets with *head_metas* in place,
        following the configured ``head_consolidation`` strategy. Note that
        entries of *head_metas* may be overwritten with the checkpoint's metas.
        """
        if self.head_consolidation == 'keep':
            LOG.info('keeping heads from loaded checkpoint')
            # Match head metas by name and overwrite with meta from checkpoint.
            # This makes sure that the head metas have their head_index and
            # base_stride attributes set.
            input_head_meta_indices = {(meta.dataset, meta.name): i
                                       for i, meta in enumerate(head_metas)}
            for hn in net_cpu.head_nets:
                input_index = input_head_meta_indices.get((hn.meta.dataset, hn.meta.name), None)
                if input_index is None:
                    continue
                head_metas[input_index] = hn.meta
        elif self.head_consolidation == 'create':
            LOG.info('creating new heads')
            headnets = [HEADS[h.__class__](h, net_cpu.base_net.out_features)
                        for h in head_metas]
            net_cpu.set_head_nets(headnets)
        elif self.head_consolidation == 'filter_and_extend':
            LOG.info('filtering for dataset heads and extending existing heads')
            existing_headnets = {(hn.meta.dataset, hn.meta.name): hn
                                 for hn in net_cpu.head_nets}
            headnets = []
            for meta_i, meta in enumerate(head_metas):
                if (meta.dataset, meta.name) in existing_headnets:
                    hn = existing_headnets[(meta.dataset, meta.name)]
                    headnets.append(hn)
                    # Match head metas by name and overwrite with meta from checkpoint.
                    # This makes sure that the head metas have their head_index and
                    # base_stride attributes set.
                    head_metas[meta_i] = hn.meta
                else:
                    headnets.append(
                        HEADS[meta.__class__](meta, net_cpu.base_net.out_features))
            net_cpu.set_head_nets(headnets)
        else:
            raise Exception('head strategy {} unknown'.format(self.head_consolidation))

    def from_checkpoint(self) -> Tuple[nets.Shell, int]:
        """Load ``(network, epoch)`` from a local path, known model name or URL.

        Falls back to 'shufflenetv2k16' when no checkpoint is configured.
        Raises for names whose pretrained weights are marked unavailable.
        """
        checkpoint = self.checkpoint
        if not checkpoint:
            checkpoint = 'shufflenetv2k16'

        if CHECKPOINT_URLS.get(checkpoint, None) is PRETRAINED_UNAVAILABLE:
            raise Exception(
                'The pretrained model for {} is not available yet '
                'in this release cycle. Use one of {}.'.format(
                    checkpoint,
                    [k for k, v in CHECKPOINT_URLS.items() if v is not PRETRAINED_UNAVAILABLE],
                )
            )
        checkpoint = CHECKPOINT_URLS.get(checkpoint, checkpoint)

        with warnings.catch_warnings():
            warnings.filterwarnings('ignore', category=torch.serialization.SourceChangeWarning)
            if checkpoint.startswith('http'):
                checkpoint = torch.hub.load_state_dict_from_url(
                    checkpoint,
                    check_hash=not checkpoint.startswith('https'),
                    progress=self.download_progress)
            else:
                checkpoint = torch.load(checkpoint)

        net_cpu: nets.Shell = checkpoint['model']
        epoch = checkpoint['epoch']

        # normalize for backwards compatibility
        nets.model_migration(net_cpu)

        return net_cpu, epoch

    def from_scratch(self, head_metas) -> nets.Shell:
        """Build a fresh Shell: the named base net plus one head per meta."""
        if self.base_name not in BASE_FACTORIES:
            raise Exception('basenet {} unknown'.format(self.base_name))

        basenet = BASE_FACTORIES[self.base_name]()
        headnets = [HEADS[h.__class__](h, basenet.out_features) for h in head_metas]

        net_cpu = nets.Shell(basenet, headnets)
        nets.model_defaults(net_cpu)
        return net_cpu

    def init_net(self, net_cpu):
        """Common final setup: optional cross-talk input processing, eval mode."""
        if self.cross_talk:
            net_cpu.process_input = nets.CrossTalk(self.cross_talk)

        # initialize for eval
        net_cpu.eval()

        LOG.debug(net_cpu)
        return net_cpu
| 1.9375 | 2 |
build/genoken/dna/dnautil.py | k3nnywilliam/genoken | 0 | 12769924 | <gh_stars>0
# -*- coding: utf-8 -*-
#!usr/bin/env python3
#Created by <NAME> 2020
from dna import constant
class DNAUtils:
    """Utility operations on a DNA sequence string (e.g. ``'GATTACA'``).

    The sequence is assumed to use upper-case A/C/G/T letters -- TODO confirm
    against callers (only ``has_stop_codon`` tolerates lower case).
    """

    def __init__(self, dna):
        super().__init__()
        self.dna = dna

    def has_stop_codon(self, frame: int) -> bool:
        """Return True if any codon read in reading *frame* (0, 1 or 2)
        matches one of ``constant.STOP_CODONS`` (compared lower-case)."""
        return any(self.dna[i:i + 3].lower() in constant.STOP_CODONS
                   for i in range(frame, len(self.dna), 3))

    def transcribe(self) -> str:
        """Transcribe DNA to RNA by replacing thymine with uracil.

        Returns:
            str: the transcribed sequence

        Note: only upper-case 'T' is replaced, matching the original behavior.
        """
        return self.dna.replace('T', 'U')

    def complement(self) -> str:
        """Return the base-wise complement of the sequence.

        Fixed to use a local intermediate instead of mutating instance state:
        the old code stored its scratch list on ``self.letters`` as a side
        effect (including a dead ``self.letters = ''`` assignment).
        """
        return ''.join(constant.BASE_COMPLEMENT[base] for base in self.dna)

    def reverse_complement(self) -> str:
        """Return the reverse complement (the complement read back-to-front)."""
        return self.complement()[::-1]

    def QtoPhred33(self, Q):
        """Turn quality score Q into its Phred+33 ASCII-encoded character.

        Args:
            Q (int): base quality

        Returns:
            str: the single character ``chr(Q + 33)``
        """
        return chr(Q + 33)

    def phred33ToQ(self, qual):
        """Turn a Phred+33 ASCII-encoded quality character into Q.

        Args:
            qual (str): single quality character

        Returns:
            int: the quality score ``ord(qual) - 33``
        """
        return ord(qual) - 33
algorithm/leetcode/Python_2.7.10/00003.py | leonard-sxy/algorithm-practice | 1 | 12769925 | #
## https://leetcode.com/problems/longest-substring-without-repeating-characters/
#
class Solution(object):
    """LeetCode 3: length of the longest substring without repeating characters."""

    def lengthOfLongestSubstring(self, s):
        """Return the length of the longest run of distinct characters in *s*.

        Uses an O(n) sliding window over the last-seen index of each character
        (the original re-scanned the current window on every step, O(n^2)).

        :type s: str
        :rtype: int
        :raises ValueError: if *s* is None
        """
        if s is None:
            raise ValueError('Parameter should be a String.')
        last_seen = {}
        best = 0
        window_start = 0
        for idx, ch in enumerate(s):
            if ch in last_seen and last_seen[ch] >= window_start:
                # Repeat inside the current window: slide the window past
                # the previous occurrence of this character.
                window_start = last_seen[ch] + 1
            last_seen[ch] = idx
            best = max(best, idx - window_start + 1)
        return best
# Ad-hoc smoke check executed on import; 'dvdf' -> 3 (result is discarded).
s = Solution();
s.lengthOfLongestSubstring('dvdf')
| 3.34375 | 3 |
quotesbot/spiders/toscrape-css.py | benmoorechd/quotesbot | 0 | 12769926 | <filename>quotesbot/spiders/toscrape-css.py
# -*- coding: utf-8 -*-
import scrapy
class ToScrapeCSSSpider(scrapy.Spider):
    """Generated dealer-inventory spider for www.doralkia.com.

    NOTE(review): ``Rule``, ``LinkExtractor``, ``Item``, ``Field`` and
    ``CarsForSaleMiamiFlDoralKia1Item`` are referenced but never imported, so
    evaluating this class body raises NameError as written. This looks like
    output from a spider-generator tool that expects those names in scope.
    Also, ``rules`` is only honored by CrawlSpider, not scrapy.Spider --
    confirm the intended base class.
    """
    name = "www.doralkia.com"
    allowed_domains = ['www.doralkia.com']
    start_urls = ['https://www.doralkia.com/searchnew.aspx']
    # Follow every link on the site and hand each page to parse_item.
    rules = [
        Rule(
            LinkExtractor(
                allow=('.*'),
                deny=()
            ),
            callback='parse_item',
            follow=True
        )
    ]
    # Declarative extraction spec: one Item per vehicle card (.col-md-9),
    # with each Field reading one data-* attribute via a CSS selector.
    items = [
        [
            Item(
                CarsForSaleMiamiFlDoralKia1Item,
                None,
                '.col-md-9',
                [
                    Field(
                        'VIN',
                        'div:nth-child(4)::attr(data-vin)',
                        []),
                    Field(
                        'Condition',
                        'div:nth-child(4)::attr(data-vehicletype)',
                        []),
                    Field(
                        'Trim',
                        'div:nth-child(4)::attr(data-trim)',
                        []),
                    Field(
                        'Transmission',
                        'div:nth-child(4)::attr(data-trans)',
                        []),
                    Field(
                        'Stock',
                        'div:nth-child(4)::attr(data-stocknum)',
                        []),
                    Field(
                        'Sale_Price',
                        'div:nth-child(4)::attr(data-price)',
                        []),
                    Field(
                        'MSRP',
                        'div:nth-child(4)::attr(data-msrp)',
                        []),
                    Field(
                        'Bodystyle',
                        'div:nth-child(4)::attr(data-bodystyle)',
                        []),
                    Field(
                        'Certified',
                        'div:nth-child(4)::attr(data-cpo)',
                        []),
                    Field(
                        'Engine',
                        'div:nth-child(4)::attr(data-engine)',
                        []),
                    Field(
                        'Exterior_Color',
                        'div:nth-child(4)::attr(data-extcolor)',
                        []),
                    Field(
                        'Fuel_Type',
                        'div:nth-child(4)::attr(data-fueltype)',
                        []),
                    Field(
                        'Interior_Color',
                        'div:nth-child(4)::attr(data-intcolor)',
                        []),
                    Field(
                        'Make',
                        'div:nth-child(4)::attr(data-make)',
                        []),
                    Field(
                        'Model',
                        'div:nth-child(4)::attr(data-model)',
                        []),
                    Field(
                        'Model_Code',
                        'div:nth-child(4)::attr(data-modelcode)',
                        []),
                    Field(
                        'MPG_City',
                        'div:nth-child(4)::attr(data-mpgcity)',
                        []),
                    Field(
                        'MPG_Highway',
                        'div:nth-child(4)::attr(data-mpghwy)',
                        []),
                    Field(
                        'Full_Title',
                        'div:nth-child(4)::attr(data-name)',
                        []),
                    # NOTE(review): selector is pinned to one specific VIN's
                    # element id -- almost certainly recorder output that will
                    # not generalize to other vehicles.
                    Field(
                        'Year',
                        'div#srpVehicle-KNDEUCAA0M7050784::attr(data-year)',
                        []),
                    Field(
                        'Image',
                        'div:nth-child(4) > .col-md-12 > .well > .well > .row > div:nth-child(1) > .vehiclePhoto > .img-responsive::attr(data-img)',
                        [],
                        True)])]]
| 2.28125 | 2 |
bandcamp_parser/album.py | strizhechenko/bandcamp-player | 16 | 12769927 | # coding=utf-8
from random import shuffle
from bs4 import BeautifulSoup
from bandcamp_parser.request import Request
class AlbumResult(object):
    """Lightweight search-result record: pulls title/href out of a soup tag."""

    def __init__(self, soup):
        tag_attrs = soup.attrs
        self.title = tag_attrs['title']
        self.href = tag_attrs['href']

    def __repr__(self) -> str:
        return '\n<BandcampAlbumResult: title: {0} href: {1}>'.format(self.title, self.href)
class Album(object):
    """ Album object provides access to its tracks """

    def __init__(self, url):
        # Full album URL, e.g. https://artist.bandcamp.com/album/name
        self.url = url

    def page(self) -> str:
        """ :returns: album's page html (fetched over HTTP on every call) """
        return Request.get(self.url).content

    def tracks(self) -> list:
        """ :returns: list of urls of tracks in album """
        soup = BeautifulSoup(self.page(), "html.parser")
        results = soup.find('table', attrs={'id': 'track_table'}).find_all('a')
        # Keep only real links and drop in-page '#lyrics' anchors.
        results = [item.attrs['href'] for item in results if item.has_attr('href')]
        results = [item for item in results if '#lyrics' not in item]
        # hrefs are site-relative; prefix them with everything before '/album'.
        return [self.url[:self.url.find('/album')] + item for item in results]

    def track_random(self) -> str:
        """ :returns: link to random track """
        tracks = self.tracks()
        shuffle(tracks)
        return tracks[0]
| 3.09375 | 3 |
examples/test_oscpart.py | Tarheel-Formal-Methods/kaa-optimize | 0 | 12769928 | <reponame>Tarheel-Formal-Methods/kaa-optimize
from kaa.reach import ReachSet
from kaa.plotutil import Plot
from models.oscpart import OscPart
from kaa.timer import Timer
def test_OscPart():
    """Compute a 20-step reachable set for the OscPart model and plot it."""
    model = OscPart()

    reach = ReachSet(model)
    flowpipe = reach.computeReachSet(20)

    # Plot the flowpipe over variables 0, 1 and 2.
    plot = Plot()
    plot.add(flowpipe)
    plot.plot(0, 1, 2)

    Timer.generate_stats()
| 2.03125 | 2 |
src/pushbroom/__init__.py | gpanders/Janitor | 4 | 12769929 | __version__ = "v1.0.0"
from .sweep import sweep
| 1.03125 | 1 |
service/models.py | oscar-king/A-Decentralised-Digital-Identity-Architecture | 4 | 12769930 | <filename>service/models.py
from flask_login import UserMixin
from service import db
class User(UserMixin, db.Model):
    """Flask-Login user backed by SQLAlchemy.

    Identified solely by *y* -- presumably a public-key/credential value in
    this decentralised-identity scheme; confirm against callers.
    """
    # Primary key: the user's `y` value (opaque string, up to 256 chars).
    y = db.Column(db.String(256), primary_key=True)

    def __init__(self, y):
        self.y = y

    def save_to_db(self):
        # Persist this user and commit immediately.
        db.session.add(self)
        db.session.commit()
data.py | EyeEyeloving/Replication_selfcar-BC_win10 | 0 | 12769931 | <reponame>EyeEyeloving/Replication_selfcar-BC_win10
"""
Python module to load the data for training
"""
import cv2
import csv
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib.image as mpimg
from sklearn.utils import shuffle
from tqdm import tqdm
# Function to load dataset
# Function to load dataset
def load_dataset(csv_path, relative_path):
    """Load the driving-log dataset into memory with flip augmentation.

    For every CSV row, the center, left and right camera frames are loaded
    (steering correction is applied inside ``_load_image``), and each frame
    is additionally added flipped left-right with a negated steering angle.
    The three copy-pasted per-camera blocks of the original are collapsed
    into one loop; the sample order is unchanged.

    Inputs
    ---
    csv_path: path to training data csv
    relative_path: relative path to training data

    Outputs
    ---
    X: Training data numpy array
    y: Training labels numpy array
    """
    # Read CSV lines
    lines = []
    with open(csv_path) as csvfile:
        reader = csv.reader(csvfile)
        print("Loading CSV File ...")
        for line in tqdm(reader):
            lines.append(line)

    images = []
    measurements = []
    print("Loading Data ...")
    for line in tqdm(lines):
        # Camera index: 0 = center, 1 = left, 2 = right.
        for camera in range(3):
            image, measurement = _load_image(line, camera, relative_path)
            images.append(image)
            measurements.append(measurement)
            # Augmentation: mirrored frame with negated steering angle.
            images.append(np.fliplr(image))
            measurements.append(-1 * measurement)

    X = np.array(images)
    y = np.array(measurements)
    return X, y
# Function to generate a Generator
# Function to generate a Generator
def load_generator(csv_path, relative_path, batch_size=5):
    """Build endless train/validation batch generators over the driving log.

    Each yielded batch holds ``6 * batch_size`` samples: for every CSV row,
    the center/left/right frames plus their horizontal flips with negated
    steering angles.

    Inputs
    ---
    csv_path: csv file to read data from
    relative_path: relative path of the data
    batch_size: number of CSV rows per batch (each row expands to 6 samples)

    Outputs
    ---
    (train_generator, validation_generator, n_train_rows, n_validation_rows)
    """
    # Read CSV lines
    lines = []
    with open(csv_path) as csvfile:
        reader = csv.reader(csvfile)
        print("Loading CSV File ...")
        for line in tqdm(reader):
            lines.append(line)

    train_data, validation_data = train_test_split(lines, test_size=0.2)

    # Define a generator function
    def generator(data, batch_size=batch_size):
        num_data = len(data)
        while True:  # Keras-style: loop over the data forever.
            # Bug fix: sklearn.utils.shuffle returns a shuffled copy and does
            # NOT shuffle in place, so the original `shuffle(data)` discarded
            # its result and never actually reshuffled between epochs.
            data = shuffle(data)
            for offset in range(0, num_data, batch_size):
                batch_data = data[offset: offset + batch_size]
                images = []
                measurements = []
                # Generate batches
                for batch in batch_data:
                    # Camera index: 0 = center, 1 = left, 2 = right.
                    for camera in range(3):
                        image, measurement = _load_image(batch, camera, relative_path)
                        images.append(image)
                        measurements.append(measurement)
                        # Augmentation: mirrored frame, negated steering.
                        images.append(np.fliplr(image))
                        measurements.append(-1 * measurement)

                X = np.array(images)
                y = np.array(measurements)
                X, y = shuffle(X, y)
                yield (X, y)

    return (generator(train_data), generator(validation_data),
            len(train_data), len(validation_data))
# Private function to load image
# Private function to load image
def _load_image(line, index, relative_path):
    """Load one camera frame and its steering angle from a CSV row.

    Inputs
    ---
    line: csv row (image paths in columns 0-2, steering angle in column 3)
    index: camera selector -- 0 center, 1 left, 2 right
    relative_path: directory prefix for the image files

    Outputs
    ---
    image: loaded image array
    measurement: steering angle, shifted by the per-camera correction
    """
    # The CSV stores Windows-style paths; keep only the file name.
    filename = line[index].split('\\')[-1]
    image = mpimg.imread(relative_path + filename)

    # Side cameras get a fixed steering offset toward the road center;
    # any other index (the center camera) gets no correction.
    corrections = {1: 0.2, 2: -0.2}
    measurement = float(line[3]) + corrections.get(index, 0)
    return image, measurement
simpleTweetCrawler.py | maicondmenezes/tweetCrawler | 1 | 12769932 | #@title Biblioteca de funções execute este trecho após o cabeçalho
# -*- coding: utf-8 -*-
def login_Twitter(chave_consumidor, segredo_consumidor, token_acesso, token_acesso_segredo):
    """Authenticate with the Twitter API and return a tweepy API handle."""
    # Build the OAuth handler from the consumer key pair.
    autenticacao = tw.OAuthHandler(chave_consumidor, segredo_consumidor)
    # Attach the user access tokens.
    autenticacao.set_access_token(token_acesso, token_acesso_segredo)
    # wait_on_rate_limit makes tweepy sleep through rate-limit windows.
    return tw.API(autenticacao, wait_on_rate_limit=True)
def search_tweets(twitter, term, startDay, finalDay, maxItens):
    """Return the text of up to maxItens Portuguese-language tweets that
    match *term* between startDay and finalDay."""
    cursor = tw.Cursor(
        twitter.search,
        q=term,
        lang='pt',
        since=startDay,
        until=finalDay,
    )
    texts = []
    for tweet in cursor.items(maxItens):
        texts.append(tweet.text)
    return texts
def extract_nouns_list(tweets, out):
    """Tokenize each tweet, keep only non-punctuation tokens tagged NOUN by
    spaCy, and return one token list per tweet, refreshing the progress
    widget after every tweet."""
    total = int(len(tweets))
    # Lower-case every tweet and split it into a list of words.
    tokenized = [tweet.lower().split() for tweet in tweets]
    results = []
    for done, word_list in enumerate(tokenized, start=1):
        parsed = sp(str(word_list))
        # Drop punctuation and keep only nouns.
        nouns = [
            token.orth_
            for token in parsed
            if not token.is_punct and token.pos_ == 'NOUN'
        ]
        results.append(nouns)
        out.update(progress('Extraindo substantivos...', done, total))
    return results
def count_words_frequency(wordsList):
    """Flatten a list of token lists and count how often each token occurs.

    Returns a collections.Counter mapping token -> frequency.
    """
    # chain.from_iterable flattens without unpacking the outer list as *args.
    flattened = itertools.chain.from_iterable(wordsList)
    return collections.Counter(flattened)
def plot_popular_words(wordsList, itensAmount, title):
    """Draw a horizontal bar chart of the itensAmount most common entries
    of a collections.Counter (wordsList), titled *title*."""
    popular_words = pd.DataFrame(wordsList.most_common(itensAmount), columns=['words', 'frequency'])
    fig, ax = plt.subplots(figsize=(8, 8))
    # Plot horizontal bar graph
    popular_words.sort_values(by='frequency').plot.barh(x='words',
                      y='frequency',
                      ax=ax,
                      color="purple")
    ax.set_title(title)
    plt.show()
def extract_named_entities(tweetsList, out):
    """Run spaCy NER over every tweet and return a list of entity-string
    lists (one per tweet), refreshing the progress widget as it goes."""
    total = int(len(tweetsList))
    results = []
    for done, tweet in enumerate(tweetsList, start=1):
        parsed = sp(str(tweet))
        results.append([str(entity) for entity in parsed.ents])
        out.update(progress('Identificando entidades...', done, total))
    return results
def process_tweet_querie(term, startDay, finalDay, podium):
    """End-to-end pipeline: connect to Twitter, collect up to 2000 tweets
    for *term* in [startDay, finalDay], extract nouns and named entities,
    and plot the *podium* most frequent of each.

    SECURITY NOTE(review): API credentials are hard-coded below (partially
    redacted in this copy). Move them to environment variables or a config
    file that is not committed.
    """
    API_KEY = 'BP7tNDh2UPbELpR1sQyiRtY6G'
    API_SECRET_KEY = '<KEY>'
    token_acesso = '<KEY>'
    token_acesso_segredo = '<KEY>'
    BEARER_TOKEN = '<PASSWORD>'
    counter = 1
    leitorMAX = 8  # total number of pipeline steps shown in the progress bar
    out = display(progress('Conectando ao Twitter', counter, leitorMAX), display_id=True)
    outTask = display(progress('...', 0, 1), display_id=True)
    twitterConn = login_Twitter(API_KEY, API_SECRET_KEY, token_acesso, token_acesso_segredo)
    counter+=1
    out.update(progress('Coleta de tweets...', counter, leitorMAX))
    tweetsList = search_tweets(twitterConn, term, startDay, finalDay, 2000)
    # NOTE(review): this first `title` is overwritten below before being
    # used anywhere — dead assignment, candidate for removal.
    title = (f'Pesquisa de Tweets por Termos\n'
    f'Termos usados: {term}\n'
    f'Data de início: {startDay} | Data final: {finalDay}\n'
    f'{len(tweetsList)} tweets foram coletados\n'
    f'As {podium} mais usadas no período')
    counter+=1
    out.update(progress('Extração de Substantivos', counter, leitorMAX))
    nounsInTweets = extract_nouns_list(tweetsList, outTask)
    counter+=1
    out.update(progress('Cálculo de Frequência de Substantivos', counter, leitorMAX))
    nounsFrequency = count_words_frequency(nounsInTweets)
    counter+=1
    out.update(progress('Identificação de entidades', counter, leitorMAX))
    entitiesInTweets = extract_named_entities(tweetsList, outTask)
    counter+=1
    out.update(progress('Cálculo de Frequência de Entidades', counter, leitorMAX))
    entitiesFrequency = count_words_frequency(entitiesInTweets)
    # Title for the noun-frequency chart.
    title = (f'Pesquisa de Tweets por Termos\n'
    f'Termos usados: {term}\n'
    f'Data de início: {startDay} | Data final: {finalDay}\n'
    f'{len(tweetsList)} tweets foram coletados\n'
    f'Os {podium} termos mais usadas no período')
    counter+=1
    out.update(progress('Plotagem de frequência de substantivos', counter, leitorMAX))
    plot_popular_words(nounsFrequency, podium, title)
    # Title for the named-entity chart.
    title = (f'Pesquisa de Tweets por Termos\n'
    f'Termos usados: {term}\n'
    f'Data de início: {startDay} | Data final: {finalDay}\n'
    f'{len(tweetsList)} tweets foram coletados\n'
    f'As {podium} entidades mais mencionadas no período')
    counter+=1
    out.update(progress('Plotagem de frequência de entidades', counter, leitorMAX))
    plot_popular_words(entitiesFrequency, podium, title)
# Helper that renders the progress bar widget.
def progress(message, value, max=100):
    """Return an IPython HTML widget showing *message* and a <progress>
    bar at value/max.

    NOTE(review): the parameter name `max` shadows the builtin; renaming
    would break keyword callers, so it is kept.
    """
    progresso = ((value * 100) / max)  # percentage (float division)
    # NOTE(review): the trailing comma after max='{max}' inside the
    # <progress> tag is not valid HTML attribute syntax — browsers tend to
    # tolerate it, but confirm/clean up.
    return HTML("""
        <span> {message} </span>
        <br>
        <progress
            value='{value}'
            max='{max}',
            style='width: 99%'
        >
            {value}
        </progress>
        <span> {value} | {max} :: {progresso} % :: </span>
    """.format(value=value, max=max, progresso=progresso, message=message))
| 3.125 | 3 |
tests/api/test-disp.py | arienchen/pytibrv | 12 | 12769933 |
from pytibrv.status import *
from pytibrv.api import *
from pytibrv.disp import *
import unittest
class DispatcherTest(unittest.TestCase):
    """Exercises tibrvDispatcher create / name / destroy round-trips.

    Requires a working TIBCO Rendezvous environment (pytibrv bindings).
    """
    @classmethod
    def setUpClass(cls):
        # Open the TIB/RV machinery once for the whole test class.
        status = tibrv_Open()
        assert status == TIBRV_OK, tibrvStatus_GetText(status)
    @classmethod
    def tearDownClass(cls):
        tibrv_Close()
    def test_create(self):
        """Create a dispatcher on the default queue, set/get its name, destroy it."""
        que = TIBRV_DEFAULT_QUEUE
        status, disp = tibrvDispatcher_Create(que, 1.0)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status = tibrvDispatcher_SetName(disp, 'TEST')
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        status, name = tibrvDispatcher_GetName(disp)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
        self.assertEqual('TEST', name)
        status = tibrvDispatcher_Destroy(disp)
        self.assertEqual(TIBRV_OK, status, tibrvStatus_GetText(status))
# Run the dispatcher tests when executed as a script.
if __name__ == "__main__" :
    unittest.main(verbosity=2)
| 2.40625 | 2 |
duke-cs671-fall21-coupon-recommendation/outputs/rules/RF/17_features/numtrees_12/rule_7.py | apcarrik/kaggle | 0 | 12769934 | def findDecision(obj): #obj[0]: Passanger, obj[1]: Weather, obj[2]: Time, obj[3]: Coupon, obj[4]: Coupon_validity, obj[5]: Gender, obj[6]: Age, obj[7]: Maritalstatus, obj[8]: Children, obj[9]: Education, obj[10]: Occupation, obj[11]: Income, obj[12]: Bar, obj[13]: Coffeehouse, obj[14]: Restaurant20to50, obj[15]: Direction_same, obj[16]: Distance
# {"feature": "Occupation", "instances": 85, "metric_value": 0.9879, "depth": 1}
if obj[10]<=19:
# {"feature": "Education", "instances": 80, "metric_value": 0.971, "depth": 2}
if obj[9]<=2:
# {"feature": "Restaurant20to50", "instances": 60, "metric_value": 0.9183, "depth": 3}
if obj[14]<=2.0:
# {"feature": "Maritalstatus", "instances": 56, "metric_value": 0.9403, "depth": 4}
if obj[7]>0:
# {"feature": "Age", "instances": 37, "metric_value": 0.9868, "depth": 5}
if obj[6]<=4:
# {"feature": "Time", "instances": 33, "metric_value": 0.9993, "depth": 6}
if obj[2]<=3:
# {"feature": "Bar", "instances": 25, "metric_value": 0.9896, "depth": 7}
if obj[12]>0.0:
# {"feature": "Coupon", "instances": 14, "metric_value": 0.8631, "depth": 8}
if obj[3]>0:
# {"feature": "Income", "instances": 13, "metric_value": 0.7793, "depth": 9}
if obj[11]<=3:
# {"feature": "Distance", "instances": 10, "metric_value": 0.8813, "depth": 10}
if obj[16]<=2:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.9544, "depth": 11}
if obj[13]>1.0:
# {"feature": "Coupon_validity", "instances": 5, "metric_value": 0.7219, "depth": 12}
if obj[4]>0:
return 'False'
elif obj[4]<=0:
# {"feature": "Passanger", "instances": 2, "metric_value": 1.0, "depth": 13}
if obj[0]<=1:
# {"feature": "Weather", "instances": 2, "metric_value": 1.0, "depth": 14}
if obj[1]<=0:
# {"feature": "Gender", "instances": 2, "metric_value": 1.0, "depth": 15}
if obj[5]<=0:
# {"feature": "Children", "instances": 2, "metric_value": 1.0, "depth": 16}
if obj[8]<=0:
# {"feature": "Direction_same", "instances": 2, "metric_value": 1.0, "depth": 17}
if obj[15]<=0:
return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
else: return 'True'
elif obj[13]<=1.0:
# {"feature": "Gender", "instances": 3, "metric_value": 0.9183, "depth": 12}
if obj[5]<=0:
return 'True'
elif obj[5]>0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[16]>2:
return 'False'
else: return 'False'
elif obj[11]>3:
return 'False'
else: return 'False'
elif obj[3]<=0:
return 'True'
else: return 'True'
elif obj[12]<=0.0:
# {"feature": "Income", "instances": 11, "metric_value": 0.9457, "depth": 8}
if obj[11]<=6:
# {"feature": "Distance", "instances": 6, "metric_value": 0.9183, "depth": 9}
if obj[16]<=1:
return 'False'
elif obj[16]>1:
# {"feature": "Coupon", "instances": 3, "metric_value": 0.9183, "depth": 10}
if obj[3]>0:
return 'True'
elif obj[3]<=0:
return 'False'
else: return 'False'
else: return 'True'
elif obj[11]>6:
return 'True'
else: return 'True'
else: return 'True'
elif obj[2]>3:
# {"feature": "Coffeehouse", "instances": 8, "metric_value": 0.8113, "depth": 7}
if obj[13]>0.0:
return 'True'
elif obj[13]<=0.0:
# {"feature": "Passanger", "instances": 3, "metric_value": 0.9183, "depth": 8}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
return 'True'
else: return 'True'
else: return 'False'
else: return 'True'
elif obj[6]>4:
return 'True'
else: return 'True'
elif obj[7]<=0:
# {"feature": "Gender", "instances": 19, "metric_value": 0.7425, "depth": 5}
if obj[5]>0:
# {"feature": "Coffeehouse", "instances": 11, "metric_value": 0.9457, "depth": 6}
if obj[13]<=1.0:
# {"feature": "Distance", "instances": 6, "metric_value": 0.9183, "depth": 7}
if obj[16]<=2:
# {"feature": "Passanger", "instances": 5, "metric_value": 0.7219, "depth": 8}
if obj[0]<=1:
return 'False'
elif obj[0]>1:
# {"feature": "Time", "instances": 2, "metric_value": 1.0, "depth": 9}
if obj[2]>2:
return 'False'
elif obj[2]<=2:
return 'True'
else: return 'True'
else: return 'False'
elif obj[16]>2:
return 'True'
else: return 'True'
elif obj[13]>1.0:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
else: return 'True'
elif obj[14]>2.0:
return 'True'
else: return 'True'
elif obj[9]>2:
# {"feature": "Coffeehouse", "instances": 20, "metric_value": 0.971, "depth": 3}
if obj[13]>1.0:
# {"feature": "Gender", "instances": 15, "metric_value": 0.9968, "depth": 4}
if obj[5]>0:
# {"feature": "Income", "instances": 11, "metric_value": 0.9457, "depth": 5}
if obj[11]<=5:
# {"feature": "Direction_same", "instances": 9, "metric_value": 0.7642, "depth": 6}
if obj[15]<=0:
# {"feature": "Distance", "instances": 8, "metric_value": 0.5436, "depth": 7}
if obj[16]>1:
return 'False'
elif obj[16]<=1:
return 'True'
else: return 'True'
elif obj[15]>0:
return 'True'
else: return 'True'
elif obj[11]>5:
return 'True'
else: return 'True'
elif obj[5]<=0:
return 'True'
else: return 'True'
elif obj[13]<=1.0:
return 'False'
else: return 'False'
else: return 'False'
elif obj[10]>19:
return 'False'
else: return 'False'
| 2.609375 | 3 |
olednaytto/main.py | divergentti/Micropython | 0 | 12769935 | <reponame>divergentti/Micropython
"""
Scripti lukee sensoria ja näyttää kolme eri sivua tietoja sekä sensorista että ESP32:sta ja lähettää arvot
mqtt-brokerille.
Ulkoiset kirjastot:
SPI OLED näytölle: sh1160-kirjastoa, jonka voit ladata täältä https://github.com/robert-hh/SH1106
SPI kytkentä oletukset:
SSD1306 NodeMCU-32S(ESP32)
GND ----> GND
VCC ----> 3v3 (3.3V)
D0 ----> GPIO 18 SCK (SPI Clock)
D1 ----> GPIO 23 MOSI (sama kuin SDA)
RES ----> GPIO 17 Reset
DC ----> GPIO 16 Data/Command select
CS ----> GPIO 5 Chip Select
CCS811 sensorille: https://github.com/Notthemarsian/CCS811/blob/master/CCS811.py
I2C kytkentä oletukset:
SCL = 22
SDA = 21
CCS811 muista kytkeä nWake -> GND!
Asynkroninen MQTT: https://github.com/peterhinch/micropython-mqtt/blob/master/mqtt_as/README.md
21.11.2020 <NAME>
22.11.2020 Lisätty DHT22 (AM2302) sensorin luku lämpötilalle ja kosteudelle
24.11.2020 Lisätty näytön kääntö, paikallisajan (dst) laskenta ja himmennys
29.11.2020 Lisätty sensorille lähetettävä tieto kosteudesta ja lämpötilasta, jotka parantavat tarkkuutta sekä
muutettu kaikki lähetettävät arvot käyttämään keskiarvoja, jolloin anturin satunnaiset heitot häviävät.
2.12.2020 Tiputettu prosessorin nopeus 80 MHz lämmöntuoton vähentämiseksi
"""
from machine import I2C, SPI, Pin
import sh1106
import ccs811
import time
import uasyncio as asyncio
import utime
import esp32
from mqtt_as import MQTTClient
import network
import gc
from mqtt_as import config
import machine
import dht
# tuodaan parametrit tiedostosta parametrit.py
from parametrit import CLIENT_ID, MQTT_SERVERI, MQTT_PORTTI, MQTT_KAYTTAJA, \
MQTT_SALASANA, SSID1, SALASANA1, SSID2, SALASANA2, AIHE_CO2, AIHE_TVOC, \
DHT22_KOSTEUS_KORJAUSKERROIN, DHT22_LAMPO_KORJAUSKERROIN, DHT22_KOSTEUS, DHT22_LAMPO
# Pick the WiFi password matching whichever configured SSID is active.
kaytettava_salasana = None
if network.WLAN(network.STA_IF).config('essid') == SSID1:
    kaytettava_salasana = SALASANA1
elif network.WLAN(network.STA_IF).config('essid') == SSID2:
    kaytettava_salasana = SALASANA2

# Fill the mqtt_as client configuration from the parameters module.
config['server'] = MQTT_SERVERI
config['ssid'] = network.WLAN(network.STA_IF).config('essid')
config['wifi_pw'] = kaytettava_salasana
# NOTE(review): the secret was redacted in this copy — this line is not
# valid Python as-is; it should read: config['password'] = MQTT_SALASANA
config['password'] = <PASSWORD>
config['port'] = MQTT_PORTTI
config['client_id'] = CLIENT_ID
client = MQTTClient(config)

# Module-level state: last MQTT publish time, boot time, sensor error count.
edellinen_mqtt_klo = utime.time()
aloitusaika = utime.time()
anturilukuvirheita = 0
def restart_and_reconnect():
    """Print a timestamped notice, wait one second, then hard-reset the board."""
    aika = ratkaise_aika()
    # BUGFIX: ratkaise_aika() returns a (date, time) 2-tuple. Feeding a
    # 2-tuple to a single-%s format string raises "not all arguments
    # converted during string formatting", so the notice (and possibly the
    # reset) never happened. Wrap the tuple so %s formats it as one value.
    print('%s: Ongelmia. Boottaillaan 1s kuluttua.' % (aika,))
    time.sleep(1)
    # Reset the board.
    machine.reset()
class SPInaytonohjain:
    """SH1106 OLED display controller over SPI: 128x64 px, 16 chars x 6 rows."""
    def __init__(self, res=17, dc=16, cs=5, sck=18, mosi=23, leveys=16, rivit=6, lpikselit=128, kpikselit=64):
        self.rivit = []            # wrapped lines of the current long text
        self.nayttotekstit = []    # raw chunks of the current long text
        self.aika = 5  # default display-on time (seconds)
        self.rivi = 1
        """ Muodostetaan näytönohjaukseen tarvittavat objektit """
        # SPI wiring pins
        self.res = Pin(res) # reset
        self.dc = Pin(dc) # data
        self.cs = Pin(cs) # chip select
        # Create the SPI object, sck = d0, mosi = SDA
        self.spi = SPI(2, baudrate=115200, sck=Pin(sck), mosi=Pin(mosi))
        # Create the display object
        self.nayttoleveys = leveys # characters
        self.nayttorivit = rivit # rows
        self.pikselit_leveys = lpikselit # pixels
        self.pikselit_korkeus = kpikselit
        self.naytto = sh1106.SH1106_SPI(self.pikselit_leveys, self.pikselit_korkeus, self.spi, self.dc,
                                        self.res, self.cs)
        self.naytto.poweron()
        self.naytto.init_display()
        self.kaanteinen = False    # inverted-colors flag
    async def pitka_teksti_nayttoon(self, teksti, aika, rivi=1):
        """Wrap *teksti* into display-width chunks and draw one page of it.

        NOTE(review): when the text needs more than one page (sivuja > 1)
        nothing is drawn at all — confirm whether paging was ever finished.
        """
        self.aika = aika
        self.rivi = rivi
        self.nayttotekstit.clear()
        self.rivit.clear()
        """ Teksti (str) ja aika (int) miten pitkään tekstiä näytetään """
        self.nayttotekstit = [teksti[y-self.nayttoleveys:y] for y in range(self.nayttoleveys,
                                                                           len(teksti)+self.nayttoleveys, self.nayttoleveys)]
        for y in range(len(self.nayttotekstit)):
            self.rivit.append(self.nayttotekstit[y])
        if len(self.rivit) > self.nayttorivit:
            sivuja = len(self.nayttotekstit) // self.nayttorivit
        else:
            sivuja = 1
        if sivuja == 1:
            for z in range(0, len(self.rivit)):
                self.naytto.text(self.rivit[z], 0, self.rivi + z * 10, 1)
    async def teksti_riville(self, teksti, rivi, aika):
        """Draw one line of text on row *rivi* for *aika* seconds.

        NOTE(review): the '<NAME>' literal below looks like a redaction
        artifact of the original overflow placeholder text — confirm.
        """
        self.aika = aika
        """ Teksti (str), rivit (int) ja aika (int) miten pitkään tekstiä näytetään """
        if len(teksti) > self.nayttoleveys:
            self.naytto.text('<NAME>', 0, 1 + rivi * 10, 1)
        elif len(teksti) <= self.nayttoleveys:
            self.naytto.text(teksti, 0, 1 + rivi * 10, 1)
    async def aktivoi_naytto(self):
        """Wake the panel, show the framebuffer for self.aika seconds, then sleep it."""
        self.naytto.sleep(False)
        self.naytto.show()
        await asyncio.sleep(self.aika)
        self.naytto.sleep(True)
        self.naytto.init_display()
    async def kontrasti(self, kontrasti=255):
        """Set panel contrast (1-255).

        NOTE(review): `> 1 or < 255` is true for every integer — the range
        guard was probably meant to be `and`. Left as-is because callers
        rely on the current always-apply behaviour.
        """
        if kontrasti > 1 or kontrasti < 255:
            self.naytto.contrast(kontrasti)
    async def kaanteinen_vari(self, kaanteinen=False):
        """Enable/disable inverted (negative) colors."""
        self.kaanteinen = kaanteinen
        self.naytto.invert(kaanteinen)
    async def kaanna_180_astetta(self, kaanna=False):
        """Rotate the display output by 180 degrees."""
        self.naytto.rotate(kaanna)
    async def piirra_kehys(self):
        """Draw a 1-px frame around the whole screen, color matching the inversion state."""
        if self.kaanteinen is False:
            self.naytto.framebuf.rect(1, 1, self.pikselit_leveys-1, self.pikselit_korkeus-1, 0xffff)
        else:
            self.naytto.framebuf.rect(1, 1, self.pikselit_leveys - 1, self.pikselit_korkeus - 1, 0x0000)
    async def piirra_alleviivaus(self, rivi, leveys):
        """Underline *leveys* characters on text row *rivi* (8 px per character)."""
        rivikorkeus = self.pikselit_korkeus / self.nayttorivit
        alkux = 1
        alkuy = 8 + (int(rivikorkeus * rivi))
        merkkileveys = int(8 * leveys)
        if self.kaanteinen is False:
            self.naytto.framebuf.hline(alkux, alkuy, merkkileveys, 0xffff)
        else:
            self.naytto.framebuf.hline(alkux, alkuy, merkkileveys, 0x0000)
    async def resetoi_naytto(self):
        """Hardware-reset the display."""
        self.naytto.reset()
class KaasuSensori:
    """CCS811 eCO2/tVOC gas sensor on I2C, with rolling-average fields
    that laske_keskiarvot() maintains externally."""
    def __init__(self, i2cvayla=0, scl=22, sda=21, taajuus=400000, osoite=90):
        self.i2c = I2C(i2cvayla, scl=Pin(scl), sda=Pin(sda), freq=taajuus)
        self.laiteosoite = osoite
        self.sensori = ccs811.CCS811(self.i2c)
        self.eCO2 = 0              # latest eCO2 reading (ppm)
        self.tVOC = 0              # latest tVOC reading (ppb)
        self.eCO2_keskiarvo = 0    # rolling average, filled externally
        self.eCO2_arvoja = 0       # number of samples in the average
        self.tVOC_keskiarvo = 0
        self.tVOC_arvoja = 0
        self.luettu_aika = utime.time()  # timestamp of the last read
    async def lue_arvot(self):
        """Poll the sensor once per second and store fresh readings when ready."""
        while True:
            if self.sensori.data_ready():
                self.eCO2 = self.sensori.eCO2
                self.tVOC = self.sensori.tVOC
                self.luettu_aika = utime.time()
            await asyncio.sleep_ms(1000)
    async def laheta_lampo_ja_kosteus_korjaus(self, lampoin, kosteusin):
        """Feed temperature/humidity to the CCS811 for environmental
        compensation, after sanity-checking the values."""
        if (float(lampoin) > -40) and (float(lampoin) < 150) and (float(kosteusin) > 0) and (float(kosteusin) < 101):
            self.sensori.put_envdata(float(kosteusin), float(lampoin))
class LampojaKosteus:
    """DHT22 (AM2302) temperature/humidity reader with correction factors."""
    def __init__(self, pinni=4, lukuvali=2):
        self.pinni = pinni          # GPIO pin of the DHT22
        self.lukuvali = lukuvali    # seconds between reads
        self.lampo = None           # latest temperature string, or None
        self.kosteus = None         # latest humidity string, or None
        self.anturi = dht.DHT22(Pin(self.pinni))
        self.lampo_keskiarvo = 0    # rolling averages, filled externally
        self.kosteus_keskiarvo = 0
    async def lue_arvot(self):
        """Measure every self.lukuvali seconds; after >50 read errors the
        board is rebooted via restart_and_reconnect()."""
        global anturilukuvirheita
        while True:
            try:
                self.anturi.measure()
            except OSError as e:
                print("Anturilukuvirhe %s", e)
                self.lampo = None
                self.kosteus = None
                anturilukuvirheita += 1
                if anturilukuvirheita > 50:
                    restart_and_reconnect()
            # NOTE(review): execution reaches here even after a failed
            # measure(), so temperature()/humidity() below may return stale
            # data (or raise) right after an error — consider skipping the
            # rest of the iteration on failure.
            if (self.anturi.temperature() > -40) and (self.anturi.temperature() < 150):
                self.lampo = '{:.1f}'.format(self.anturi.temperature() * DHT22_LAMPO_KORJAUSKERROIN)
            if (self.anturi.humidity() > 0) and (self.anturi.humidity() < 101):
                self.kosteus = '{:.1f}'.format(self.anturi.humidity() * DHT22_KOSTEUS_KORJAUSKERROIN)
            await asyncio.sleep(self.lukuvali)
def ratkaise_aika():
    """Return (date, time) strings in Finnish local time (EET/EEST).

    The RTC is assumed to run in UTC; a fixed offset is added depending on
    whether the current moment falls inside the daylight-saving period.
    BUGFIX: the winter/summer offsets were swapped — the pre-March branch
    added +3 h (summer offset) and the summer branch added +2 h. Finland is
    UTC+2 in winter and UTC+3 in summer.
    """
    (vuosi, kuukausi, kkpaiva, tunti, minuutti, sekunti, viikonpva, vuosipaiva) = utime.localtime()
    # Simple DST: approximate changeover days for the current year.
    # NOTE(review): these day-of-month formulas come from the original code
    # and only approximate the EU last-Sunday rules — verify against a calendar.
    kesa_maalis = utime.mktime((vuosi, 3, (14 - (int(5 * vuosi / 4 + 1)) % 7), 1, 0, 0, 0, 0, 0))
    talvi_marras = utime.mktime((vuosi, 10, (7 - (int(5 * vuosi / 4 + 1)) % 7), 1, 0, 0, 0, 0, 0))
    if utime.mktime(utime.localtime()) < kesa_maalis:
        # Before the spring changeover: standard time, UTC+2.
        dst = utime.localtime(utime.mktime(utime.localtime()) + 7200)
    elif utime.mktime(utime.localtime()) < talvi_marras:
        # Between the changeovers: daylight-saving time, UTC+3.
        dst = utime.localtime(utime.mktime(utime.localtime()) + 10800)
    else:
        # After the autumn changeover: back to standard time, UTC+2.
        dst = utime.localtime(utime.mktime(utime.localtime()) + 7200)
    (vuosi, kuukausi, kkpaiva, tunti, minuutti, sekunti, viikonpva, vuosipaiva) = dst
    paiva = "%s.%s.%s" % (kkpaiva, kuukausi, vuosi)
    kello = "%s:%s:%s" % ("{:02d}".format(tunti), "{:02d}".format(minuutti), "{:02d}".format(sekunti))
    return paiva, kello
# Module-level singletons: display controller, CCS811 gas sensor, DHT22 reader.
naytin = SPInaytonohjain()
kaasusensori = KaasuSensori()
tempjarh = LampojaKosteus()
async def kerro_tilannetta():
    """Debug task: print the latest temperature/humidity once per second."""
    while True:
        # print("RSSI %s" % network.WLAN(network.STA_IF).status('rssi'), end=",")
        if tempjarh.lampo is not None:
            print('Lampo: %s C' % tempjarh.lampo)
        if tempjarh.kosteus is not None:
            print('Kosteus: %s %%' % tempjarh.kosteus)
        await asyncio.sleep(1)
async def laske_keskiarvot():
    """Maintain rolling averages over up to 20 samples per metric.

    Averaging smooths out random sensor spikes. Results are written into
    the kaasusensori and tempjarh module singletons once per second.
    """
    eco2_keskiarvot = []
    tvoc_keskiarvot = []
    lampo_keskiarvot = []
    kosteus_keskiarvot = []
    while True:
        if kaasusensori.eCO2 is not None:
            eco2_keskiarvot.append(kaasusensori.eCO2)
            kaasusensori.eCO2_keskiarvo = (sum(eco2_keskiarvot) / len(eco2_keskiarvot))
            kaasusensori.eCO2_arvoja = len(eco2_keskiarvot)
            # Keep a 20-sample window: drop the oldest once full.
            if len(eco2_keskiarvot) == 20:
                eco2_keskiarvot.pop(0)
            # Should never happen; defensive reset.
            elif len(eco2_keskiarvot) > 20:
                eco2_keskiarvot.clear()
        if kaasusensori.tVOC is not None:
            tvoc_keskiarvot.append(kaasusensori.tVOC)
            kaasusensori.tVOC_keskiarvo = (sum(tvoc_keskiarvot) / len(tvoc_keskiarvot))
            kaasusensori.tVOC_arvoja = len(tvoc_keskiarvot)
            if len(tvoc_keskiarvot) == 20:
                tvoc_keskiarvot.pop(0)
            elif len(tvoc_keskiarvot) > 20:
                tvoc_keskiarvot.clear()
        if tempjarh.lampo is not None:
            lampo_keskiarvot.append(float(tempjarh.lampo))
            tempjarh.lampo_keskiarvo = sum(lampo_keskiarvot) / len(lampo_keskiarvot)
            if len(lampo_keskiarvot) == 20:
                lampo_keskiarvot.pop(0)
            elif len(lampo_keskiarvot) > 20:
                lampo_keskiarvot.clear()
        if tempjarh.kosteus is not None:
            kosteus_keskiarvot.append(float(tempjarh.kosteus))
            tempjarh.kosteus_keskiarvo = sum(kosteus_keskiarvot) / len(kosteus_keskiarvot)
            if len(kosteus_keskiarvot) == 20:
                kosteus_keskiarvot.pop(0)
            elif len(kosteus_keskiarvot) > 20:
                kosteus_keskiarvot.clear()
        await asyncio.sleep(1)
async def sivu_1():
    """Page 1: date/time plus the latest raw eCO2/tVOC/temp/humidity values.

    Values above common harmfulness thresholds are shown in inverted colors.
    """
    await naytin.teksti_riville("PVM: %s" % ratkaise_aika()[0], 0, 5)
    await naytin.teksti_riville("KLO: %s" % ratkaise_aika()[1], 1, 5)
    await naytin.piirra_alleviivaus(1, 20)
    await naytin.teksti_riville("eCO2: %s ppm" % kaasusensori.eCO2, 2, 5)
    # Thresholds follow common CO2 harmfulness guideline values.
    if kaasusensori.eCO2 > 1200:
        await naytin.kaanteinen_vari(True)
    else:
        await naytin.kaanteinen_vari(False)
    await naytin.teksti_riville("tVOC: %s ppb" % kaasusensori.tVOC, 3, 5)
    if kaasusensori.tVOC > 100:
        await naytin.kaanteinen_vari(True)
    else:
        await naytin.kaanteinen_vari(False)
    if tempjarh.lampo is not None:
        await naytin.teksti_riville("Temp: %s C" % tempjarh.lampo, 4, 5)
    if tempjarh.kosteus is not None:
        await naytin.teksti_riville("Rh: %s %%" % tempjarh.kosteus, 5, 5)
    await naytin.kaanna_180_astetta(True)
    # BUGFIX: dim at night. The original used `and`, but no time is both
    # after 20:00 and before 08:00, so dimming could never trigger.
    if (ratkaise_aika()[1] > '20:00:00') or (ratkaise_aika()[1] < '08:00:00'):
        await naytin.kontrasti(2)
    else:
        await naytin.kontrasti(100)
    await naytin.aktivoi_naytto()
    # await naytin.piirra_alleviivaus(3, 7)
    await asyncio.sleep_ms(100)
async def sivu_2():
    """Page 2: rolling-average eCO2/tVOC/temperature/humidity readings.

    NOTE(review): inversion is switched on above thresholds but never
    switched back off on this page — it is reset by sivu_1 on the next cycle.
    """
    await naytin.teksti_riville("KESKIARVOT", 0, 5)
    await naytin.piirra_alleviivaus(0, 10)
    if kaasusensori.eCO2_keskiarvo > 1200:
        await naytin.kaanteinen_vari(True)
    if kaasusensori.tVOC_keskiarvo > 100:
        await naytin.kaanteinen_vari(True)
    await naytin.teksti_riville("eCO2:{:0.1f} ppm ".format(kaasusensori.eCO2_keskiarvo), 2, 5)
    await naytin.teksti_riville("tVOC:{:0.1f} ppb".format(kaasusensori.tVOC_keskiarvo), 3, 5)
    await naytin.teksti_riville("Temp:{:0.1f} C".format(tempjarh.lampo_keskiarvo), 4, 5)
    await naytin.teksti_riville("Rh :{:0.1f} %".format(tempjarh.kosteus_keskiarvo), 5, 5)
    await naytin.kaanna_180_astetta(True)
    # BUGFIX: dim at night — `and` could never be true for this wrap-around
    # time window; it must be `or`.
    if (ratkaise_aika()[1] > '20:00:00') or (ratkaise_aika()[1] < '08:00:00'):
        await naytin.kontrasti(2)
    else:
        await naytin.kontrasti(100)
    await naytin.aktivoi_naytto()
    await asyncio.sleep_ms(100)
async def sivu_3():
    """Status page: uptime, access point, RSSI, free memory and hall sensor."""
    await naytin.teksti_riville("STATUS", 0, 5)
    await naytin.piirra_alleviivaus(0, 6)
    await naytin.teksti_riville("Up s.: %s" % (utime.time() - aloitusaika), 1, 5)
    await naytin.teksti_riville("AP: %s" % network.WLAN(network.STA_IF).config('essid'), 2, 5)
    await naytin.teksti_riville("rssi: %s" % network.WLAN(network.STA_IF).status('rssi'), 3, 5)
    await naytin.teksti_riville("Memfree: %s" % gc.mem_free(), 4, 5)
    await naytin.teksti_riville("Hall: %s" % esp32.hall_sensor(), 5, 5)
    await naytin.kaanna_180_astetta(True)
    # Dim the display for the night.
    # BUGFIX: the original `and` made the condition unsatisfiable for this
    # wrap-around time window (after 20:00 AND before 08:00); use `or`.
    if (ratkaise_aika()[1] > '20:00:00') or (ratkaise_aika()[1] < '08:00:00'):
        await naytin.kontrasti(2)
    else:
        await naytin.kontrasti(100)
    await naytin.aktivoi_naytto()
    await asyncio.sleep_ms(100)
async def mqtt_raportoi():
    """Publish averaged readings to the MQTT broker (at most once per
    minute) and feed the current temperature/humidity averages back to the
    CCS811 for environmental compensation."""
    global edellinen_mqtt_klo
    n = 0
    while True:
        await asyncio.sleep(5)
        # print('mqtt-publish', n)
        # Heartbeat counter published every 5 seconds.
        await client.publish('result', '{}'.format(n), qos=1)
        n += 1
        if (kaasusensori.eCO2_keskiarvo > 0) and (kaasusensori.tVOC_keskiarvo > 0) and \
                (utime.time() - edellinen_mqtt_klo) > 60:
            try:
                await client.publish(AIHE_CO2, str(kaasusensori.eCO2_keskiarvo), retain=False, qos=0)
                await client.publish(AIHE_TVOC, str(kaasusensori.tVOC_keskiarvo), retain=False, qos=0)
                await client.publish(DHT22_LAMPO, str(tempjarh.lampo_keskiarvo), retain=False, qos=0)
                await client.publish(DHT22_KOSTEUS, str(tempjarh.kosteus_keskiarvo), retain=False, qos=0)
                await kaasusensori.laheta_lampo_ja_kosteus_korjaus(tempjarh.kosteus_keskiarvo, tempjarh.lampo_keskiarvo)
                edellinen_mqtt_klo = utime.time()
            except OSError as e:
                # Show the failure on the OLED in inverted colors.
                await naytin.kaanteinen_vari(True)
                await naytin.pitka_teksti_nayttoon("Virhe %s:" % e, 5)
                await naytin.aktivoi_naytto()
async def main():
    """Connect to MQTT, start the background sensor/averaging/reporting
    tasks, lower the CPU clock and cycle the three display pages forever."""
    MQTTClient.DEBUG = False
    await client.connect()
    # Enable the next line to see background status prints.
    # asyncio.create_task(kerro_tilannetta())
    asyncio.create_task(kaasusensori.lue_arvot())
    asyncio.create_task(tempjarh.lue_arvot())
    asyncio.create_task(laske_keskiarvot())
    asyncio.create_task(mqtt_raportoi())
    # ESP32 defaults to 160 MHz; drop the CPU clock to reduce heat.
    machine.freq(80000000)
    while True:
        await sivu_1()
        await sivu_2()
        await sivu_3()
        gc.collect()

asyncio.run(main())
| 1.734375 | 2 |
PyGems Office Utilities.py | Mamuntheprogrammer/PyGems-Office-Utilities | 1 | 12769936 | <gh_stars>1-10
import re
from tkinter import *
import tkinter as tk
from tkinter.font import Font
import webbrowser
from tkinter import ttk
from tkinter import filedialog,messagebox
from PyPDF2 import PdfFileMerger,PdfFileReader, PdfFileWriter
import glob,string,os,sys,collections,time
import pandas as pd
from pandas import ExcelWriter
from random import *
import random
from sys import exit
# NOTE(review): module-level `x` appears unused in the visible code — confirm before removing.
x=''
# Silence all Python warnings unless -W was given on the command line.
if not sys.warnoptions:
    import warnings
    warnings.simplefilter("ignore")
#main inherit classes
#----------------------------------------------------------------
# Root window, notebook tabs and styling for the PyGems GUI.
win =tk.Tk()
main_menu=tk.Menu(win)
#-----------------main gui title-----------------
# BUGFIX: window title was misspelled "Utilites" (the project itself is
# named "PyGems Office Utilities").
win.title("PyGems Office Utilities")
program_directory=sys.path[0]
win.iconphoto(True, PhotoImage(file=os.path.join(program_directory, "ficon.png")))
#TABS SIZES
style = ttk.Style()
settings = {"TNotebook.Tab": {"configure": {"padding": [28,10],
            "background": "#fdd57e","font" : ('URW Gothic L', '11', 'bold') },
            "TNotebook": {"configure": {"tabmargins": [1, 1, 1, 1] } },
            "map": {"background": [("selected", "#C70039"), ("active", "#fc9292")],
            "foreground": [("selected", "#ffffff"), ("active", "#000000")] } } }
style.theme_create("mi_estilo", parent="alt", settings=settings)
style.theme_use("mi_estilo")
tab_control = ttk.Notebook(win)
excel_m = ttk.Frame(tab_control)
excel_s = ttk.Frame(tab_control)
pdf_m = ttk.Frame(tab_control)
pdf_s = ttk.Frame(tab_control)
rename = ttk.Frame(tab_control)
# NOTE(review): "Spliter" in the tab labels is also a typo ("Splitter");
# left unchanged here in case screenshots/docs reference the current labels.
tab_control.add(excel_m, text='Excel Merger')
tab_control.add(excel_s, text='Excel Spliter')
tab_control.add(pdf_m, text='Pdf Merger')
tab_control.add(pdf_s, text='Pdf Spliter')
tab_control.add(rename, text='File Renamer')
#---------Status bar with a clickable link to pygems.com-------------
statusbar =Label(win, text="Click here to visit : pygems.com ",
                 bd=1,
                 relief=SUNKEN,
                 bg="#37474F",
                 fg='#fcf9ec',
                 height=2,
                 font="Times 13",
                 cursor="hand2"
                 )
statusbar.bind("<Button-1>", lambda e: opnlink("https://pygems.com/"))
statusbar.pack(side=BOTTOM, fill=X)
###### Name conflict ###########
def name_merge():
    """Return the first 'Merged-<n>.pdf' name that does not yet exist in
    the current working directory (avoids overwriting earlier results)."""
    n = 1
    template = "Merged-{}.pdf"
    # Probe Merged-1.pdf, Merged-2.pdf, ... until a free name is found.
    while os.path.isfile(template.format(n)):
        n += 1
    return template.format(n)
def name_split():
    """Return the first 'Splited-<n>.pdf' name that does not yet exist in
    the current working directory."""
    n = 1
    template = "Splited-{}.pdf"
    # Probe Splited-1.pdf, Splited-2.pdf, ... until a free name is found.
    while os.path.isfile(template.format(n)):
        n += 1
    return template.format(n)
def name_merge_e():
    """Return the first 'Merged-<n>.xlsx' name that does not yet exist in
    the current working directory."""
    n = 1
    template = "Merged-{}.xlsx"
    # Probe Merged-1.xlsx, Merged-2.xlsx, ... until a free name is found.
    while os.path.isfile(template.format(n)):
        n += 1
    return template.format(n)
def name_split_e():
    """Return the first 'Splited-<n>.xlsx' name that does not yet exist in
    the current working directory."""
    n = 1
    template = "Splited-{}.xlsx"
    # Probe Splited-1.xlsx, Splited-2.xlsx, ... until a free name is found.
    while os.path.isfile(template.format(n)):
        n += 1
    return template.format(n)
###############################################################
def opnlink(url):
    """Open *url* in a new browser window/tab."""
    webbrowser.open_new(url)
#--------------------- wait Screen ------------------
#----------- Excel Merger Start ------------------
#----------- Excel Merger Start ------------------
def fileid():
    """Return a random numeric id between 1 and 10000, as a string."""
    return str(randint(1, 10000))
def daction():
    """Ask the user for a working directory, show it in the entry field and
    chdir into it; on failure, keep/restore the entry's previous value.

    NOTE(review): an identical `daction` is defined again further down in
    this module, shadowing this one — confirm and remove one copy.
    """
    entry_d.delete(0, 'end')
    # The selected folder is stashed on the function object itself.
    daction.folder_selected = filedialog.askdirectory(initialdir="/",title='Please select a directory')
    if not daction.folder_selected:
        # Dialog cancelled: fall back to whatever the entry already held.
        daction.folder_selected=entry_d_var.get()
    else:
        entry_d.insert(0,daction.folder_selected)
    try:
        # os.chdir returns None; the pp binding is unused.
        pp=os.chdir(str(daction.folder_selected))
    except:
        messagebox.showerror("Error", "Files are Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
def mrgall():
    """Sheet-wise merge: copy every sheet of every .xlsx in the chosen
    directory into one workbook (one output sheet per input sheet), then
    open the folder in the system browser.

    NOTE(review): the bare `except` hides the real failure cause; the
    error dialog always blames permissions/directory.
    """
    try:
        btn_txt.set("Merging...")
        button_m.configure(state=DISABLED)
        messagebox.showinfo("Excel Merger","Before Click OK ! \n Must Close All Opened Excel Files of Working Directory")
        d=entry_d_var.get()
        os.chdir(d)
        x=0  # running sheet counter used to keep output sheet names unique
        name=name_merge_e()
        writer = ExcelWriter(str(name))
        for filename in glob.glob("*.xlsx"):
            excel_file = pd.ExcelFile(filename)
            (_, f_name) = os.path.split(filename)
            (f_short_name, _) = os.path.splitext(f_name)
            for sheet_name in excel_file.sheet_names:
                df_excel = pd.read_excel(filename, sheet_name=sheet_name)
                # Suffix with the counter so duplicate sheet names don't collide.
                df_excel.to_excel(writer, sheet_name+"- "+str(x+1), index=False)
                x=x+1
        writer.save()
        messagebox.showinfo("Information","Merge Complete")
        btn_txt.set("Merge")
        # progbar.destroy()
        path=os.getcwd()
        button_m.configure(state=NORMAL)
        webbrowser.open(path)
    except:
        messagebox.showerror("Error", "Files are Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
        btn_txt.set("Merge")
        button_m.configure(state=NORMAL)
def defaultmrg():
    """Row-wise merge of all .xlsx files in the chosen directory.

    Mode 1 ("Default"): read with header=None, concatenate everything and
    write without headers. Mode 2 ("Same Header"): read normally so pandas
    aligns on the shared header row.

    NOTE(review): the mode-2 branch never calls webbrowser.open(path), so
    the result folder only opens for mode 1 — confirm whether intentional.
    NOTE(review): the glob pattern d + "/*" + "*.xlsx" expands to
    '<d>/**.xlsx' — it works, but '<d>/*.xlsx' was probably intended.
    """
    try:
        btn_txt.set("Merging...")
        button_m.configure(state=DISABLED)
        messagebox.showinfo("Excel Merger","Before Click OK ! \n Must Close All Opened Excel Files of Working Directory")
        # pattern ='*.xlsx'
        # xllis=glob.glob(pattern)
        d=entry_d_var.get()
        os.chdir(d)
        pa=selct_typ1.get()
        name=name_merge_e()
        if pa==1:
            file_identifier = "*.xlsx"
            all_data = pd.DataFrame()
            for f in glob.glob(d + "/*" + file_identifier):
                df = pd.read_excel(f,header=None)
                all_data = all_data.append(df,ignore_index=True,sort=False)
            writer = pd.ExcelWriter(str(name), engine='xlsxwriter')
            all_data.to_excel(writer, sheet_name='Sheet1',index=False,header=None)
            writer.save()
            messagebox.showinfo("Information","Merge Complete")
            btn_txt.set("Merge")
            path=os.getcwd()
            button_m.configure(state=NORMAL)
            webbrowser.open(path)
        elif pa==2:
            file_identifier = "*.xlsx"
            all_data = pd.DataFrame()
            for f in glob.glob(d + "/*" + file_identifier):
                df = pd.read_excel(f)
                all_data = all_data.append(df,ignore_index=True,sort=False)
            # Re-resolve the output name (the earlier one is discarded here).
            name=name_merge_e()
            writer = pd.ExcelWriter(str(name), engine='xlsxwriter')
            all_data.to_excel(writer, sheet_name='Sheet1',index=False)
            writer.save()
            messagebox.showinfo("Information","Merge Complete")
            btn_txt.set("Merge")
            path=os.getcwd()
            button_m.configure(state=NORMAL)
    except:
        messagebox.showerror("Error", "Files are Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
        btn_txt.set("Merge")
        button_m.configure(state=NORMAL)
def mainf():
    """Dispatch the Merge button: modes 1 and 2 (default / same header)
    run defaultmrg(); anything else (sheet-wise) runs mrgall()."""
    mode = selct_typ1.get()
    if mode in (1, 2):
        defaultmrg()
    else:
        mrgall()
def daction():
    """Ask the user for a working directory, show it in the entry field and
    chdir into it.

    NOTE(review): this redefines (and shadows) the identical `daction`
    declared earlier in this module; the earlier copy is dead code.
    """
    entry_d.delete(0, 'end')
    daction.folder_selected = filedialog.askdirectory(initialdir="/",title='Please select a directory')
    if not daction.folder_selected:
        # Dialog cancelled: fall back to whatever the entry already held.
        daction.folder_selected=entry_d_var.get()
    else:
        entry_d.insert(0,daction.folder_selected)
    try:
        # os.chdir returns None; the pp binding is unused.
        pp=os.chdir(str(daction.folder_selected))
    except:
        messagebox.showerror("Error", "Files are Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
def empt():
    """Disable the Merge button."""
    button_m.configure(state=DISABLED)
def pygems():
    """Show a 'Merge Complete' info box.

    NOTE(review): the name suggests branding, but the body only pops this
    dialog — confirm intent.
    """
    messagebox.showinfo("Excel Merger","Merge Complete")
#---------sub Frame------------------------
# One container frame per notebook tab.
excel_mframe=Frame(excel_m)
excel_sframe=Frame(excel_s)
pdf_mframe=Frame(pdf_m)
pdf_sframe=Frame(pdf_s)
file_rename=Frame(rename)

excel_mframe.pack()
excel_sframe.pack()
pdf_mframe.pack()
pdf_sframe.pack()
file_rename.pack()

#-------EXCEL MERGER --------
# Sub-frames of the Excel Merger tab: radio buttons (r), directory (d),
# merge button (m) and warning text (p).
rframe=Frame(excel_m)
dframe=Frame(excel_m)
mframe=Frame(excel_m)
pframe=Frame(excel_m)

label_1=Label(excel_mframe,text="PyGems Excel Merger",
              bd=0,
              bg="#393e46",
              fg='#F4511E',
              font='Times 20',
              width=0,
              height=0
              )
label_1.pack(fill=X,pady=20)
label_x=Label(pframe,text="Warning : Don't close the application if showing Not Responding, just minimize the application")
label_x.pack()
#-----------Radio Button All-----------
# Merge-mode selector: 1=Default, 2=Same Header, 3=Sheet Wise (see mainf()).
selct_typ1=tk.IntVar()
selct_typ1.set(1)
#radiobtn1 = ttk.Radiobutton(rframe,text="Ignore Header" ,value=1,variable=selct_typ1)
radiobtn2 = ttk.Radiobutton(rframe,text="Default" ,value=1,variable=selct_typ1)
radiobtn3 = ttk.Radiobutton(rframe,text="Same Header" ,value=2,variable=selct_typ1)
radiobtn4 = ttk.Radiobutton(rframe,text="Sheet Wise" ,value=3,variable=selct_typ1)
#------------- directory entry------------
entry_d_var = StringVar()
entry_d=Entry(dframe,width=80,textvariable=entry_d_var,bg='#dedede')
entry_d_txt = entry_d_var.get()
#------------directory Button----------
button_d=tk.Button(dframe,relief=RAISED,font=('Times 10 bold'),text='Select Folder' ,fg='#fcf9ec',bg='#132238',command=daction)
#----------merge button-------------------
btn_txt=StringVar()
button_m=tk.Button(mframe,textvariable=btn_txt,command=mainf,relief=GROOVE,font=('Times 10 bold'),width=22,fg='#fcf9ec',bg='#132238')
btn_txt.set("Merge")
#radio pack
#radiobtn1.pack(side=LEFT,padx=20)
radiobtn2.pack(side=LEFT,padx=20)
radiobtn3.pack(side=LEFT,padx=20)
radiobtn4.pack(side=LEFT,padx=20)
entry_d.pack(ipady=4,side=LEFT,pady=13)
entry_d.focus()
button_d.pack(side=LEFT,padx=10,ipady=2,pady=13)
button_m.pack(pady=20)
#frame pack
#radio button pack
rframe.pack(pady=10)
#directory entry pack
dframe.pack(padx=0)
#merge button pack
mframe.pack(pady=0)
pframe.pack(pady=5)
excel_mframe.config(bg="#D9D9D9")
dframe.config(bg="#ffffff")
rframe.config(bg="#D9D9D9")
mframe.config(bg="#D9D9D9")
###############################################################
###############################################################
###############################################################
#-------------excel spliter -------------------################
sdframe=Frame(excel_sframe)
smframe=Frame(excel_sframe)
seframe=Frame(excel_sframe)
esrframe=Frame(excel_sframe)
pframe=Frame(excel_sframe)
#----- function ------
#---------Button for select directory-------------
def sdaction():
    # "Select File" handler for the Excel-split tab: pick an .xlsx file
    # and mirror its path into the entry box.
    sdaction.file_selected = filedialog.askopenfilename(initialdir="/",title='Please Select The file',filetypes=(("Excel file","*.xlsx"),("all files","*.")))
    #os.chdir(chd)
    try:
        if not sdaction.file_selected:
            # Dialog cancelled: keep whatever is already typed in.
            sdaction.file_selected=sentry_d_var.get()
        else:
            sentry_d.insert(0,sdaction.file_selected)
    except:
        messagebox.showerror("Error", "File is Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
def cerrint():
    # "Split done" pop-up. NOTE(review): this name is re-defined later
    # (PDF split section); the later definition wins at runtime.
    messagebox.showinfo("Excel Spliter", "Split Done !")
def smfunc():
    """Run the Excel split selected by the esselct_typ1 radio buttons.

    1 = one output workbook per sheet, 2 = split the rows across N files,
    3 = one sheet per unique value (combination) of 1-3 chosen columns.
    """
    try:
        def xtaddt():
            # Dialog asking how many files to split the rows into (mode 2).
            xtaddt.nwin = Toplevel()
            wi_gui=250
            hi_gui=150
            wi_scr=xtaddt.nwin.winfo_screenwidth()
            hi_scr=xtaddt.nwin.winfo_screenheight()
            x=(wi_scr/2)-(wi_gui/2)
            y=(hi_scr/2)-(hi_gui/2)
            xtaddt.nwin.geometry('%dx%d+%d+%d'%(wi_gui,hi_gui,x,y))
            xtaddt.nwin.title("Row Wise Split")
            l=Label(xtaddt.nwin,text="Enter Number Of Files : ")
            l.pack(pady=10)
            xtaddt.ed=StringVar()
            eadd=Entry(xtaddt.nwin,width=40,textvariable=xtaddt.ed)
            xtaddt.ed_txt=xtaddt.ed.get()
            eadd.pack(padx=30,pady=10)
            eadd.focus()
            b=Button(xtaddt.nwin,text="OK",command=esokb)
            b.pack(pady=10)
        def esokb():
            # OK button of the row-wise dialog: remember N, run the split.
            esokb.prc=xtaddt.ed.get()
            erwsplt()
            xtaddt.nwin.destroy()
        def erwsplt():
            # Mode 2: distribute the rows of the workbook over N files.
            try:
                sbtn_txt.set("Spliting...")
                sbutton_m.configure(state=DISABLED)
                messagebox.showinfo("Excel Merger","Before Click OK ! \n Must Close All Opened Excel Files \n Enable Macro Once (Trust access to the VBA project object model) \n Close workbook named Book1 or Book2 if opened \n Don't close the auto opened Excel Files ")
                tto=sentry_d_var.get()
                (ro, f_name) = os.path.split(tto)
                os.chdir(ro)
                df = pd.read_excel(f_name)
                total_file =int(esokb.prc)
                sloop_max_range = len(df)//(total_file)
                nxt = sloop_max_range
                fracpart = int(len(df)%total_file)
                t=0
                for x in range(0,total_file):
                    df2 = pd.DataFrame()
                    for y in range(t,sloop_max_range):
                        df2=df2.append(df.iloc[[y,],:])
                    writer = ExcelWriter('Splited File_wise'+'-'+str(x)+'.xlsx', engine='xlsxwriter')
                    df2.to_excel(writer,sheet_name='merged',index=False)
                    writer.save()
                    t=sloop_max_range
                    sloop_max_range=nxt+t
                    # The last chunk absorbs the len(df) % N leftover rows.
                    if sloop_max_range+fracpart==len(df):
                        sloop_max_range=sloop_max_range+fracpart
                    else:
                        sloop_max_range=nxt+t
                    df2.iloc[0:0]
                sbtn_txt.set("Split")
                sbutton_m.configure(state=NORMAL)
                messagebox.showinfo("Excel Spliter","Split Complete")
            except:
                sbtn_txt.set("Split")
                sbutton_m.configure(state=NORMAL)
                messagebox.showerror("Error", "File is Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
        def eswplt():
            # Mode 1: write each sheet of the workbook to its own file.
            try:
                sbtn_txt.set("Spliting...")
                sbutton_m.configure(state=DISABLED)
                messagebox.showinfo("Excel Merger","Before Click OK ! \n Must Close All Opened Excel Files of Working Directory")
                to=sentry_d_var.get()
                (ro, f_name) = os.path.split(to)
                os.chdir(ro)
                x=0
                excel_file = pd.ExcelFile(f_name)
                (_, f_name) = os.path.split(f_name)
                (f_short_name, _) = os.path.splitext(f_name)
                for sheet_name in excel_file.sheet_names:
                    df_excel = pd.read_excel(f_name, sheet_name=sheet_name)
                    writer = ExcelWriter(sheet_name+"- "+str(x+1)+"-Splited Sheet_wise.xlsx")
                    df_excel.to_excel(writer, sheet_name+"-"+str(x+1), index=False)
                    x=x+1
                    writer.save()
                sbtn_txt.set("Split")
                sbutton_m.configure(state=NORMAL)
                messagebox.showinfo("Excel Spliter","Split Complete")
            except:
                sbtn_txt.set("Split")
                sbutton_m.configure(state=NORMAL)
                messagebox.showerror("Error", "File is Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
        def xtaddt2():
            # Dialog for mode 3: pick how many columns (1-3) and their names.
            messagebox.showinfo("Excel Merger","Before Click OK ! \n Must Close All Opened Excel Files of Working Directory")
            xtaddt2.nwin = Toplevel()
            wi_gui=400
            hi_gui=350
            wi_scr=xtaddt2.nwin.winfo_screenwidth()
            hi_scr=xtaddt2.nwin.winfo_screenheight()
            x=(wi_scr/2)-(wi_gui/2)
            y=(hi_scr/2)-(hi_gui/2)
            xtaddt2.nwin.geometry('%dx%d+%d+%d'%(wi_gui,hi_gui,x,y))
            xtaddt2.nwin.title("Value Wise Split")
            ly=Label(xtaddt2.nwin,text="How many columns you want to filter ")
            ly.pack(pady=10)
            xtaddt2.selct_typ1=tk.IntVar()
            xtaddt2.selct_typ1.set(1)
            radiobtn2 = ttk.Radiobutton(xtaddt2.nwin,text="1" ,value=1,variable=xtaddt2.selct_typ1)
            radiobtn3 = ttk.Radiobutton(xtaddt2.nwin,text="2" ,value=2,variable=xtaddt2.selct_typ1)
            radiobtn4 = ttk.Radiobutton(xtaddt2.nwin,text="3" ,value=3,variable=xtaddt2.selct_typ1)
            radiobtn2.pack(pady=5)
            radiobtn3.pack(pady=5)
            radiobtn4.pack(pady=5)
            l=Label(xtaddt2.nwin,text="First Column Name : ")
            l.pack(pady=5)
            xtaddt2.ed1=StringVar()
            eadd=Entry(xtaddt2.nwin,width=40,textvariable=xtaddt2.ed1)
            xtaddt2.ed_txt1=xtaddt2.ed1.get()
            eadd.pack(padx=30,pady=5)
            eadd.focus()
            l1=Label(xtaddt2.nwin,text="Second Column Name : ")
            l1.pack(pady=5)
            xtaddt2.ed2=StringVar()
            eadd1=Entry(xtaddt2.nwin,width=40,textvariable=xtaddt2.ed2)
            xtaddt2.ed_txt2=xtaddt2.ed2.get()
            eadd1.pack(padx=30,pady=5)
            l2=Label(xtaddt2.nwin,text="Third Column Name : ")
            l2.pack(pady=5)
            xtaddt2.ed3=StringVar()
            eadd2=Entry(xtaddt2.nwin,width=40,textvariable=xtaddt2.ed3)
            xtaddt2.ed_txt3=xtaddt2.ed3.get()
            eadd2.pack(padx=30,pady=5)
            b=Button(xtaddt2.nwin,text="OK",command=esokb2)
            b.pack(pady=10)
        def esokb2():
            # OK button of the value-wise dialog: remember choices, run split.
            esokb2.r1=xtaddt2.selct_typ1.get()
            esokb2.p1=xtaddt2.ed1.get()
            esokb2.p2=xtaddt2.ed2.get()
            esokb2.p3=xtaddt2.ed3.get()
            evwplt()
            xtaddt2.nwin.destroy()
        def evwplt():
            # Mode 3: split into one sheet per unique value combination.
            try:
                sbtn_txt.set("Spliting...")
                sbutton_m.configure(state=DISABLED)
                def fone():
                    # Group by a single column.
                    try:
                        sbtn_txt.set("Spliting...")
                        sbutton_m.configure(state=DISABLED)
                        to=sentry_d_var.get()
                        (ro, f_name) = os.path.split(to)
                        os.chdir(ro)
                        b=f_name
                        a=esokb2.p1
                        name=name_split_e()
                        df=pd.read_excel(b)
                        names = df[a].unique().tolist()
                        writer = pd.ExcelWriter(str(name), engine='xlsxwriter')
                        for name in names:
                            mydf=df[(df[a] ==name)]
                            mydf.to_excel(writer, sheet_name=str(name),index=False)
                        writer.save()
                        sbtn_txt.set("Split")
                        sbutton_m.configure(state=NORMAL)
                        messagebox.showinfo("Excel Spliter","Split Complete")
                    except:
                        sbtn_txt.set("Split")
                        sbutton_m.configure(state=NORMAL)
                        messagebox.showerror("Error", "Check columns name and close the excel file , try again ")
                def ftwo():
                    # Group by two columns: one sheet per unique pair.
                    try:
                        sbtn_txt.set("Spliting...")
                        sbutton_m.configure(state=DISABLED)
                        to=sentry_d_var.get()
                        (ro, f_name) = os.path.split(to)
                        os.chdir(ro)
                        b=f_name
                        a=esokb2.p1
                        c=esokb2.p2
                        name=name_split_e()
                        df=pd.read_excel(b)
                        df = df.applymap(str)
                        writer = pd.ExcelWriter(str(name), engine='xlsxwriter')
                        df2 = df.drop_duplicates(subset=[a,c])
                        for i in range(0,len(df2)):
                            one=str(df2.iloc[i][a])
                            two=str(df2.iloc[i][c])
                            t=df[(df[a] == one)&(df[c]==two)]
                            t.to_excel(writer, sheet_name=one+"-"+two,index=False)
                        writer.save()
                        sbtn_txt.set("Split")
                        sbutton_m.configure(state=NORMAL)
                        messagebox.showinfo("Excel Spliter","Split Complete")
                    except:
                        sbtn_txt.set("Split")
                        sbutton_m.configure(state=NORMAL)
                        messagebox.showerror("Error", "Check columns name and close the excel file , try again ")
                def fthree():
                    # Group by three columns: one sheet per unique triple.
                    try:
                        sbtn_txt.set("Spliting...")
                        sbutton_m.configure(state=DISABLED)
                        to=sentry_d_var.get()
                        (ro, f_name) = os.path.split(to)
                        os.chdir(ro)
                        b=f_name
                        a=esokb2.p1
                        c=esokb2.p2
                        d=esokb2.p3
                        df=pd.read_excel(b)
                        df = df.applymap(str)
                        # BUG FIX: the original referenced an undefined `name`
                        # below (it created an unused random number instead),
                        # which raised NameError and always fell into the
                        # error dialog. Generate the output name exactly as
                        # fone()/ftwo() do.
                        name=name_split_e()
                        writer = pd.ExcelWriter(str(name), engine='xlsxwriter')
                        df2 = df.drop_duplicates(subset=[a,c,d])
                        for i in range(0,len(df2)):
                            one=str(df2.iloc[i][a])
                            two=str(df2.iloc[i][c])
                            three=str(df2.iloc[i][d])
                            t=df[(df[a] == one)&(df[c]==two)&(df[d]==three)]
                            t.to_excel(writer, sheet_name=one+"-"+two+"-"+three,index=False)
                        writer.save()
                        sbtn_txt.set("Split")
                        sbutton_m.configure(state=NORMAL)
                        messagebox.showinfo("Excel Spliter","Split Complete")
                    except:
                        sbtn_txt.set("Split")
                        sbutton_m.configure(state=NORMAL)
                        messagebox.showerror("Error", "Check columns name and close the excel file , try again ")
            except:
                sbtn_txt.set("Split")
                sbutton_m.configure(state=NORMAL)
                messagebox.showerror("Error", "Check columns name and close the excel file , try again ")
            h=int(esokb2.r1)
            if h==1:
                fone()
            elif h==2:
                ftwo()
            else:
                fthree()
        pa=esselct_typ1.get()
        if pa ==1:
            eswplt()
        elif pa==2:
            xtaddt()
        elif pa==3:
            xtaddt2()
    except:
        sbtn_txt.set("Split")
        sbutton_m.configure(state=NORMAL)
        messagebox.showerror("Error", "File is Read Only Or Wrong Directory !\n Visit : pygems.com For Tutorial")
# --- Excel-spliter tab widgets --------------------------------------------
label_1=Label(excel_sframe,text="PyGems Excel Spliter",
bd=0,
bg="#393e46",
fg='#F4511E',
font='Times 20',
width=0,
height=0
)
label_1.pack(pady=20)
label_x=Label(pframe,text="Warning : Don't close the application if showing Not Responding, just minimize the application")
label_x.pack()
# Split mode: 1=sheet-wise, 2=file-wise (rows over N files), 3=value-wise.
esselct_typ1=tk.IntVar()
esselct_typ1.set(1)
radiobtn1 = ttk.Radiobutton(esrframe,text="Sheet Wise Split",value=1,variable=esselct_typ1)
radiobtn2 = ttk.Radiobutton(esrframe,text="File Wise Split" ,value=2,variable=esselct_typ1)
radiobtn3 = ttk.Radiobutton(esrframe,text="Value Wise Split" ,value=3,variable=esselct_typ1)
#radiobtn4 = ttk.Radiobutton(esrframe,text="Random Num" ,value=4,variable=esselct_typ1)
#------------- directory entry------------
sentry_d_var = StringVar()
sentry_d=Entry(sdframe,width=80,textvariable=sentry_d_var,bg='#dedede')
sentry_d_txt = sentry_d_var.get()
#----------------entry box-----------
#------------directory Button----------
sbutton_d=tk.Button(sdframe,relief=RAISED,font=('Times 10 bold'),text='Select File' ,fg='#fcf9ec',bg='#132238',command=sdaction)
#----------merge button-------------------
sbtn_txt=StringVar()
sbutton_m=tk.Button(smframe,textvariable=sbtn_txt,command=smfunc,relief=GROOVE,font=('Times 10 bold'),width=22,fg='#fcf9ec',bg='#132238')
sbtn_txt.set("Split")
radiobtn1.pack(side=LEFT,padx=20)
radiobtn2.pack(side=LEFT,padx=20)
radiobtn3.pack(side=LEFT,padx=20)
#radiobtn3.pack(side=LEFT,padx=20)
# BUG FIX: the file entry and the "Select File" button were created but
# never packed, so users could not pick the workbook to split. Pack them
# the same way the other tabs do.
sentry_d.pack(ipady=4,side=LEFT,pady=13)
sbutton_d.pack(side=LEFT,padx=10,ipady=2,pady=13)
sbutton_m.pack(pady=20)
#frame pack
esrframe.pack(pady=10)
#directory entry pack
sdframe.pack(padx=0)
#sentry pack
seframe.pack(pady=0)
#merge button pack
smframe.pack(pady=0)
pframe.pack(pady=5)
excel_sframe.config(bg="#D9D9D9")
smframe.config(bg="#D9D9D9")
sdframe.config(bg="#ffffff")
seframe.config(bg="#D9D9D9")
esrframe.config(bg="#D9D9D9")
#############################################################
#############################################################
##################### PDF MERGER ############################
# Rows of the PDF-merger tab: radio row, directory row, merge button, warning.
prframe=Frame(pdf_m)
pdframe=Frame(pdf_m)
pmframe=Frame(pdf_m)
pddframe=Frame(pdf_m)
################ PDF MERGER ########################
def pdaction():
    # "Select Folder" handler for the PDF-merger tab: pick a folder,
    # mirror it into the entry box and chdir into it.
    pentry_d.delete(0, 'end')
    pfolder_selected = filedialog.askdirectory(initialdir="/",title='Please select a directory')
    if not pfolder_selected:
        # Dialog cancelled: keep whatever is already typed in.
        pfolder_selected=pentry_d_var.get()
    else:
        pentry_d.insert(0,pfolder_selected)
    try:
        pp=os.chdir(str(pfolder_selected))
    except:
        messagebox.showerror("Error", "Empty or wrong Directory")
def pempt():
    # Disable the PDF Merge button while a merge is in progress.
    pbutton_m.configure(state=DISABLED)
def pmfunc():
    """Merge every *.pdf in the selected folder into one output file.

    Mode 2 (Default) appends the PDFs as-is; mode 1 pads every odd-paged
    PDF with a blank page so double-sided printing stays aligned.
    """
    try:
        pfolder_selected=pentry_d_var.get()
        os.chdir(pfolder_selected)
        pattern ='*.pdf'
        pdfs=glob.glob(pattern)
        if not pdfs:
            messagebox.showerror("Error", "Wrong directory or There is no Pdf file found")
            pbtn_txt.set("Merge")
        else:
            pbtn_txt.set("Merging...")
            pbutton_m.configure(state=DISABLED)
            pm2=pselct_typ1.get()
            if pm2==2:
                pempt()
                merger = PdfFileMerger()
                for pdf in pdfs:
                    merger.append(pdf)
                #for name conflict
                name=name_merge()
                merger.write(str(name))
                # Release the source file handles (they leaked before).
                merger.close()
                pbutton_m.configure(state=NORMAL)
            elif pm2==1:
                pempt()
                outPdf= PdfFileWriter()
                name=name_merge()
                handles=[]
                for pdf in pdfs:
                    b=open(pdf,'rb')
                    # Keep the handle open: PdfFileReader reads pages lazily,
                    # so sources may only be closed after the output is written.
                    handles.append(b)
                    rpdf = PdfFileReader(b)
                    if rpdf.getNumPages() % 2 == 1:
                        outPdf.appendPagesFromReader(rpdf)
                        outPdf.addBlankPage()
                    else:
                        outPdf.appendPagesFromReader(rpdf)
                outStream=open(str(name),'wb')
                outPdf.write(outStream)
                outStream.close()
                for b in handles:
                    b.close()
                pbutton_m.configure(state=NORMAL)
            pbtn_txt.set("Merge")
            ppygems()
            webbrowser.open(pfolder_selected)
    except:
        messagebox.showerror("Error", "Enter a Valid Directory or No pdf found")
def ppygems():
    # Pop-up shown when the PDF merge finishes.
    messagebox.showinfo("Pdf Merger ","Merge Complete")
# --- PDF-merger tab widgets -----------------------------------------------
label_1=Label(pdf_m,text="PyGems Pdf Merger",
bd=0,
bg="#393e46",
fg='#F4511E',
font='Times 20',
width=0,
height=0
)
label_1.pack(pady=20)
label_x=Label(pddframe,text="Warning : Don't close the application if showing Not Responding, just minimize the application")
label_x.pack()
#-------------radio Button ----------------
# Merge mode: 1 = pad odd-paged PDFs with a blank page, 2 = merge as-is.
pselct_typ1=tk.IntVar()
pselct_typ1.set(2)
pradiobtn1 = ttk.Radiobutton(prframe,text="Add A Blank Page For Odd Number Of Pages",value=1,variable=pselct_typ1)
pradiobtn2 = ttk.Radiobutton(prframe,text="Default" ,value=2,variable=pselct_typ1)
pradiobtn1.pack(side=LEFT,padx=20)
pradiobtn2.pack(side=LEFT,padx=20)
#------------- directory entry------------
pentry_d_var = StringVar()
pentry_d=Entry(pdframe,width=80,textvariable=pentry_d_var,bg='#dedede')
pentry_d_txt = pentry_d_var.get()
#------------directory Button----------
pbutton_d=tk.Button(pdframe,relief=RAISED,font=('Times 10 bold'),text='Select Folder' ,fg='#fcf9ec',bg='#132238',command=pdaction)
#----------merge button-------------------
pbtn_txt=StringVar()
pbutton_m=tk.Button(pmframe,textvariable=pbtn_txt,command=pmfunc,relief=GROOVE,font=('Times 10 bold'),width=22,fg='#fcf9ec',bg='#132238')
pbtn_txt.set("Merge")
pentry_d.pack(ipady=4,side=LEFT,pady=13)
pentry_d.focus()
pbutton_d.pack(side=LEFT,padx=10,ipady=2,pady=13)
pbutton_m.pack(pady=20)
#frame pack
prframe.pack(pady=5)
#directory entry pack
pdframe.pack(padx=0)
#merge button pack
pmframe.pack(pady=0)
pddframe.pack(pady=0)
pdf_mframe.config(bg="#D9D9D9")
prframe.config(bg="#D9D9D9")
pmframe.config(bg="#D9D9D9")
#####################################################################
################### Pdf Split #########################################
#####################################################################
#####################################################################
# Rows of the PDF-split tab: directory, button, entry, radio row, warning.
psdframe=Frame(pdf_s)
psmframe=Frame(pdf_s)
pseframe=Frame(pdf_s)
psrframe=Frame(pdf_s)
pddsrframe=Frame(pdf_s)
#----- function ------
#---------Button for select directory-------------
def psdaction():
    """Ask for a PDF file, mirror its path into the split-tab entry box and
    chdir into its folder so split output lands next to the source."""
    psentry_d.delete(0, 'end')
    psdaction.file_selected = filedialog.askopenfilename(initialdir="/",title='Please file',filetypes=(("Pdf file","*.pdf"),("all files","*.")))
    try:
        if not psdaction.file_selected:
            # Dialog cancelled: fall back to whatever is already typed into
            # THIS tab's entry. (BUG FIX: the original read sentry_d_var,
            # the Excel-split tab's entry, here.)
            psdaction.file_selected=psentry_d_var.get()
        else:
            psentry_d.insert(0,psdaction.file_selected)
        if psdaction.file_selected:
            # BUG FIX: the original called os.chdir("") before the try block
            # when the dialog was cancelled, raising an unhandled exception.
            (folder, _) = os.path.split(psdaction.file_selected)
            os.chdir(folder)
    except:
        messagebox.showerror("Error", "Empty or wrong Directory")
def sempt():
    # Disable the PDF Split button while a split is in progress.
    psbutton_m.configure(state=DISABLED)
def errint():
    # Generic error dialog for the PDF split tab.
    messagebox.showerror("Error", "Read-only or Damaged file !\n Visit : pygems.com For Tutorial")
def cerrint():
    # "Split done" pop-up. NOTE(review): this shadows the earlier cerrint
    # defined in the Excel-split section; this later definition wins.
    messagebox.showinfo("Pdf Spliter", "Split Done !")
def psmfunc():
    """Run the PDF split chosen by the psselct_typ1 radio buttons.

    1 = write every page of the selected PDF to its own file,
    2 = ask for a comma-separated page list and write those pages into
    one new PDF.
    """
    def splt():
        # Mode 1: every page of the selected PDF into its own file.
        try:
            src = open(psdaction.file_selected, "rb")
            inputpdf = PdfFileReader(src)
            psbtn_txt.set("Spliting...")
            psbutton_m.configure(state=DISABLED)
            for i in range(inputpdf.numPages):
                output = PdfFileWriter()
                output.addPage(inputpdf.getPage(i))
                with open("Splited-Document-page-%s.pdf" % i, "wb") as outputStream:
                    output.write(outputStream)
            # Close the source only after all pages are written (PyPDF2
            # reads page content lazily); the original leaked this handle.
            src.close()
            cerrint()
            psbtn_txt.set("Split")
            psbutton_m.configure(state=NORMAL)
        except:
            errint()
    def prxtaddt():
        # Mode 2 dialog: ask which page numbers to keep (e.g. "2,3,4").
        try:
            # Open and parse the PDF once just to validate it before
            # showing the dialog; close the probe handle (leaked before).
            check = open(psdaction.file_selected, "rb")
            PdfFileReader(check)
            check.close()
            prxtaddt.nwin = Toplevel()
            wi_gui=350
            hi_gui=160
            wi_scr=prxtaddt.nwin.winfo_screenwidth()
            hi_scr=prxtaddt.nwin.winfo_screenheight()
            x=(wi_scr/2)-(wi_gui/2)
            y=(hi_scr/2)-(hi_gui/2)
            prxtaddt.nwin.geometry('%dx%d+%d+%d'%(wi_gui,hi_gui,x,y))
            prxtaddt.nwin.title("Pdf Spliter")
            prl=Label(prxtaddt.nwin,text="Enter Sequential Page Number Separated By Comma E.g : 2,3,4 ")
            prl.pack(pady=10)
            prxtaddt.pred=StringVar()
            preadd=Entry(prxtaddt.nwin,width=40,textvariable=prxtaddt.pred)
            prxtaddt.pred_txt=prxtaddt.pred.get()
            preadd.pack(padx=15,pady=10)
            preadd.focus()
            prb=Button(prxtaddt.nwin,text="OK",command=prokb)
            prb.pack(pady=10)
        except:
            errint()
    def prokb():
        # OK button of the page-number dialog: run the split, close dialog.
        spltn()
        prxtaddt.nwin.destroy()
    def spltn():
        # Mode 2: copy the requested (1-based) pages into one output PDF.
        try:
            src = open(psdaction.file_selected, "rb")
            inputpdf = PdfFileReader(src, strict=False)
            psbtn_txt.set("Spliting...")
            psbutton_m.configure(state=DISABLED)
            s=prxtaddt.pred.get()
            pnum=s.split(",")
            outPdf= PdfFileWriter()
            name=name_split()
            for i in pnum:
                # Index the page directly instead of the original scan over
                # every page per requested number; out-of-range numbers are
                # silently skipped, exactly as before.
                j = int(i)-1
                if 0 <= j < inputpdf.numPages:
                    outPdf.addPage(inputpdf.getPage(j))
            outStream=open(str(name),'wb')
            outPdf.write(outStream)
            outStream.close()
            src.close()
            psbtn_txt.set("Split")
            psbutton_m.configure(state=NORMAL)
            cerrint()
        except:
            psbtn_txt.set("Split")
            psbutton_m.configure(state=NORMAL)
            errint()
    a=psselct_typ1.get()
    if a==1:
        splt()
    elif a==2:
        prxtaddt()
    else:
        pass
# --- PDF-split tab widgets ------------------------------------------------
pslabel_1=Label(pdf_s,text="PyGems Pdf Spliter",
bd=0,
bg="#393e46",
fg='#F4511E',
font='Times 20',
width=0,
height=0
)
pslabel_1.pack(pady=20)
label_x=Label(pddsrframe,text="Warning : Don't close the application if showing Not Responding, just minimize the application")
label_x.pack()
#------------- directory entry------------
psentry_d_var = StringVar()
psentry_d=Entry(psdframe,width=80,textvariable=psentry_d_var,bg='#dedede')
psentry_d_txt = psentry_d_var.get()
#------------- Radio Button ---------
# Split mode: 1 = split all pages, 2 = split by page numbers.
psselct_typ1=tk.IntVar()
psselct_typ1.set(2)
# NOTE(review): "Spit All Pages" looks like a typo for "Split All Pages"
# (user-facing string, left unchanged here).
psradiobtn1 = ttk.Radiobutton(psrframe,text="Spit All Pages",value=1,variable=psselct_typ1)
psradiobtn2 = ttk.Radiobutton(psrframe,text="Split By Page Numbers" ,value=2,variable=psselct_typ1)
psradiobtn1.pack(side=LEFT,padx=20)
psradiobtn2.pack(side=LEFT,padx=20)
#------------directory Button----------
psbutton_d=tk.Button(psdframe,relief=RAISED,font=('Times 10 bold'),text='Select File' ,fg='#fcf9ec',bg='#132238',command=psdaction)
#----------merge button-------------------
psbtn_txt=StringVar()
psbutton_m=tk.Button(psmframe,textvariable=psbtn_txt,command=psmfunc,relief=GROOVE,font=('Times 10 bold'),width=22,fg='#fcf9ec',bg='#132238')
psbtn_txt.set("Split")
psentry_d.pack(ipady=4,side=LEFT,pady=13)
psentry_d.focus()
psbutton_d.pack(side=LEFT,padx=10,ipady=2,pady=13)
psbutton_m.pack(pady=20)
#frame pack
psrframe.pack(pady=10)
#directory entry pack
psdframe.pack(padx=0)
#sentry pack
pseframe.pack(pady=0)
#merge button pack
psmframe.pack(pady=0)
pddsrframe.pack(pady=0)
# NOTE(review): excel_sframe here looks like it was meant to be pdf_sframe.
excel_sframe.config(bg="#D9D9D9")
psmframe.config(bg="#D9D9D9")
psdframe.config(bg="#ffffff")
pseframe.config(bg="#D9D9D9")
psrframe.config(bg="#D9D9D9")
#############################################################
#############################################################
##################### Renamer ############################
############################################################
############################################################
# Rows of the batch-rename tab: radio row, directory row, button row, checkbox row.
rrrframe=Frame(rename)
rrdframe=Frame(rename)
rrmframe=Frame(rename)
rrcframe=Frame(rename)
def rrdaction():
    # "Select Folder" handler for the rename tab: pick a folder, mirror it
    # into the entry box and chdir into it.
    rrentry_d.delete(0, 'end')
    rrfolder_selected = filedialog.askdirectory(initialdir="/",title='Please select a directory')
    if not rrfolder_selected:
        # Dialog cancelled: keep whatever is already typed in.
        rrfolder_selected=rrentry_d_var.get()
    else:
        rrentry_d.insert(0,rrfolder_selected)
    try:
        rrpp=os.chdir(str(rrfolder_selected))
    except:
        messagebox.showerror("Error", "Empty or wrong Directory")
c=''  # NOTE(review): leftover global; nothing visible reads it.
def reerror():
    # Generic error dialog for the batch-rename tab.
    messagebox.showerror("Error","Checked All Or Mention File type \n Empty or wrong Directory")
def chkfiletyp():
    # Complain if the file-type entry is empty.
    # NOTE(review): no caller is visible in this file.
    gftype=pcnumentry_d_var.get()
    if gftype=="":
        reerror()
def repygems():
    # Pop-up shown when a batch rename finishes.
    messagebox.showinfo("Batch Rename","Batch Rename Complete")
def rrmfunc():
    # "Rename" button handler: batch-rename files in the chosen folder
    # according to the selected mode (1=replace, 2=add, 3=sequential
    # number, 4=random number, 5=random text).
    try:
        dd=rrentry_d_var.get()
        os.chdir(dd)
        rrbtn_txt.set("Renaiming...")
        rrbutton_m.configure(state=DISABLED)
        cboxvalue=cvar.get()
        ffftype=pcnumentry_d_var.get()
        bfiles = list(filter(lambda x: os.path.isfile(x), os.listdir()))
        tfiles=[]
        for file in glob.glob("*."+ffftype):
            tfiles.append(file)
        # NOTE(review): if "All" is ticked together with a file type, or
        # neither is given, `files` stays undefined; the rename helpers
        # then fail with NameError, which surfaces as the generic
        # reerror() dialog.
        if cboxvalue==1 and ffftype=="":
            files=bfiles
        elif cboxvalue !=1 and ffftype:
            files=tfiles
        else:
            pass
        def SNrename():
            # Mode 3: rename to 1.ext, 2.ext, ... NOTE(review): may clash
            # with files already named that way.
            try:
                i=1
                for file in files:
                    name,e = file.rsplit('.',1)
                    os.rename(file,str(i)+'.'+e)
                    i=i+1
                repygems()
            except:
                reerror()
        def RNrename():
            # Mode 4: rename every file to a random 4-digit number.
            try:
                for file in files:
                    name,e = file.rsplit('.',1)
                    RNUM =randint(1000,10000)
                    os.rename(file,str(RNUM)+'.'+e)
                repygems()
                #print("*** RENAME COMPLETED ***")
            except:
                reerror()
        def RSrename():
            # Mode 5: rename every file to five random uppercase letters.
            try:
                letters = string.ascii_uppercase
                for file in files:
                    name,e = file.rsplit('.',1)
                    s=''.join(random.choice(letters) for i in range(5))
                    os.rename(file,s+'.'+e)
                repygems()
                #print("*** RENAME COMPLETED ***")
            except:
                reerror()
        def ADDrename():
            # Mode 2: append the user-supplied text to every file name.
            #c=input("what do u want to Add :")
            try:
                d=xtaddt.ed.get()
                #print("value of d :",d)
                for file in files:
                    name,e = file.rsplit('.',1)
                    os.rename(file,name+d+'.'+e)
                repygems()
            except:
                reerror()
            #repygems()
            #print("*** RENAME COMPLETED ***")
        def RErename():
            # Mode 1: find/replace a substring in every file name.
            #x=input('insert what u want to replace :')
            #y=input('insert BY what u want to replace :')
            try:
                x=xtaddtre.edr.get()
                y=xtaddtre.ed2.get()
                for file in files:
                    name,e = file.rsplit('.',1)
                    rep=name.replace(x,y,)
                    os.rename(file,rep+'.'+e)
                repygems()
            except:
                reerror()
        a=rrselct_typ1.get()
        def xtaddt():
            # Dialog asking what text to add (mode 2).
            xtaddt.nwin = Toplevel()
            wi_gui=250
            hi_gui=150
            wi_scr=xtaddt.nwin.winfo_screenwidth()
            hi_scr=xtaddt.nwin.winfo_screenheight()
            x=(wi_scr/2)-(wi_gui/2)
            y=(hi_scr/2)-(hi_gui/2)
            xtaddt.nwin.geometry('%dx%d+%d+%d'%(wi_gui,hi_gui,x,y))
            xtaddt.nwin.title("Renamer")
            l=Label(xtaddt.nwin,text="Enter what you want to add : ")
            l.pack(pady=10)
            xtaddt.ed=StringVar()
            eadd=Entry(xtaddt.nwin,width=40,textvariable=xtaddt.ed)
            xtaddt.ed_txt=xtaddt.ed.get()
            eadd.pack(padx=30,pady=10)
            eadd.focus()
            b=Button(xtaddt.nwin,text="OK",command=okb)
            b.pack(pady=10)
        def xtaddtre():
            # Dialog asking for find/replace strings (mode 1).
            xtaddtre.nwin = Toplevel()
            wi_gui=250
            hi_gui=220
            wi_scr=xtaddtre.nwin.winfo_screenwidth()
            hi_scr=xtaddtre.nwin.winfo_screenheight()
            x=(wi_scr/2)-(wi_gui/2)
            y=(hi_scr/2)-(hi_gui/2)
            xtaddtre.nwin.geometry('%dx%d+%d+%d'%(wi_gui,hi_gui,x,y))
            xtaddtre.nwin.title("Renamer")
            l=Label(xtaddtre.nwin,text="Find What")
            l.pack(pady=10)
            xtaddtre.edr=StringVar()
            eadd=Entry(xtaddtre.nwin,width=40,textvariable=xtaddtre.edr)
            xtaddtre.ed_txt=xtaddtre.edr.get()
            eadd.pack(padx=35,pady=10)
            l2=Label(xtaddtre.nwin,text="Replace With")
            l2.pack(pady=10)
            xtaddtre.ed2=StringVar()
            eadd2=Entry(xtaddtre.nwin,width=40,textvariable=xtaddtre.ed2)
            xtaddtre.ed_txt=xtaddtre.ed2.get()
            eadd2.pack(padx=35,pady=10)
            eadd.focus()
            b2=Button(xtaddtre.nwin,text="OK",command=okbre)
            b2.pack(pady=10)
        def okb():
            # OK button of the "Add" dialog.
            c=xtaddt.ed.get()
            #print("print from :",c)
            ADDrename()
            xtaddt.nwin.destroy()
        def okbre():
            # OK button of the find/replace dialog.
            r=xtaddtre.edr.get()
            r2=xtaddtre.ed2.get()
            #print("print from :",c)
            RErename()
            xtaddtre.nwin.destroy()
        rrbutton_m.configure(state=NORMAL)
        rrbtn_txt.set("Rename")
        #ftyp = input("Please insert the file extension u want to rename or press enter to rename all type files : ")
        if a==3 :
            SNrename()
        elif a==4:
            RNrename()
        elif a==5:
            RSrename()
        elif a==2:
            xtaddt()
        elif a==1:
            xtaddtre()
        else:
            reerror()
    except:
        reerror()
# --- Batch-rename tab widgets ---------------------------------------------
label_1=Label(rename,text="PyGems Batch File Renamer",
bd=0,
bg="#393e46",
fg='#F4511E',
font='Times 20',
width=0,
height=0
)
label_1.pack(pady=20)
#-----------Radio Button All-----------
# Rename mode: 1=Replace, 2=Add, 3=Sequential Num, 4=Random Num, 5=Random Text.
rrselct_typ1=tk.IntVar()
rrselct_typ1.set(2)
radiobtn1 = ttk.Radiobutton(rrrframe,text="Replace" ,value=1,variable=rrselct_typ1)
radiobtn2 = ttk.Radiobutton(rrrframe,text="Add" ,value=2,variable=rrselct_typ1)
radiobtn3 = ttk.Radiobutton(rrrframe,text="Sequential Num" ,value=3,variable=rrselct_typ1)
radiobtn4 = ttk.Radiobutton(rrrframe,text="Random Num" ,value=4,variable=rrselct_typ1)
radiobtn5 = ttk.Radiobutton(rrrframe,text="Random Text" ,value=5,variable=rrselct_typ1)
#------- Check box --------------------------
# "All" checkbox: rename every file rather than only one extension.
cvar=tk.IntVar()
c1=Checkbutton(rrcframe,text="All",variable=cvar)
c1.pack(side=LEFT,padx=20)
#----------------entry box-----------
pc_label=Label(rrcframe,text="File Type E.g: pdf ")
pc_label.pack(pady=5,side=LEFT)
#----entry box--------
pcnumentry_d_var=StringVar()
pcsentry=tk.Entry(rrcframe,textvariable=pcnumentry_d_var)
pcsentry.pack(pady=5,side=LEFT)
#------------- directory entry------------
rrentry_d_var = StringVar()
rrentry_d=Entry(rrdframe,width=80,textvariable=rrentry_d_var,bg='#dedede')
rrentry_d_txt = rrentry_d_var.get()
#------------directory Button----------
rrbutton_d=tk.Button(rrdframe,relief=RAISED,font=('Times 10 bold'),text='Select Folder' ,fg='#fcf9ec',bg='#132238',command=rrdaction)
#----------merge button-------------------
rrbtn_txt=StringVar()
rrbutton_m=tk.Button(rrmframe,textvariable=rrbtn_txt,command=rrmfunc,relief=GROOVE,font=('Times 10 bold'),width=22,fg='#fcf9ec',bg='#132238')
rrbtn_txt.set("Rename")
#radio pack
radiobtn1.pack(side=LEFT,padx=20)
radiobtn2.pack(side=LEFT,padx=20)
radiobtn3.pack(side=LEFT,padx=20)
radiobtn4.pack(side=LEFT,padx=20)
radiobtn5.pack(side=LEFT,padx=20)
rrentry_d.pack(ipady=4,side=LEFT,pady=13)
rrentry_d.focus()
rrbutton_d.pack(side=LEFT,padx=10,ipady=2,pady=13)
rrbutton_m.pack(pady=20)
#frame pack
#radio button pack
rrrframe.pack(pady=15)
rrcframe.pack(pady=15)
#directory entry pack
rrdframe.pack(padx=0)
#merge button pack
rrmframe.pack(pady=0)
rrcframe.config(bg="#D9D9D9")
file_rename.config(bg="#D9D9D9")
rrrframe.config(bg="#D9D9D9")
rrmframe.config(bg="#D9D9D9")
# --- Main window: center a fixed 730x400 window and start the event loop. ---
tab_control.pack(expand=1, fill='both')
#main window size
wi_gui=730
hi_gui=400
wi_scr=win.winfo_screenwidth()
hi_scr=win.winfo_screenheight()
x=(wi_scr/2)-(wi_gui/2)
y=(hi_scr/2)-(hi_gui/2)
win.geometry('%dx%d+%d+%d'%(wi_gui,hi_gui,x,y))
#win.iconbitmap(r'C:\Users\Aristo\Desktop\Niloy\Excel_Merger-master\Excel_Merger-master\images\xlsx.ico')
win.resizable(False, False)
win.mainloop()
modules/PackerController.py | daveherrald/attack_range | 1 | 12769937 |
from packerpy import PackerExecutable
from jinja2 import Environment, FileSystemLoader
from modules import aws_service
import sys
class PackerController():
    """Builds and destroys the Packer AMIs selected by the attack-range config.

    ``config`` is a dict of attack-range settings; ``log`` is a logger.
    The list of AMIs to manage is derived from the '.../==1' feature flags.
    """

    def __init__(self, config, log):
        self.config = config
        self.log = log
        self.p = PackerExecutable("/usr/local/bin/packer")
        self.packer_amis = []
        self.packer_amis.append('splunk-server')
        if self.config['phantom_server']=='1':
            self.packer_amis.append('phantom-server')
        if self.config['kali_machine']=='1':
            self.packer_amis.append('kali_machine')
        windows_amis = []
        if self.config['windows_domain_controller']=='1':
            windows_amis.append('windows-domain-controller')
        if self.config['windows_server']=='1':
            windows_amis.append('windows-server')
        if self.config['windows_client']=='1':
            windows_amis.append('windows-client')
        if windows_amis:
            # Render the Windows userdata script once; the original rendered
            # the identical file up to three times (once per Windows machine).
            self.read_and_write_userdata_file()
            self.packer_amis.extend(windows_amis)

    def build_amis(self):
        """Build every AMI in self.packer_amis via Packer; exits on failure."""
        self.log.info("[action] > build AMIs\n")
        for packer_ami in self.packer_amis:
            self.log.info("Generate new Packer AMI packer-" + packer_ami + "-" + self.config['key_name'] + ". This can take some time.")
            template = 'packer/' + packer_ami +'/packer.json'
            # NOTE: template_vars aliases self.config, so the indexer IP is
            # written back into the shared config dict (as before).
            template_vars = self.config
            template_vars['splunk_indexer_ip'] = self.config['splunk_server_private_ip']
            (ret, out, err) = self.p.build(template,var=template_vars)
            if ret:
                self.log.error("ERROR: " + str(out))
                sys.exit(1)
            self.log.info("successfully generated Packer AMI packer-" + packer_ami + "-" + self.config['key_name'])

    def read_and_write_userdata_file(self):
        """Render packer/script/userdata.ps1 from its Jinja2 template and config."""
        j2_env = Environment(loader=FileSystemLoader('packer/script'),trim_blocks=True)
        template = j2_env.get_template('userdata.ps1.j2')
        userdata_file = template.render(self.config)
        with open('packer/script/userdata.ps1', 'w') as file:
            file.write(userdata_file)

    def destroy_amis(self):
        """Deregister every AMI this controller manages."""
        aws_service.deregister_images(self.packer_amis, self.config, self.log)
| 2.21875 | 2 |
html2sql/html2sql.py | quangpq/discrete_inference | 1 | 12769938 | <reponame>quangpq/discrete_inference<filename>html2sql/html2sql.py
from bs4 import BeautifulSoup
def openHTMLfile(name, base_dir="/Users/quangphan/Documents/Projects/discrete_thesis/data/"):
    """Parse ``<base_dir>/<name>.html`` and return SQL UPDATE statements
    for its ``div`` tags.

    ``base_dir`` was previously hard-coded; it is now a parameter with the
    old value as default, so existing callers are unaffected.
    """
    path = base_dir + name + ".html"
    with open(path, "r") as f:
        soup = BeautifulSoup(f, features="html.parser")
        tags = soup.findAll('div')
    return converHTML2SQL(tags)
def converHTML2SQL(divTag):
    """Turn every tag in *divTag* that carries an ``id`` attribute into a
    SQL UPDATE statement keyed on that id.

    Backslashes are doubled and single quotes become the ``&quot;`` entity so
    the markup survives inside the single-quoted SQL string literal.
    """
    statements = []
    for element in divTag:
        if 'id' not in element.attrs:
            continue
        keyphrase = element.attrs['id']
        escaped = str(element).replace("\\", "\\\\")
        escaped = escaped.replace("'", "&quot;")
        statements.append(
            "UPDATE `knowledges` SET `content`='%s' WHERE `keyphrase` = '%s';\n"
            % (escaped, keyphrase)
        )
    return statements
if __name__ == "__main__":
    # Convert each topic page to SQL UPDATE statements and write them all
    # into a single .txt file for later import.
    filenames = ["Set", "Logic", "Boolean", "Relation"]
    statements = []
    for filename in filenames:
        statements.extend(openHTMLfile(filename))
    with open("/Users/quangphan/Documents/Projects/discrete_thesis/sql/html_content.txt", "w") as the_file:
        the_file.writelines(statements)
| 2.6875 | 3 |
bin/twitch_mod_input.py | meznak/twitch-app-for-splunk | 0 | 12769939 | import sys
import requests
import logging
from splunklib.modularinput import *
class MI(Script):
    """Splunk modular input that collects Twitch channel stats."""

    def get_scheme(self):
        """Describe this input's configuration page to Splunk."""
        scheme = Scheme("Twitch Input")
        scheme.description = 'Collect Twitch channel stats'
        scheme.use_external_validation = True
        scheme.use_single_instance = True

        username_arg = Argument('username')
        username_arg.data_type = Argument.data_type_string
        username_arg.description = 'Your Twitch username'
        scheme.add_argument(username_arg)
        # TODO: Add password
        return scheme

    def validate_input(self, validation_definition):
        """Reject configurations with an empty username."""
        logging.info("Validating input")
        username = validation_definition.parameters['username']
        logging.debug(f'username: {username}')

        if len(username) < 1:
            raise ValueError('Username is required')
        # TODO: Validate username
        # TODO: Validate credentials

    def stream_events(self, inputs, ew):
        """Run the collection for every configured input stanza."""
        # BUG FIX: dict.iteritems() is Python 2 only; this file already
        # uses f-strings, so it runs on Python 3 where iteritems() raises
        # AttributeError. Use items() instead.
        for input_name, input_item in inputs.inputs.items():
            username = str(input_item['username'])
            do_work(input_name, ew, username)
def do_work(input_name, ew, username):
    # Collect Twitch stats for *username* and write them via *ew*
    # (a splunklib EventWriter). Currently only logs the start marker;
    # the actual queries and event writes are still TODO.
    EventWriter.log(ew, EventWriter.INFO,
                    f'Started Twitch queries for {username}')

    # TODO: Make queries
    # TODO: Write events
if __name__ == '__main__':
    # Entry point: hand control to the Splunk modular-input framework.
    MI().run(sys.argv)
| 2.515625 | 3 |
M5/Black Box vs White Box.py | DouglasCarvalhoPereira/Interact-OS-PYTHON | 0 | 12769940 | #CONCEITO DE TESTE DE CAIXA BRANCA OU TESTE DE CAIXA PRETA
# > Teste de Caixa Branca (White Box) - Testes baseados em informações internas do software. São testes unitários feitos com
#base em conhecimentos prévios do software. Geralmente testes de CAIXA BRANCA são feitos após o código
#estar escrito, sendo eles feitos com partes diferentes do código.
# > Teste de Caixa Preta (Black Box) - São testes sem conhecimentos internos do software. Não é necessário saber como o
#código é escrito para executar os testes. São testes de caixa preta quando escritos antes mesmo do software ser escrito.
#Um exemplo seria digitar um link na barra de endereço e ver se atingiu o objetivo; não é preciso nenhum conhecimento
#de como funcionou o software, o teste é opaco.
# >>>>>>>>> OUTROS TIPOS DE TESTES <<<<<<<<<<<<<<<<<<<<<
#Testes de integração
"""
- Verifica se as integrações entre diferentes partes do projeto estão funcionando como deveriam. Por exemplo, um teste de
banco de dados que recebe e sincroniza os dados do usuário. Um grupo de testes executaria vários pequenos testes de maneira que as
integrações funcionem. Talvez seja preciso criar um ambiente de testes. Garante que todas as peças estejam conectadas e funcionando antes
de serem postas no ambiente de produção.
"""
#Testes de Regressão
"""
- Verifica se um problema foi corrigido depois de identificado. Executa testes frequentes para verificar se os mesmos erros
não estão ocorrendo novamente, ou seja, garante que os mesmos erros não aconteçam de novo. Se um mesmo erro já solucionado for introduzido novamente,
isso fará com que o teste de regressão FALHE.
"""
#Testes de fumaça - Verificação de construção
"""
- Às vezes chamado de teste de construção, utilizado para encontrar grandes BUGS no sistema. Onde há fumaça, há fogo: ou seja, um programa não deve seguir adiante
se houver "fumaça". Esse tipo de teste responde perguntas básicas como "O programa executa?".
Teste esse feito antes mesmo de testes mais refinados, uma vez que o teste de fumaça falhar ele não passará em nenhum outro.
"""
#Testes de Carga
"""
- São testes para verificar se o software funciona com grande carga de requisições ou de dados, ou seja, um teste de demanda. Quantos usuários
fazendo requisições simultâneas a aplicação suporta.
"""
#Conjunto de testes
"""
Um conjunto de testes mais robusto tem a possibilidade de criar um software profissional, minimizando os erros e danos tanto para a empresa quanto para os usuários.
Esse são os testes mais comuns, mas ainda existem muitos outros testes a serem estudados.
"""
"""
Desenvolvimento Orientado por Testes
Vamos tratar dos TDD
>>>>>>>>>> TEST DRIVEN DEVELOPMENT <<<<<<<<<
"""
| 2.6875 | 3 |
yalefaces/rename.py | AseemPandita/TripletLoss | 1 | 12769941 | <filename>yalefaces/rename.py
import os
# Append the missing '.bmp' extension to every Yale-faces subject file
# in the current directory.
entries = [name for name in os.listdir('.') if os.path.isfile(name)]
for name in entries:
    if 'subject' in name and '.bmp' not in name:
        os.rename(name, name + '.bmp')
egs/zeroth/s5/data/local/lm/buildLM/_scripts_/sumStatUniqWords.py | SYHPARK/kaldi | 330 | 12769942 | <filename>egs/zeroth/s5/data/local/lm/buildLM/_scripts_/sumStatUniqWords.py
#!/usr/bin/env python3
# summation all the stat from uniqWord.JOB
# build uniq word dictionary with count
#
# Copyright 2017 Atlas Guide (Author : <NAME>)
#
# Apache 2.0
#
import fileinput
import json
import sys
def main():
    """Read '<count> <word>...' lines from stdin/args, accumulate per-word
    counts, and print the words sorted by descending count.

    Note the original asymmetry is preserved: a word first seen on a
    multi-word line is initialized with count 1, while a word first seen
    on a single-word line is initialized with the line's count.
    """
    counts = {}          # word -> accumulated count
    unique_words = 0     # number of distinct words seen so far
    line_no = 1          # 1-based counter of non-empty lines (for progress)

    for raw_line in fileinput.input():
        # Skip empty lines entirely (they do not advance the counter).
        if not raw_line.strip():
            continue
        if line_no % 1000 == 0:
            print(" %d line processed" % line_no, end='\r', file=sys.stderr)
        line_no += 1

        fields = raw_line.split()
        # A valid line needs at least a count and one word.
        if len(fields) < 2:
            continue

        count = int(fields[0])
        words = fields[1:]
        for word in words:
            if word in counts:
                counts[word] += count
            else:
                counts[word] = count if len(words) == 1 else 1
                unique_words += 1

    print(" REPORT: {} uniq. words are founded".format(unique_words), file=sys.stderr)
    print(" now sorting", file=sys.stderr)
    for word, total in sorted(counts.items(), key=lambda kv: kv[1], reverse=True):
        print(word, total)
# Script entry point: stream stat lines from stdin/argv files and emit
# the sorted unique-word counts.
if __name__ == '__main__':
    main()
| 2.796875 | 3 |
csb/io/plots.py | ujjwalsh/CSB | 10 | 12769943 | """
Plotting facilities, based on Python's MPL library.
The L{Chart} object is a facade which provides intuitive access to MPL's plotting
objects. The following examples show a typical use of L{Chart}:
1. Instantiation
>>> chart = Chart() # a chart with a single plot
>>> chart = Chart(rows=2, columns=2) # a chart with 2x2=4 plots
2. Accessing plots (equivalent to MPL's subplots)
>>> chart.plots[0] # first plot (at row=0, column=0)
Plot (matplotlib.axes.AxesSubplot)
>>> chart.plots[0, 1]
Plot (matplotlib.axes.AxesSubplot) # plot at row=0, column=1
3. Plotting
>>> chart.plots[0].hist(...)
>>> chart.plots[0].set_title('T')
>>> chart.plots[1].scatter(...)
>>> chart.plots[1].set_xlabel('X')
4. Using the GUI
>>> chart.show()
>>> chart.hide()
5. Saving as image
>>> chart.save(filename, format=chart.formats.PDF)
If using the GUI, do not forget to dispose the chart at the end:
>>> chart.dispose()
or simply use the chart in a context manager:
>>> with Chart() as chart:
chart...
"""
import time
import csb.core
from abc import ABCMeta, abstractmethod
from threading import Thread, Event
from matplotlib.figure import Figure
from matplotlib.backends.backend_agg import FigureCanvasAgg
class Backend(Thread):
    """
    Abstract class defining the behavior of all Chart GUI backends.
    Each backend is a 'daemon' that runs in a new thread and handles
    all GUI requests from any L{Chart} instance. A backend service must
    behave as a singleton - only one service of a given kind may exist at a
    given point in time. L{Chart} clients will request GUI operations
    on specific figures, the Backend therefore must keep track of all
    windows opened, as well as the figure-to-window mapping.
    """
    # NOTE(review): '__metaclass__' is the Python 2 metaclass hook; Python 3
    # ignores this attribute, so the @abstractmethod decorators below are not
    # enforced there -- confirm the targeted interpreter version.
    __metaclass__ = ABCMeta
    # Singleton registry: maps each concrete Backend subclass to its instance.
    _instances = {}

    @staticmethod
    def get(backend, started=True):
        """
        Backend factory, ensures one instance per subclass.
        @param backend: one of the L{Backend} subclasses
        @type backend: type
        @param started: if True, ensure that the service is running
        @type started: bool
        @return: an instance of the backend. The returned service
        instance may need to be started.
        @rtype: L{Backend}
        """
        if not issubclass(backend, Backend):
            raise TypeError(backend)
        if backend in Backend._instances:
            instance = Backend._instances[backend]
        else:
            instance = backend()
        if started and not instance.started:
            instance.start()
        return instance

    @staticmethod
    def query(backend):
        """
        @param backend: one of the L{Backend} subclasses
        @type backend: type
        @return: True if a service of type C{backend} is running
        @rtype: bool
        """
        if not issubclass(backend, Backend):
            raise TypeError(backend)
        if backend in Backend._instances:
            instance = Backend._instances[backend]
            return instance.started
        else:
            return False

    def __init__(self):
        # Guard the singleton invariant: a second instantiation of the same
        # subclass is a programming error.
        name = self.__class__
        if name in Backend._instances:
            raise RuntimeError('Backend {0} has already been initialized'.format(name))
        else:
            Backend._instances[name] = self
        super(Backend, self).__init__()
        # figure -> GUI window mapping, maintained by the concrete backend.
        self._figures = {}
        # Thread-safe flags tracking the service lifecycle.
        self._started = Event()
        self._running = Event()
        self.setDaemon(True)

    @property
    def started(self):
        """
        True if the service had been started
        @rtype: bool
        """
        return self._started.isSet()

    @property
    def running(self):
        """
        True if the service had been started and is currently running
        @rtype: bool
        """
        return self._running.isSet()

    @abstractmethod
    def _initapp(self):
        """
        Create an instance of the GUI application.
        """
        pass

    @abstractmethod
    def _mainloop(self):
        """
        Enter the GUI main loop.
        """
        pass

    @abstractmethod
    def _exit(self):
        """
        Delete all frames, exit the GUI main loop and perform any cleanup
        needed in order to unblock the thread that started the main loop.
        """
        pass

    @abstractmethod
    def _add(self, figure):
        """
        Handle an 'Add new figure' event
        """
        pass

    @abstractmethod
    def _show(self, figure):
        """
        Handle a 'Show existing figure' event
        """
        pass

    @abstractmethod
    def _resize(self, figure):
        """
        Handle a 'Resize existing figure' event
        """
        pass

    @abstractmethod
    def _hide(self, figure):
        """
        Handle a 'Hide existing figure' event
        """
        pass

    @abstractmethod
    def _destroy(self, figure):
        """
        Handle a 'Delete existing figure' event
        """
        pass

    @abstractmethod
    def _invoke(self, callable, *args):
        """
        Pass a GUI message: invoke C{callable} in a thread-safe way
        """
        pass

    def invoke(self, callable, *args):
        """
        Invoke an asynchronous GUI operation (in a thread-safe way)
        """
        if not self._running.isSet():
            raise RuntimeError('The backend service is not running')
        else:
            self._invoke(callable, *args)

    def add(self, figure):
        """
        Add a new figure.
        """
        self.invoke(self._add, figure)

    def show(self, figure):
        """
        Show existing figure.
        """
        self.invoke(self._show, figure)

    def resize(self, figure):
        """
        Resize existing figure.
        """
        self.invoke(self._resize, figure)

    def hide(self, figure):
        """
        Hide existing figure.
        """
        self.invoke(self._hide, figure)

    def destroy(self, figure, wait=False):
        """
        Destroy existing figure. If C{wait} is True, make sure the asynchronous
        figure deletion is complete before returning from the method.
        """
        has_figure = (figure in self._figures)
        self.invoke(self._destroy, figure)
        # NOTE(review): busy-wait until the GUI thread removes the figure from
        # the mapping; there is no timeout, so a stuck GUI loop would hang here.
        if has_figure and wait:
            while figure in self._figures:
                pass

    def start(self):
        """
        Start the Backend service. This method can be called only once.
        """
        try:
            super(Backend, self).start()
            # Block until run() signals that the GUI application is up.
            while not self._running.isSet():
                time.sleep(0.05)
        except BaseException:
            raise RuntimeError("Failed to start the backend service")

    def run(self):
        """
        Main service method, automatically called by C{start}.
        """
        self._started.set()
        self._initapp()
        self._running.set()
        # Blocks until _exit() terminates the GUI main loop.
        self._mainloop()
        self._running.clear()
        self._started.clear()

    def stop(self):
        """
        Stop the Backend service. The Backend object can be safely
        disposed afterwards.
        """
        self._exit()
        self._figures = {}
        self.join()
        self._running.clear()
        self._started.clear()
        # Unregister so a fresh instance can be created later.
        del Backend._instances[self.__class__]

    def client_disposed(self, client):
        """
        Fired when a client is being deleted. Will stop the service if no
        active clients are remaining.
        """
        if self._figures is None or len(self._figures) == 0:
            self.stop()

    def __del__(self):
        if self._started.isSet():
            self.stop()
class WxBackendImpl(Backend):
    """
    WxPython L{Backend} implementor.
    @note: not meant to be instantiated directly, use L{Backend.get} instead.
    """
    # The wx application object is shared class-wide (wx allows only one app).
    _wxapp = None

    def __init__(self):
        import wx
        from matplotlib.backends.backend_wx import FigureFrameWx
        # Keep module references on the instance so the worker thread's
        # methods can use them without re-importing.
        self._wx = wx
        self._FigureFrameWx = FigureFrameWx
        super(WxBackendImpl, self).__init__()

    @property
    def _app(self):
        # Lazily create the singleton wx application object.
        if WxBackendImpl._wxapp is None:
            WxBackendImpl._wxapp = self._wx.PySimpleApp()
        return WxBackendImpl._wxapp

    def _initapp(self):
        # Force app creation and flash a dummy frame so the wx main loop has
        # a top-level window to anchor to.
        dummy = self._app
        frame = self._wx.Frame(None)
        frame.Show()
        frame.Hide()

    def _mainloop(self):
        self._app.MainLoop()

    def _add(self, figure):
        wx = self._wx
        FigureFrameWx = self._FigureFrameWx
        if figure not in self._figures:
            frame = FigureFrameWx(figure._figure_number, figure)
            frame.Show()
            # Re-layout when the window is activated; intercept close so the
            # window is only hidden and the figure can be shown again later.
            frame.Bind(wx.EVT_ACTIVATE, lambda e: e.GetEventObject().Layout())
            frame.Bind(wx.EVT_CLOSE, lambda e: self.invoke(self._hide, figure))
            self._figures[figure] = frame

    def _show(self, figure):
        if figure not in self._figures:
            self._add(figure)
        self._figures[figure].Show()

    def _resize(self, figure):
        if figure in self._figures:
            frame = self._figures[figure]
            # Convert figure size (inches * dpi) into pixels for wx.
            w = figure.get_figwidth() * figure.get_dpi()
            h = figure.get_figheight() * figure.get_dpi()
            size = self._wx.Size(w, h)
            frame.canvas.SetInitialSize(size)
            frame.GetSizer().Fit(frame)

    def _hide(self, figure):
        if figure in self._figures:
            self._figures[figure].Hide()

    def _destroy(self, figure):
        if figure in self._figures:
            frame = self._figures[figure]
            if not frame.IsBeingDeleted():
                frame.Destroy()
            del self._figures[figure]

    def _invoke(self, callable, *args):
        wx = self._wx
        # wx.CallAfter marshals the call onto the GUI thread.
        wx.CallAfter(callable, *args)

    def _exit(self):
        for frame in self._figures.values():
            if not frame.IsBeingDeleted():
                frame.Destroy()
        self._app.Exit()
class Backends(object):
    """
    Enumeration of chart backends.
    """
    # wxPython-based GUI backend (see WxBackendImpl above).
    WX_WIDGETS = WxBackendImpl
class PlotsCollection(object):
    """
    A list-like collection of all plots in the chart (0-based).

    Subplots are created lazily: the first access to a given index adds the
    corresponding subplot to the underlying figure. Iteration and C{len}
    cover only the plots that have actually been created.
    """

    def __init__(self, figure, rows=1, columns=1):
        assert rows >= 1 and columns >= 1

        self._figure = figure
        self._rows = int(rows)
        self._columns = int(columns)
        # One slot per grid cell; None marks a not-yet-created subplot.
        self._plots = [None] * (self._rows * self._columns)

    @property
    def _active_plots(self):
        # Only the subplots that have been materialized so far.
        return [plot for plot in self._plots if plot is not None]

    def _add(self, index=1):
        assert 0 <= index < len(self._plots)

        # Matplotlib's subplot numbering is 1-based, hence index + 1.
        subplot = self._figure.add_subplot(self._rows, self._columns, index + 1)
        self._plots[index] = subplot
        return subplot

    def __getitem__(self, location):
        # Accept either a flat index or a (row, column) pair.
        if isinstance(location, tuple):
            row, col = location
            index = row * self._columns + col
        else:
            index = int(location)

        if not (0 <= index < len(self._plots)):
            raise IndexError("No such plot: {0}".format(location))

        plot = self._plots[index]
        if plot is None:
            plot = self._add(index)
        return plot

    def __len__(self):
        return len(self._active_plots)

    def __iter__(self):
        return iter(self._active_plots)
class Chart(object):
    """
    Simple and clean facade to Matplotlib's plotting API.
    A chart instance abstracts a plotting device, on which one or
    multiple related plots can be drawn. Charts can be exported as images, or
    visualized interactively. Each chart instance will always open in its own
    GUI window, and this window will never block the execution of the rest of
    the program, or interfere with other L{Chart}s.
    The GUI can be safely opened in the background and closed infinite number
    of times, as long as the client program is still running.
    By default, a chart contains a single plot:
    >>> chart.plot
    matplotlib.axes.AxesSubplot
    >>> chart.plot.hist(...)
    If C{rows} and C{columns} are defined, the chart will contain
    C{rows} x C{columns} number of plots (equivalent to MPL's sub-plots).
    Each plot can be assessed by its index:
    >>> chart.plots[0]
    first plot
    or by its position in the grid:
    >>> chart.plots[0, 1]
    plot at row=0, column=1
    @param number: chart number; by default this a L{Chart.AUTONUMBER}
    @type number: int or None
    @param title: chart master title
    @type title: str
    @param rows: number of rows in the chart window
    @type rows: int
    @param columns: number of columns in the chart window
    @type columns: int
    @note: additional arguments are passed directly to Matplotlib's Figure
    constructor.
    """

    AUTONUMBER = None
    # Auto-incrementing chart counter, shared by all instances.
    _serial = 0

    def __init__(self, number=None, title='', rows=1, columns=1, backend=Backends.WX_WIDGETS, *fa, **fk):
        if number == Chart.AUTONUMBER:
            Chart._serial += 1
            number = Chart._serial
        if rows < 1:
            rows = 1
        if columns < 1:
            columns = 1
        self._rows = int(rows)
        self._columns = int(columns)
        self._number = int(number)
        self._title = str(title)
        self._figure = Figure(*fa, **fk)
        self._figure._figure_number = self._number
        self._figure.suptitle(self._title)
        self._beclass = backend
        self._hasgui = False
        self._plots = PlotsCollection(self._figure, self._rows, self._columns)
        self._canvas = FigureCanvasAgg(self._figure)
        # Build the OutputFormats enum from whatever the Agg canvas supports.
        formats = [ (f.upper(), f) for f in self._canvas.get_supported_filetypes() ]
        self._formats = csb.core.Enum.create('OutputFormats', **dict(formats))

    def __getitem__(self, i):
        # BUG FIX: the original tested "i in self._plots", which iterates the
        # *plot objects* (via PlotsCollection.__iter__) and compares them to
        # the integer/tuple key - that is False for any valid index, so every
        # access raised KeyError. Delegate to the collection instead and
        # translate its IndexError into the documented KeyError.
        try:
            return self._plots[i]
        except IndexError:
            raise KeyError('No such plot number: {0}'.format(i))

    def __enter__(self):
        return self

    def __exit__(self, *a, **k):
        self.dispose()

    @property
    def _backend(self):
        # Lazily obtain (and start) the singleton backend service.
        return Backend.get(self._beclass, started=True)

    @property
    def _backend_started(self):
        return Backend.query(self._beclass)

    @property
    def title(self):
        """
        Chart title
        @rtype: str
        """
        return self._title

    @property
    def number(self):
        """
        Chart number
        @rtype: int
        """
        return self._number

    @property
    def plots(self):
        """
        Index-based access to the plots in this chart
        @rtype: L{PlotsCollection}
        """
        return self._plots

    @property
    def plot(self):
        """
        First plot
        @rtype: matplotlib.AxesSubplot
        """
        return self._plots[0]

    @property
    def rows(self):
        """
        Number of rows in this chart
        @rtype: int
        """
        return self._rows

    @property
    def columns(self):
        """
        Number of columns in this chart
        @rtype: int
        """
        return self._columns

    @property
    def width(self):
        """
        Chart's width in inches
        @rtype: int
        """
        return self._figure.get_figwidth()

    @width.setter
    def width(self, inches):
        self._figure.set_figwidth(inches)
        if self._backend_started:
            self._backend.resize(self._figure)

    @property
    def height(self):
        """
        Chart's height in inches
        @rtype: int
        """
        return self._figure.get_figheight()

    @height.setter
    def height(self, inches):
        self._figure.set_figheight(inches)
        if self._backend_started:
            self._backend.resize(self._figure)

    @property
    def dpi(self):
        """
        Chart's DPI
        @rtype: int
        """
        return self._figure.get_dpi()

    @dpi.setter
    def dpi(self, dpi):
        self._figure.set_dpi(dpi)
        # CONSISTENCY FIX: like the width/height setters, only ask the GUI
        # backend to resize when it is actually running - the original call
        # went through self._backend unconditionally, which spuriously
        # started the GUI service as a side effect of setting the DPI.
        if self._backend_started:
            self._backend.resize(self._figure)

    @property
    def formats(self):
        """
        Supported output file formats
        @rtype: L{csb.core.enum}
        """
        return self._formats

    def show(self):
        """
        Show the GUI window (non-blocking).
        """
        if not self._hasgui:
            self._backend.add(self._figure)
            self._hasgui = True
        self._backend.show(self._figure)

    def hide(self):
        """
        Hide (but do not dispose) the GUI window.
        """
        self._backend.hide(self._figure)

    def dispose(self):
        """
        Dispose the GUI interface. Must be called at the end if any
        chart.show() calls have been made. Automatically called if using
        the chart in context manager ("with" statement).
        @note: Failing to call this method if show() has been called at least
        once may cause backend-related errors.
        """
        if self._backend_started:
            service = self._backend
            if service and service.running:
                service.destroy(self._figure, wait=True)
                service.client_disposed(self)

    def save(self, file, format='png', crop=False, dpi=None, *a, **k):
        """
        Save all plots to an image.
        @param file: destination file name
        @type file: str
        @param format: output image format; see C{chart.formats} for enumeration
        @type format: str or L{csb.core.EnumItem}
        @param crop: if True, crop the image (equivalent to MPL's bbox=tight)
        @type crop: bool
        @note: additional arguments are passed directly to MPL's savefig method
        """
        # An explicit bbox_inches keyword overrides the 'crop' flag.
        if 'bbox_inches' in k:
            bbox = k['bbox_inches']
            del k['bbox_inches']
        else:
            if crop:
                bbox = 'tight'
            else:
                bbox = None
        self._canvas.print_figure(file, format=str(format), bbox_inches=bbox, dpi=dpi, *a, **k)
| 3.484375 | 3 |
src/nwb_conversion_tools/tools/neo/neo.py | NeurodataWithoutBorders/nwbn-conversion-tools | 0 | 12769944 | """Author: <NAME>."""
from typing import Optional, Tuple
import distutils.version
import uuid
from datetime import datetime
from pathlib import Path
import warnings
import numpy as np
import neo.io.baseio
import pynwb
from hdmf.backends.hdf5 import H5DataIO
from ..nwb_helpers import add_device_from_metadata
from ...utils import OptionalFilePathType
# Maps icephys experiment type -> pynwb *response* series class used to
# store the acquired traces.
response_classes = dict(
    voltage_clamp=pynwb.icephys.VoltageClampSeries,
    current_clamp=pynwb.icephys.CurrentClampSeries,
    izero=pynwb.icephys.IZeroClampSeries,
)
# Maps icephys experiment type -> pynwb *stimulus* series class. There is no
# 'izero' entry: add_icephys_recordings below only looks up a stimulus class
# when the experiment type is not 'izero'.
stim_classes = dict(
    voltage_clamp=pynwb.icephys.VoltageClampStimulusSeries,
    current_clamp=pynwb.icephys.CurrentClampStimulusSeries,
)
# TODO - get electrodes metadata
def get_electrodes_metadata(neo_reader, electrodes_ids: list, block: int = 0) -> list:
    """
    Get electrodes metadata from a Neo reader.

    Placeholder: electrode metadata extraction is not implemented yet, so an
    empty list is always returned (TODO).

    The information to collect eventually is what
    pynwb.icephys.IntracellularElectrode accepts:
    name, device, description, comment, slice, seal, location, resistance,
    filtering and initial_access_resistance.

    Parameters
    ----------
    neo_reader : Neo reader
    electrodes_ids : list
        List of electrode ids.
    block : int, optional
        Block id. Defaults to 0.

    Returns
    -------
    list
        Currently always empty.
    """
    return []
def get_number_of_electrodes(neo_reader) -> int:
    """Return the number of electrodes (signal channels) reported by the reader."""
    # TODO - take in account the case with multiple streams.
    channels = neo_reader.header["signal_channels"]
    return len(channels)
def get_number_of_segments(neo_reader, block: int = 0) -> int:
    """Return the number of segments (sweeps) in the given block of the reader."""
    segments_per_block = neo_reader.header["nb_segment"]
    return segments_per_block[block]
def get_command_traces(neo_reader, segment: int = 0, cmd_channel: int = 0) -> Tuple[list, str, str]:
    """
    Get command traces (e.g. voltage clamp command traces).

    Parameters
    ----------
    neo_reader : neo.io.baseio
        Reader for the source file; must support ``read_raw_protocol``
        (only the AxonIO interface does).
    segment : int, optional
        Segment (sweep) index. Defaults to 0.
    cmd_channel : int, optional
        ABF command channel (0 to 7). Defaults to 0.

    Returns
    -------
    tuple
        (trace, title, unit) for the requested segment and command channel.

    Raises
    ------
    Exception
        Re-raised from the reader with a note that only AxonIO supports
        ``read_raw_protocol``.
    """
    try:
        traces, titles, units = neo_reader.read_raw_protocol()
        return traces[segment][cmd_channel], titles[segment][cmd_channel], units[segment][cmd_channel]
    except Exception as e:
        # BUG FIX: the original did "return e", handing callers the exception
        # *object* where a (trace, title, unit) tuple was expected - a silent
        # failure. Augment the message as before, but re-raise.
        msg = ".\n\n WARNING - get_command_traces() only works for AxonIO interface."
        e.args = (str(e) + msg,)
        raise
def get_conversion_from_unit(unit: str) -> float:
    """
    Get conversion (to Volt or Ampere) from unit in string format.

    Parameters
    ----------
    unit : str
        Unit as string, e.g. "pA", "mV", "uV", "A", "V".

    Returns
    -------
    float
        Conversion factor to Ampere or Volt; 1.0 (with a warning) for
        unrecognized units.
    """
    # SI prefix -> multiplier, for units of the form "<prefix>A" / "<prefix>V".
    prefix_factors = {"p": 1e-12, "n": 1e-9, "u": 1e-6, "m": 1e-3}

    if unit in ("A", "V"):
        return 1.0
    if len(unit) == 2 and unit[0] in prefix_factors and unit[1] in ("A", "V"):
        return float(prefix_factors[unit[0]])

    warnings.warn("No valid units found for traces in the current file. Gain is set to 1, but this might be wrong.")
    return 1.0
def get_nwb_metadata(neo_reader, metadata: dict = None):
    """
    Return default metadata for all recording fields.

    Parameters
    ----------
    neo_reader : Neo reader object
    metadata : dict, optional
        Metadata info for constructing the nwb file.

    NOTE(review): the incoming ``metadata`` argument is currently ignored and
    a fresh default dictionary is always returned - confirm this is intended.
    """
    default_metadata = {
        "NWBFile": {
            "session_description": "Auto-generated by NwbRecordingExtractor without description.",
            "identifier": str(uuid.uuid4()),
        },
        "Icephys": {"Device": [{"name": "Device", "description": "no description"}]},
    }
    return default_metadata
def add_icephys_electrode(neo_reader, nwbfile, metadata: dict = None):
    """
    Add icephys electrodes to nwbfile object.
    Will always ensure nwbfile has at least one icephys electrode.
    Will auto-generate a linked device if the specified name does not exist in the nwbfile.

    Parameters
    ----------
    neo_reader : neo.io.baseio
    nwbfile : NWBFile
        NWBFile object to add the icephys electrode to.
    metadata : dict, optional
        Metadata info for constructing the nwb file.
        NOTE: this dict is modified in place (missing 'Icephys'/'Electrodes'
        entries are filled with defaults).
        Should be of the format
            metadata['Icephys']['Electrodes'] = [
                {
                    'name': my_name,
                    'description': my_description,
                    'device_name': my_device_name
                },
                ...
            ]
    """
    assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
    # A device must exist before an electrode can link to it.
    if len(nwbfile.devices) == 0:
        warnings.warn("When adding Icephys Electrode, no Devices were found on nwbfile. Creating a Device now...")
        add_device_from_metadata(nwbfile=nwbfile, modality="Icephys", metadata=metadata)
    if metadata is None:
        metadata = dict()
    if "Icephys" not in metadata:
        metadata["Icephys"] = dict()
    # One default electrode entry per signal channel, all linked to the
    # first device present on the nwbfile.
    defaults = [
        dict(
            name=f"icephys_electrode_{elec_id}",
            description="no description",
            device_name=[i.name for i in nwbfile.devices.values()][0],
        )
        for elec_id in range(get_number_of_electrodes(neo_reader))
    ]
    if "Electrodes" not in metadata["Icephys"] or len(metadata["Icephys"]["Electrodes"]) == 0:
        metadata["Icephys"]["Electrodes"] = defaults
    assert all(
        [isinstance(x, dict) for x in metadata["Icephys"]["Electrodes"]]
    ), "Expected metadata['Icephys']['Electrodes'] to be a list of dictionaries!"
    # Create Icephys electrode from metadata
    for elec in metadata["Icephys"]["Electrodes"]:
        # Skip electrodes that already exist on the nwbfile (idempotent add).
        if elec.get("name", defaults[0]["name"]) not in nwbfile.icephys_electrodes:
            device_name = elec.get("device_name", defaults[0]["device_name"])
            if device_name not in nwbfile.devices:
                # Referenced device is missing: auto-generate it and warn.
                new_device_metadata = dict(Ecephys=dict(Device=[dict(name=device_name)]))
                add_device_from_metadata(nwbfile, modality="Icephys", metadata=new_device_metadata)
                warnings.warn(
                    f"Device '{device_name}' not detected in "
                    "attempted link to icephys electrode! Automatically generating."
                )
            electrode_kwargs = dict(
                name=elec.get("name", defaults[0]["name"]),
                description=elec.get("description", defaults[0]["description"]),
                device=nwbfile.devices[device_name],
            )
            nwbfile.create_icephys_electrode(**electrode_kwargs)
def add_icephys_recordings(
    neo_reader,
    nwbfile: pynwb.NWBFile,
    metadata: dict = None,
    icephys_experiment_type: Optional[str] = None,
    stimulus_type: Optional[str] = None,
    skip_electrodes: Tuple[int] = (),
    compression: str = "gzip",
):
    """
    Add icephys recordings (stimulus/response pairs) to nwbfile object.

    Parameters
    ----------
    neo_reader : neo.io.baseio
        NOTE(review): reads the private ``_axon_info`` attribute and calls
        ``read_raw_protocol``, so this effectively requires an AxonIO reader.
    nwbfile : NWBFile
    metadata : dict, optional
        Must contain metadata['Icephys']['Sessions'] entries with
        'relative_session_start_time' and 'stimulus_type' keys (accessed
        below by session offset).
    icephys_experiment_type : str, optional
        Type of Icephys experiment. Allowed types are: 'voltage_clamp',
        'current_clamp' and 'izero'. The default is 'voltage_clamp'.
    stimulus_type : str
    skip_electrodes : tuple
        Electrode indices to skip.
    compression : str | bool
    """
    n_segments = get_number_of_segments(neo_reader, block=0)
    if icephys_experiment_type is None:
        icephys_experiment_type = "voltage_clamp"
    if stimulus_type is None:
        stimulus_type = "not described"
    # Check for protocol data (only ABF2), necessary for stimuli data
    if neo_reader._axon_info["fFileVersionNumber"] < 2:
        n_commands = 0
        warnings.warn(
            f"Protocol section is only present in ABF2 files. {neo_reader.filename} has version "
            f"{neo_reader._axon_info['fFileVersionNumber']}. Saving experiment as 'i_zero'..."
        )
    else:
        protocol = neo_reader.read_raw_protocol()
        n_commands = len(protocol[0])
    # Without command (stimulus) data the experiment degrades to i=0.
    if n_commands == 0:
        icephys_experiment_type = "izero"
        warnings.warn(
            f"No command data found by neo reader in file {neo_reader.filename}. Saving experiment as 'i_zero'..."
        )
    else:
        assert (
            n_commands == n_segments
        ), f"File contains inconsistent number of segments ({n_segments}) and commands ({n_commands})"
    assert icephys_experiment_type in ["voltage_clamp", "current_clamp", "izero"], (
        f"'icephys_experiment_type' should be 'voltage_clamp', 'current_clamp' or 'izero', but received value "
        f"{icephys_experiment_type}"
    )
    # Check and auto-create electrodes, in case they don't exist yet in nwbfile
    if len(nwbfile.icephys_electrodes) == 0:
        warnings.warn(
            "When adding Icephys Recording, no Icephys Electrodes were found on nwbfile. Creating Electrodes now..."
        )
        add_icephys_electrode(
            neo_reader=neo_reader,
            nwbfile=nwbfile,
            metadata=metadata,
        )
    # Offsets so that names/indices continue from what is already stored
    # in the nwbfile (supports appending multiple sessions/files).
    if getattr(nwbfile, "intracellular_recordings", None):
        ri = max(nwbfile.intracellular_recordings["responses"].index)
    else:
        ri = -1
    if getattr(nwbfile, "icephys_simultaneous_recordings", None):
        simultaneous_recordings_offset = len(nwbfile.icephys_simultaneous_recordings)
    else:
        simultaneous_recordings_offset = 0
    if getattr(nwbfile, "icephys_sequential_recordings", None):
        sessions_offset = len(nwbfile.icephys_sequential_recordings)
    else:
        sessions_offset = 0
    relative_session_start_time = metadata["Icephys"]["Sessions"][sessions_offset]["relative_session_start_time"]
    session_stimulus_type = metadata["Icephys"]["Sessions"][sessions_offset]["stimulus_type"]
    # Sequential icephys recordings
    simultaneous_recordings = list()
    for si in range(n_segments):
        # Parallel icephys recordings
        recordings = list()
        for ei, electrode in enumerate(
            list(nwbfile.icephys_electrodes.values())[: len(neo_reader.header["signal_channels"]["units"])]
        ):
            if ei in skip_electrodes:
                continue
            # Starting time is the signal starting time within .abf file + time
            # relative to first session (first .abf file)
            ri += 1
            starting_time = neo_reader.get_signal_t_start(block_index=0, seg_index=si)
            starting_time = starting_time + relative_session_start_time
            sampling_rate = neo_reader.get_signal_sampling_rate()
            response_unit = neo_reader.header["signal_channels"]["units"][ei]
            response_conversion = get_conversion_from_unit(unit=response_unit)
            response_gain = neo_reader.header["signal_channels"]["gain"][ei]
            response_name = f"{icephys_experiment_type}-response-{si + 1 + simultaneous_recordings_offset:02}-ch-{ei}"
            response = response_classes[icephys_experiment_type](
                name=response_name,
                description=f"Response to: {session_stimulus_type}",
                electrode=electrode,
                data=H5DataIO(
                    data=neo_reader.get_analogsignal_chunk(block_index=0, seg_index=si, channel_indexes=ei),
                    compression=compression,
                ),
                starting_time=starting_time,
                rate=sampling_rate,
                conversion=response_conversion * response_gain,
                gain=np.nan,
            )
            if icephys_experiment_type != "izero":
                # Pair the response with the protocol's command trace.
                stim_unit = protocol[2][ei]
                stim_conversion = get_conversion_from_unit(unit=stim_unit)
                stimulus = stim_classes[icephys_experiment_type](
                    name=f"stimulus-{si + 1 + simultaneous_recordings_offset:02}-ch-{ei}",
                    description=f"Stim type: {session_stimulus_type}",
                    electrode=electrode,
                    data=protocol[0][si][ei],
                    rate=sampling_rate,
                    starting_time=starting_time,
                    conversion=stim_conversion,
                    gain=np.nan,
                )
                icephys_recording = nwbfile.add_intracellular_recording(
                    electrode=electrode, response=response, stimulus=stimulus
                )
            else:
                icephys_recording = nwbfile.add_intracellular_recording(electrode=electrode, response=response)
            recordings.append(icephys_recording)
        sim_rec = nwbfile.add_icephys_simultaneous_recording(recordings=recordings)
        simultaneous_recordings.append(sim_rec)
    nwbfile.add_icephys_sequential_recording(
        simultaneous_recordings=simultaneous_recordings, stimulus_type=stimulus_type
    )
    # TODO
    # # Add a list of sequential recordings table indices as a repetition
    # run_index = nwbfile.add_icephys_repetition(
    #     sequential_recordings=[
    #         seq_rec,
    #     ]
    # )
    # # Add a list of repetition table indices as a experimental condition
    # nwbfile.add_icephys_experimental_condition(
    #     repetitions=[
    #         run_index,
    #     ]
    # )
def add_all_to_nwbfile(
    neo_reader,
    nwbfile=None,
    metadata: dict = None,
    compression: Optional[str] = "gzip",
    icephys_experiment_type: Optional[str] = "voltage_clamp",
    stimulus_type: Optional[str] = None,
    skip_electrodes: Tuple[int] = (),
):
    """
    Auxiliary static method for nwbextractor.

    Adds all recording-related information from the Neo reader and the
    metadata dictionary to the nwbfile object: devices first, then icephys
    electrodes, then the stimulus/response recordings.

    Parameters
    ----------
    neo_reader : Neo reader object
    nwbfile : NWBFile, optional
        NWB file to which the recording information is to be added.
    metadata : dict, optional
        Metadata info for constructing the nwb file; check the auxiliary
        function docstrings for the expected format.
    compression : str, optional
        "gzip" (default) or "lzf"; set to None to disable all compression.
    icephys_experiment_type : str, optional
        One of 'voltage_clamp' (default), 'current_clamp' or 'izero'.
    stimulus_type : str, optional
    skip_electrodes : tuple, optional
        Electrode indices to skip.
    """
    if nwbfile is not None:
        assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"

    # Order matters: devices must exist before electrodes, and electrodes
    # before the recordings that reference them.
    add_device_from_metadata(nwbfile=nwbfile, modality="Icephys", metadata=metadata)
    add_icephys_electrode(neo_reader=neo_reader, nwbfile=nwbfile, metadata=metadata)
    add_icephys_recordings(
        neo_reader=neo_reader,
        nwbfile=nwbfile,
        metadata=metadata,
        icephys_experiment_type=icephys_experiment_type,
        stimulus_type=stimulus_type,
        skip_electrodes=skip_electrodes,
        compression=compression,
    )
def write_neo_to_nwb(
    neo_reader: neo.io.baseio.BaseIO,
    save_path: OptionalFilePathType = None,
    overwrite: bool = False,
    nwbfile=None,
    metadata: dict = None,
    compression: Optional[str] = "gzip",
    icephys_experiment_type: Optional[str] = None,
    stimulus_type: Optional[str] = None,
    skip_electrodes: Optional[tuple] = (),
):
    """
    Primary method for writing a Neo reader object to an NWBFile.

    Exactly one of ``save_path`` or ``nwbfile`` must be provided.

    Parameters
    ----------
    neo_reader: Neo reader
    save_path: PathType
        Required if an nwbfile is not passed. Must be the path to the nwbfile
        being appended, otherwise one is created and written.
    overwrite: bool
        If using save_path, whether to overwrite the NWBFile if it already exists.
    nwbfile: NWBFile
        Required if a save_path is not specified. If passed, this function
        will fill the relevant fields within the nwbfile.
    metadata: dict
        Metadata info for constructing the nwb file (optional). If omitted it is
        generated from the reader via ``get_nwb_metadata``. When a new file is
        created, ``metadata["NWBFile"]`` (if present) seeds the NWBFile
        constructor; the remaining keys are consumed by the ``add_*`` helpers.
    compression: str (optional, defaults to "gzip")
        Type of compression to use. Valid types are "gzip" and "lzf".
        Set to None to disable all compression.
    icephys_experiment_type: str (optional)
        Type of Icephys experiment. Allowed types are: 'voltage_clamp',
        'current_clamp' and 'izero'. Defaults to 'voltage_clamp'.
    stimulus_type: str (optional)
        Stimulus type stored with each sequential recording.
    skip_electrodes: tuple (optional)
        Indices of electrodes to skip when adding recordings.
    """
    if nwbfile is not None:
        assert isinstance(nwbfile, pynwb.NWBFile), "'nwbfile' should be of type pynwb.NWBFile"
    # NOTE: distutils is deprecated (PEP 632); kept to avoid a new dependency.
    assert (
        distutils.version.LooseVersion(pynwb.__version__) >= "1.3.3"
    ), "'write_neo_to_nwb' not supported for version < 1.3.3. Run pip install --upgrade pynwb"
    assert save_path is None or nwbfile is None, "Either pass a save_path location, or nwbfile object, but not both!"
    assert save_path is not None or nwbfile is not None, "Either a save_path location or an nwbfile object must be passed!"
    if metadata is None:
        metadata = get_nwb_metadata(neo_reader=neo_reader)
    # BUG FIX: previously None was forwarded verbatim, overriding the
    # 'voltage_clamp' default of add_all_to_nwbfile/add_icephys_recordings.
    if icephys_experiment_type is None:
        icephys_experiment_type = "voltage_clamp"
    kwargs = dict(
        neo_reader=neo_reader,
        metadata=metadata,
        compression=compression,
        icephys_experiment_type=icephys_experiment_type,
        stimulus_type=stimulus_type,
        skip_electrodes=skip_electrodes,
    )
    if nwbfile is None:
        # Append to an existing file unless overwrite was requested.
        if Path(save_path).is_file() and not overwrite:
            read_mode = "r+"
        else:
            read_mode = "w"
        with pynwb.NWBHDF5IO(str(save_path), mode=read_mode) as io:
            if read_mode == "r+":
                nwbfile = io.read()
            else:
                nwbfile_kwargs = dict(
                    session_description="Auto-generated by NwbRecordingExtractor without description.",
                    identifier=str(uuid.uuid4()),
                )
                if metadata is not None and "NWBFile" in metadata:
                    nwbfile_kwargs.update(metadata["NWBFile"])
                nwbfile = pynwb.NWBFile(**nwbfile_kwargs)
            add_all_to_nwbfile(nwbfile=nwbfile, **kwargs)
            io.write(nwbfile)
    else:
        add_all_to_nwbfile(nwbfile=nwbfile, **kwargs)
| 2.3125 | 2 |
tasks/openqa/e2eqa/async_indexer.py | DevSinghSachan/emdr2 | 51 | 12769945 | import os
import torch
import torch.distributed as dist
from megatron import get_args
from megatron import print_rank_0
from megatron.indexer_emdr2 import IndexBuilder
from megatron.checkpointing import get_checkpoint_tracker_filename
from megatron.initialize import init_distributed, _init_autoresume, _set_random_seed, \
_write_args_to_tensorboard, _initialize_mem_buffs
from megatron.mpu.initialize import set_data_parallel_group, set_model_parallel_group, init_emdr2_groups, \
get_train_group, get_index_group, get_data_parallel_group, get_new_chkpt_ready, get_new_index_ready, \
get_gloo_comm_group, get_exit_interval, initialize_mips_group, mips_is_initialized
# Cross-process signalling handles exchanged over the gloo group; populated at
# runtime from megatron.mpu.initialize (see AsyncIndexBuilder.__init__).
NEW_INDEX_READY = None
NEW_CHKPT_READY = None
# NOTE(review): EXIT_INTERVAL is never reassigned in the visible code —
# presumably reserved for get_exit_interval(); confirm before relying on it.
EXIT_INTERVAL = None
def pprint(*values):
    """Print *values* to stdout and flush immediately.

    Unbuffered output keeps multi-process log lines from interleaving badly.
    """
    print(*values, flush=True)
def initialize_and_run_async_megatron(allow_no_cuda=False):
    """Initialize distributed state, then split ranks into trainers and indexers.

    Ranks below ``args.max_training_rank`` return to the caller (they train);
    the remaining ranks build the retrieval index forever via AsyncIndexBuilder.

    :param allow_no_cuda: skip the CUDA availability assertion when True.
    """
    if not allow_no_cuda:
        assert torch.cuda.is_available(), "Megatron required CUDA."
    args = get_args()
    assert args.async_indexer and args.max_training_rank is not None
    init_distributed()
    setup_emdr2_groups_and_vars()
    pprint("finished setting up EMDR2 groups")
    if mips_is_initialized():
        print('MIPS group is already initialized')
    else:
        initialize_mips_group()
    _initialize_mem_buffs()
    # _init_autoresume()
    # pprint("finished setting up autoresume")
    # Random seeds for reproducibility.
    if args.rank == 0:
        pprint('> setting random seeds to {} ...'.format(args.seed))
    _set_random_seed(args.seed)
    # Write arguments to tensorboard.
    _write_args_to_tensorboard()
    pprint('finished writing args to tensorboard')
    # Global barrier: all ranks must finish initialization before splitting.
    torch.distributed.barrier()
    if torch.distributed.get_rank() < args.max_training_rank:
        # Trainer ranks synchronize among themselves and return to training.
        torch.distributed.barrier(get_data_parallel_group())
        print_rank_0("Trainer Group: All trainers ready.")
        return
    else:
        # Indexer ranks loop forever rebuilding the index.
        runner = AsyncIndexBuilder(args.rank)
        torch.distributed.barrier(get_data_parallel_group())
        print_rank_0("Indexer Group: All indexers ready.")
        runner.run_async()
def setup_emdr2_groups_and_vars():
    """Create the EMDR2 process groups and register this rank's groups.

    Ranks below ``max_training_rank`` join the trainer data-parallel group;
    the rest join the indexer group.
    """
    args = get_args()
    trainer_count = args.max_training_rank
    # Each rank is its own model-parallel group (no model parallelism yet).
    set_model_parallel_group(dist.new_group([args.rank]))
    init_emdr2_groups(trainer_count, dist.get_world_size())
    is_trainer = args.rank < trainer_count
    set_data_parallel_group(get_train_group() if is_trainer else get_index_group())
class AsyncIndexBuilder(IndexBuilder):
    """IndexBuilder that rebuilds the retrieval index in a loop, synchronizing
    with the trainer group through gloo broadcasts of the NEW_CHKPT_READY /
    NEW_INDEX_READY signal tensors.
    """

    def __init__(self, rank):
        super().__init__(call_load_attributes_func=False)
        self.rank = rank
        args = get_args()
        # Rank of the first indexer; it acts as the "main" builder.
        self.main_builder_idx = args.max_training_rank
        self.exit_handle = None
        # Get the path of the correct model to load: prefer the training
        # checkpoint if one exists, otherwise fall back to the pretrained DPR.
        iteration = 0
        tracker_filename = get_checkpoint_tracker_filename(args.load)
        if os.path.isfile(tracker_filename):
            with open(tracker_filename, 'r') as f:
                iteration = int(f.read().strip())
        if iteration > 0:
            model_load_path = args.load
            key_list = ['retriever/biencoder_model']
        else:
            model_load_path = args.pretrained_dpr_load
            key_list = None
        # Load the context encoder weights
        self.load_attributes(custom_load_path=model_load_path, key_list=key_list)
        # Fetch the shared signal tensors created by mpu.initialize.
        global NEW_INDEX_READY
        NEW_INDEX_READY = get_new_index_ready()
        global NEW_CHKPT_READY
        NEW_CHKPT_READY = get_new_chkpt_ready()

    def run_async(self):
        """Main indexer loop: wait for a checkpoint, rebuild the index, repeat."""
        args = get_args()
        global NEW_CHKPT_READY
        # When the indexing starts, wait for the NEW_CHKPT_READY signal from trainer process of rank=0
        dist.broadcast(NEW_CHKPT_READY, 0, group=get_gloo_comm_group())
        while True:
            # NOTE(review): self.is_main_builder is presumably set by the
            # IndexBuilder base class — confirm.
            if self.is_main_builder:
                print("Starting Indexing again!", flush=True)
            self.build_and_save_index()
            self.send_index_ready_signal()
            # Reload the freshly trained biencoder before the next pass.
            self.load_attributes(custom_load_path=args.load,
                                 key_list=['retriever/biencoder_model'])

    def send_index_ready_signal(self):
        """Announce the new index to the trainers, then wait for the next checkpoint."""
        global NEW_INDEX_READY
        global NEW_CHKPT_READY
        # send handle
        if self.is_main_builder:
            print("indexer group: broadcasting NEW INDEX READY MESSAGE", flush=True)
        # NOTE(review): dist.broadcast is a collective — every member of the
        # gloo group must call it; confirm non-main indexers participate too.
        dist.broadcast(NEW_INDEX_READY,
                       self.main_builder_idx,
                       group=get_gloo_comm_group(),
                       async_op=True)
        # recv handle
        dist.broadcast(NEW_CHKPT_READY, 0, group=get_gloo_comm_group())
| 2.15625 | 2 |
track_2_openCV/player_tracking.py | Batlytics/Batlytics | 0 | 12769946 | import cv2
import numpy as np
import mediapipe as mp
# MediaPipe pose-drawing helpers.
# NOTE(review): these are not referenced anywhere in this script's visible
# code — presumably kept for a planned pose-overlay step; confirm or remove.
mp_drawing = mp.solutions.drawing_utils
mp_drawing_styles = mp.solutions.drawing_styles
mp_pose = mp.solutions.pose
# ---------------------------------------------------------------------
# 3x3 sharpening convolution kernel (sum == 1, so overall brightness is kept).
# Renamed from ``filter`` to avoid shadowing the ``filter`` builtin.
SHARPEN_KERNEL = np.array(
    [
        [0, -1, 0],
        [-1, 5, -1],
        [0, -1, 0],
    ]
)


def sharpen(img):
    """Return a sharpened copy of *img* via a 3x3 convolution.

    :param img: BGR or grayscale image (numpy array).
    :return: sharpened image with the same depth as the input (ddepth=-1).
    """
    return cv2.filter2D(img, -1, SHARPEN_KERNEL)
# ---------------------------------------------------------------
# --- capture and bird's-eye rectification setup -------------------------
dim = (720, 385)
cap = cv2.VideoCapture('../video_file/Hackathon_high_home_1_Trim.mp4')

# Source quad (court corners in the camera view) and destination quad
# (full output frame) for the top-down perspective warp.
pts1 = np.float32([[502, 57], [218, 57], [690, 320], [30, 320]])
pts2 = np.float32([[0, 0], [dim[0], 0], [0, dim[1]], [dim[0], dim[1]]])
# The warp matrix depends only on the fixed quads, so compute it once
# (the original recomputed it for every frame).
matrix = cv2.getPerspectiveTransform(pts1, pts2)


def _grab_frame():
    """Read, resize, warp and sharpen one frame; None at end of stream."""
    ret, frame = cap.read()
    if not ret:
        return None
    frame = cv2.resize(frame, dim)
    frame = cv2.warpPerspective(frame, matrix, dim)
    return sharpen(frame)


frame1 = _grab_frame()
frame2 = _grab_frame()

# BUG FIX: the original crashed at end of video (cv2.resize on None);
# we now stop cleanly when no more frames are available.
while frame1 is not None and frame2 is not None:
    # Frame differencing highlights moving players.
    diff = cv2.absdiff(frame1, frame2)
    gray = cv2.cvtColor(diff, cv2.COLOR_BGR2GRAY)
    blur = cv2.bilateralFilter(gray, 10, 510, 50)
    _, thresh = cv2.threshold(blur, 20, 255, cv2.THRESH_BINARY)
    dilated = cv2.dilate(thresh, None, iterations=3)
    contours, _ = cv2.findContours(dilated, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

    for contour in contours:
        (x, y, w, h) = cv2.boundingRect(contour)
        print(x, y)
        # Keep only blobs whose area is plausibly a player.
        if 100 < cv2.contourArea(contour) < 450:
            cv2.rectangle(frame1, (x, y), (x + w, y + h), (255, 255, 0), 1)

    cv2.imshow('video', frame1)
    frame1 = frame2
    frame2 = _grab_frame()

    if cv2.waitKey(27) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
# import cv2
# import sys
# (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
# if __name__ == '__main__' :
# # Set up tracker.
# # Instead of MIL, you can also use
# tracker_types = ['BOOSTING', 'MIL','KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']
# tracker_type = tracker_types[-1]
# if int(minor_ver) < 3:
# tracker = cv2.Tracker_create(tracker_type)
# else:
# if tracker_type == 'BOOSTING':
# tracker = cv2.TrackerBoosting_create()
# if tracker_type == 'MIL':
# tracker = cv2.TrackerMIL_create()
# if tracker_type == 'KCF':
# tracker = cv2.TrackerKCF_create()
# if tracker_type == 'TLD':
# tracker = cv2.TrackerTLD_create()
# if tracker_type == 'MEDIANFLOW':
# tracker = cv2.TrackerMedianFlow_create()
# if tracker_type == 'GOTURN':
# tracker = cv2.TrackerGOTURN_create()
# if tracker_type == 'MOSSE':
# tracker = cv2.TrackerMOSSE_create()
# if tracker_type == "CSRT":
# tracker = cv2.TrackerCSRT_create()
# # Read video
# video = cv2.VideoCapture("../video_file/Hackathon_high_home_1_Trim.mp4")
# # Exit if video not opened.
# if not video.isOpened():
# print("Could not open video")
# sys.exit()
# # Read first frame.
# ok, frame = video.read()
# if not ok:
# print('Cannot read video file')
# sys.exit()
# # Define an initial bounding box
# bbox = (287, 23, 86, 320)
# # Uncomment the line below to select a different bounding box
# bbox = cv2.selectROI(frame)
# # Initialize tracker with first frame and bounding box
# ok = tracker.init(frame, bbox)
# while True:
# # Read a new frame
# ok, frame = video.read()
# if not ok:
# break
# # Start timer
# timer = cv2.getTickCount()
# # Update tracker
# ok, bbox = tracker.update(frame)
# # Calculate Frames per second (FPS)
# fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);
# # Draw bounding box
# if ok:
# # Tracking success
# p1 = (int(bbox[0]), int(bbox[1]))
# p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
# cv2.rectangle(frame, p1, p2, (255,0,0), 2, 1)
# else :
# # Tracking failure
# cv2.putText(frame, "Tracking failure detected", (100,80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,(0,0,255),2)
# # Display tracker type on frame
# cv2.putText(frame, tracker_type + " Tracker", (100,20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
# # Display FPS on frame
# cv2.putText(frame, "FPS : " + str(int(fps)), (100,50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50), 2);
# # Display result
# cv2.imshow("Tracking", frame)
# # Exit if ESC pressed
# k = cv2.waitKey(1) & 0xff
# if k == 27 : break | 2.484375 | 2 |
data.py | nickgerend/GoodRead | 0 | 12769947 | # Written by: <NAME>, @dataoutsider
# Viz: "Good Read", enjoy!
import pandas as pd
import numpy as np
import requests
from bs4 import BeautifulSoup
import re
import matplotlib.pyplot as plt
import os
#region Site 1: https://mostrecommendedbooks.com/best-books
def crawl(url):
    """Fetch *url* over HTTP and return the parsed BeautifulSoup document."""
    response = requests.get(url)
    return BeautifulSoup(response.text, 'html.parser')
# Collect the per-category "best ... books" page links from the index page.
main_page = 'https://mostrecommendedbooks.com/best-books'
links = []
for tag in crawl(main_page).find_all('ul', {'class':'styles_sub-best-books__1VZwz'}):
    for attribute in tag.find_all('a'):
        element = attribute.get('href')
        # Rebase the relative href onto the index URL.
        # NOTE(review): element.replace(element, X) is just X — this is
        # effectively main_page + element[1:]; the replace call is redundant.
        link = element.replace(element, main_page+element[1:])
        links.append(link)
# Scrape each category page into: book title -> {category list, title, author}.
books = {}
for page in links:
    # Category name is embedded in the URL as best-<category>-books.
    link = page.replace('-booksbest','')
    category_s = re.search('best-(.*)-books', link)
    category = category_s.group(1).replace('-', ' ')
    soup = crawl(link)
    titles = []   # NOTE(review): unused — leftover from an earlier version?
    authors = []  # NOTE(review): unused — leftover from an earlier version?
    for tag in soup.find_all('div', {'class':'styles_book-category-text__272Fl'}):
        book = ''
        for name in tag.find_all('h2'):
            book = name.text
        if book in books:
            # Book already seen on another category page: append the category.
            books[book]['category'].append(category)
        else:
            books[book] = {}
            books[book]['category'] = [category]
            books[book]['title'] = book
        # Take only the first h3 as the author.
        for a in tag.find_all('h3'):
            books[book]['author'] = a.text
            break
        # NOTE(review): this unconditional break stops after the first book
        # div on each page — looks like a debugging leftover; confirm.
        break
df = pd.DataFrame.from_dict({(i): books[i] for i in books.keys() }, orient='index')
print(df)
df.to_csv(os.path.dirname(__file__) + '/mostrecommendedbooks.csv', encoding='utf-8', index=False)
#endregion
#region example
# Example: collapse per-recommender rows into one row per (title, author),
# joining the recommenders with '|'.
recommended_books = pd.read_csv(os.path.dirname(__file__) + '/most_recommended.csv', header=0, names=['recommender', 'title', 'author'])
recommended_books_reshaped = recommended_books.groupby(['title', 'author'])['recommender'].apply(lambda x: '|'.join(x)).reset_index()
# Strip stray double quotes from the titles.
recommended_books_reshaped['title'] = recommended_books_reshaped['title'].str.replace('"','')
query_books = recommended_books_reshaped['title'].tolist()
query_authors = recommended_books_reshaped['author'].tolist()
#endregion | 3.15625 | 3 |
GRU/gru.py | AndryRafam/Movie-Sentiment-Analysis | 1 | 12769948 | import tensorflow as tf
import numpy as np
import pandas as pd
import re
import nltk
import string
import random
# Seed every RNG source once for reproducibility.
# (The original called tf.random.set_seed(42) twice; the second call was
# redundant and has been removed.)
random.seed(0)
np.random.seed(0)
tf.random.set_seed(42)
from nltk.tokenize import word_tokenize
from nltk.tokenize.treebank import TreebankWordDetokenizer
# IMDB reviews dataset; must contain a 'review' (text) column and a
# 'sentiment' ('positive'/'negative') column — both are used below.
df = pd.read_csv("imdb.csv")
def preprocess(x):
    """Normalize a raw review string for tokenization.

    Lowercases, strips non-ASCII characters, then removes URLs, @mentions,
    #hashtags, apostrophe suffixes ('s, 't, ...), punctuation and tokens
    containing digits, finally collapsing runs of whitespace.
    """
    x = x.lower()
    x = x.encode("ascii", "ignore").decode()
    # (pattern, replacement) pairs applied in order; the final rule collapses
    # the extra spaces introduced by the earlier substitutions.
    cleanup_steps = (
        (r"https*\S+", " "),                             # URLs
        (r"@\S+", " "),                                  # @mentions
        (r"#\S+", " "),                                  # #hashtags
        (r"\'\w+", ""),                                  # apostrophe suffixes
        ("[%s]" % re.escape(string.punctuation), " "),   # punctuation
        (r"\w*\d+\w*", ""),                              # tokens with digits
        (r"\s{2,}", " "),                                # collapse whitespace
    )
    for pattern, replacement in cleanup_steps:
        x = re.sub(pattern, replacement, x)
    return x
# Clean every review, tokenize it with NLTK, then stitch the tokens back
# into a normalized string.
data_to_list = df["review"].values.tolist()
temp = [preprocess(review) for review in data_to_list]


def tokenize(texts):
    """Yield the NLTK word tokens of each item in *texts*."""
    for text in texts:
        yield word_tokenize(str(text))


def detokenize(tokens):
    """Rebuild a single string from a token list."""
    return TreebankWordDetokenizer().detokenize(tokens)


data_words = list(tokenize(temp))
final_data = [detokenize(tokens) for tokens in data_words]
print(final_data[:5])
final_data = np.array(final_data)
import pickle
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import ModelCheckpoint
from sklearn.model_selection import train_test_split
max_words = 20000  # vocabulary size kept by the tokenizer
max_len = 200      # padded sequence length

tokenizer = Tokenizer(num_words=max_words)
tokenizer.fit_on_texts(final_data)
tweets = pad_sequences(tokenizer.texts_to_sequences(final_data), maxlen=max_len)

# Persist the fitted tokenizer so inference can reuse the same vocabulary.
with open("tokenizer.pickle", "wb") as handle:
    pickle.dump(tokenizer, handle, protocol=pickle.HIGHEST_PROTOCOL)
print(tweets)

# Map sentiment strings to class ids (negative -> 0, positive -> 1); any
# other value is skipped, matching the original if/elif filtering.
sentiment_to_id = {"negative": 0, "positive": 1}
labels = np.array(df["sentiment"])
l = np.array([sentiment_to_id[s] for s in labels if s in sentiment_to_id])
labels = tf.keras.utils.to_categorical(l, 2, dtype="int32")
del l

x_train, x_test, y_train, y_test = train_test_split(tweets, labels, random_state=42)
x_train, x_val, y_train, y_val = train_test_split(x_train, y_train, test_size=0.25, random_state=42)
# Two-layer GRU classifier over the embedded token sequence.
inputs = tf.keras.Input(shape=(None,), dtype="int32")
x = layers.Embedding(max_words, 128)(inputs)
x = layers.GRU(64, return_sequences=True)(x)
x = layers.GRU(64)(x)
outputs = layers.Dense(2, activation="sigmoid")(x)
model = tf.keras.Model(inputs, outputs)
model.summary()

model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
# Keep only the weights with the best validation accuracy.
checkpoint = ModelCheckpoint("model_gru.hdf5", monitor="val_accuracy", verbose=1, save_best_only=True, save_weights_only=False)
model.fit(x_train, y_train, batch_size=32, epochs=5, validation_data=(x_val, y_val), callbacks=[checkpoint])

best = tf.keras.models.load_model("model_gru.hdf5")
loss, acc = best.evaluate(x_test, y_test, verbose=2)
# BUG FIX: the original called best.evaluate(x_test) here, which re-runs
# evaluation without labels; class probabilities require predict().
predictions = best.predict(x_test)
print("Test acc: {:.2f} %".format(100 * acc))
print("Test loss: {:.2f} %".format(100 * loss))
src/ner/issa_ner/script/main_threading.py | issa-project/knowledge-graph | 0 | 12769949 | <reponame>issa-project/knowledge-graph
import concurrent.futures
import os
import pathlib
import traceback
import cotools
import tqdm
import pycld2
#TODO: obvious!
import sys
sys.path.append('/mnt/c/Users/abobashe/Documents/ISSA/AgritropToRDF/ner/cord19_ner')
from utils.config import Config
from utils.iodata import Output
from wrapper.wrapper_annotator import WrapperAnnotator
def func_entity_fishing(d):
    """
    Helper function for processing a paper in a thread with Entity-fishing.

    Relies on globals set in __main__: ``wa`` (annotator wrapper),
    ``path_output``, ``folder`` and ``pbar`` (progress bar).

    :param d: content of the paper (CORD-19 JSON dict)
    :return: result of the annotation with Entity-fishing in JSON || None if the JSON annotation exists already
    """
    d_json = {}
    paper_id = d['paper_id']
    title = d["metadata"]["title"]
    # Skip papers whose annotation file already exists on disk.
    if os.path.isfile(path_output + '/entity-fishing/' + folder + '/' + paper_id + '.json'):
        pbar.update()
        return None
    # Detect the paper language from the body text; fall back to English.
    try:
        body_text = cotools.text(d)
        isreliable, textbytesfound, details, vectors = pycld2.detect(body_text, returnVectors=True)
        lang = vectors[0][3]
    # None or out of range
    except Exception:
        lang = 'en'
    d_json["paper_id"] = paper_id
    d_json["lang"] = lang
    try:
        abstract = cotools.abstract(d)
        d_json["abstract"] = wa.request_entity_fishing(abstract, lang)
    # no abstract
    except Exception:
        pass
    d_json["title"] = wa.request_entity_fishing(title, lang)
    # NOTE(review): if cotools.text(d) raised above, body_text is unbound here
    # and the next line fails with NameError — confirm whether that can happen.
    d_json["body_text"] = wa.request_entity_fishing(body_text, lang)
    d_json["ref_entries"] = {}
    for key, value in d["ref_entries"].items():
        d_json["ref_entries"][key] = wa.request_entity_fishing(value["text"])
    # d_json["bib_entries"] = {}
    # for key, value in d["bib_entries"].items():
    #     d_json["bib_entries"][key] = wa.request_entity_fishing(value["title"])
    d_json["back_matter"] = []
    for matter in d["back_matter"]:
        for key, value in matter.items():
            if key == 'text':
                text = {'text': wa.request_entity_fishing(value)}
                d_json["back_matter"].append(text)
    Output().save_json(d_json, path_output + '/entity-fishing/' + folder + '/' + d["paper_id"] + '.json')
    pbar.update()
    return d_json
def func_dbpedia_spotlight(d):
    """
    Helper function for processing a paper in a thread with DBpedia Spotlight.

    Relies on globals set in __main__: ``wa`` (annotator wrapper),
    ``path_output``, ``folder`` and ``pbar`` (progress bar).

    :param d: content of the paper (CORD-19 JSON dict)
    :return: result of the annotation with DBpedia Spotlight in JSON || None if the JSON annotation exists already
    """
    d_json = {}
    paper_id = d['paper_id']
    title = d["metadata"]["title"]
    # Skip papers whose annotation file already exists on disk.
    if os.path.isfile(path_output + '/dbpedia-spotlight/' + folder + '/' + paper_id + '.json'):
        pbar.update()
        return None
    # Compute the body text once (BUG FIX: previously it was assigned inside
    # the try block, so a cotools.text failure left it unbound and the
    # annotation below crashed with NameError).
    body_text = cotools.text(d)
    # Detect the paper language; fall back to English.
    try:
        isreliable, textbytesfound, details, vectors = pycld2.detect(body_text, returnVectors=True)
        lang = vectors[0][3]
    # detection result missing or out of range
    except Exception:
        lang = 'en'
    d_json["paper_id"] = paper_id
    d_json["lang"] = lang
    try:
        abstract = cotools.abstract(d)
        d_json["abstract"] = wa.request_dbpedia_spotlight(abstract, lang)
    # no abstract
    except Exception:
        pass
    d_json["title"] = wa.request_dbpedia_spotlight(title, lang)
    d_json["body_text"] = wa.request_dbpedia_spotlight(body_text, lang)
    # TODO: decide whether ref_entries / bib_entries / back_matter should also
    # be annotated here, as done for Entity-fishing and NCBO.
    # BUG FIX: save under the same <folder>-qualified path that the existence
    # check above tests; the original saved without <folder>, so the check
    # never matched and papers were re-annotated on every run.
    Output().save_json(d_json, path_output + '/dbpedia-spotlight/' + folder + '/' + d["paper_id"] + '.json')
    pbar.update()
    return d_json
def func_ncbo(d):
    """
    Helper function for processing a paper in a thread with NCBO BioPortal Annotator+.

    Relies on globals set in __main__: ``wa`` (annotator wrapper),
    ``path_output``, ``folder`` and ``pbar`` (progress bar).

    :param d: content of the paper (CORD-19 JSON dict)
    :return: result of the annotation with NCBO BioPortal Annotator+ in JSON ||
        None if the JSON annotation exists already
    """
    d_json = {}
    paper_id = d['paper_id']
    title = d["metadata"]["title"]
    # Skip papers already annotated; keep the progress bar in sync like the
    # other annotator helpers (the original forgot pbar.update() here).
    if os.path.isfile(path_output + '/ncbo/' + folder + '/' + paper_id + '.json'):
        pbar.update()
        return None
    # NOTE: the original also returned early when an *entity-fishing* output
    # existed under a hard-coded '/data/CORD19-Annotation-multi' path; that was
    # a copy-paste leftover that wrongly skipped NCBO annotation — removed.
    # Compute the body text once; language detection falls back to English.
    body_text = cotools.text(d)
    try:
        isreliable, textbytesfound, details, vectors = pycld2.detect(body_text, returnVectors=True)
        lang = vectors[0][3]
    # detection result missing or out of range
    except Exception:
        lang = 'en'
    d_json["paper_id"] = paper_id
    d_json["lang"] = lang
    try:
        abstract = cotools.abstract(d)
        d_json["abstract"] = wa.request_ncbo_plus(abstract, lang)
    # no abstract
    except Exception:
        pass
    d_json["title"] = wa.request_ncbo_plus(title, lang)
    d_json["body_text"] = wa.request_ncbo_plus(body_text, lang)
    d_json["ref_entries"] = {}
    for key, value in d["ref_entries"].items():
        d_json["ref_entries"][key] = wa.request_ncbo_plus(value["text"], lang)
    d_json["back_matter"] = []
    for matter in d["back_matter"]:
        for key, value in matter.items():
            if key == 'text':
                # Pass the detected language here too, for consistency with the
                # other text fields (the original omitted it).
                text = {'text': wa.request_ncbo_plus(value, lang)}
                d_json["back_matter"].append(text)
    pbar.update()
    Output().save_json(d_json, path_output + '/ncbo/' + folder + '/' + d["paper_id"] + '.json')
    return d_json
if __name__ == '__main__':
    # Path to the CORD-19 dataset
    project_resources = Config.project_resources
    # Path where the annotated files will be saved
    path_output = Config.corpus_annotated
    pathlib.Path(os.path.dirname(project_resources)).mkdir(parents=True, exist_ok=True)
    pathlib.Path(os.path.dirname(path_output)).mkdir(parents=True, exist_ok=True)
    if Config.DOWNLOAD_CORPUS:
        cotools.download(dir=project_resources)
    wa = WrapperAnnotator()
    folders_corpus = ["pdf_json", "pmc_json"]
    for folder in folders_corpus:
        data = cotools.Paperset(project_resources + '/' + folder)
        # You may want to change the number of workers.
        # NOTE(review): the worker functions read the globals `folder`, `pbar`,
        # `wa` and `path_output`; with ProcessPoolExecutor these are inherited
        # only on fork-based platforms, and `pbar.update()` in a child process
        # cannot advance the parent's bar — confirm intended behaviour.
        if Config.ENTITY_FISHING:
            with tqdm.tqdm(total=len(data)) as pbar:
                with concurrent.futures.ProcessPoolExecutor() as executor:
                    executor.map(func_entity_fishing, data)
        if Config.DBPEDIA_SPOTLIGHT:
            with tqdm.tqdm(total=len(data)) as pbar:
                with concurrent.futures.ProcessPoolExecutor() as executor:
                    executor.map(func_dbpedia_spotlight, data)
        if Config.NCBO_BIOPORTAL:
            with tqdm.tqdm(total=len(data)) as pbar:
                with concurrent.futures.ProcessPoolExecutor() as executor:
                    executor.map(func_ncbo, data)
| 1.90625 | 2 |
disparity.py | penguinmenac3/computervision-toolkit | 1 | 12769950 | <reponame>penguinmenac3/computervision-toolkit
import cv2
import time
# StereoBM block matcher built once at import time and reused per frame pair.
stereo = cv2.StereoBM_create(numDisparities=16, blockSize=55)


def depth_map(imgL, imgR):
    """Return the block-matching disparity map for a grayscale stereo pair."""
    disparity = stereo.compute(imgL, imgR)
    return disparity
def test():
    """Live demo: show the disparity map of two webcams and print the FPS.

    Press 'q' in the display window to quit.
    """
    cap_l = cv2.VideoCapture(2)  # left camera device index
    cap_r = cv2.VideoCapture(1)  # right camera device index
    last_time = time.time()
    while True:
        # Capture frame-by-frame
        ret_l, left = cap_l.read()
        ret_r, right = cap_r.read()
        if ret_l and ret_r:
            # StereoBM works on single-channel images.
            left = cv2.cvtColor(left, cv2.COLOR_BGR2GRAY)
            right = cv2.cvtColor(right, cv2.COLOR_BGR2GRAY)
            disparity = depth_map(left, right)
            # Display the resulting frame
            cv2.imshow('frame', disparity)
            print("FPS: %.1f" % (1 / (time.time() - last_time)))
            last_time = time.time()
        # NOTE(review): the quit check is placed at loop level so 'q' still
        # works when a camera read fails — confirm against original intent.
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    # When everything done, release the capture
    cap_l.release()
    cap_r.release()
    cv2.destroyAllWindows()
# Run the stereo webcam demo only when executed directly.
if __name__ == "__main__":
    test()
| 2.703125 | 3 |