content stringlengths 5 1.05M |
|---|
from talon import Module
mod = Module()
@mod.capture(rule="matching")
def cursorless_matching_pair_symbol(m) -> dict:
    """Map the spoken form "matching" to the Cursorless matchingPairSymbol modifier.

    Returns the modifier payload dict consumed by Cursorless; the original
    ``-> str`` annotation did not match the dict actually returned.
    """
    return {"modifier": {"type": "matchingPairSymbol"}}
|
# Generated by Django 3.2.5 on 2021-07-20 17:30
from django.db import migrations
def secrets_to_nbsecrets(apps, schema_editor):
    """Re-point ObjectChange rows from the legacy ``secrets`` app to ``netbox_secretstore``."""
    model_names = ('secret', 'secretrole', 'userkey', 'sessionkey')
    try:
        ObjectChange = apps.get_model('extras', 'ObjectChange')
        ContentType = apps.get_model('contenttypes', 'ContentType')
        # Resolve every old and new content type up front so a missing type
        # aborts this step before any rows have been rewritten.
        old_cts = {name: ContentType.objects.get(app_label='secrets', model=name) for name in model_names}
        new_cts = {name: ContentType.objects.get(app_label='netbox_secretstore', model=name) for name in model_names}
        for name in ('secret', 'secretrole', 'sessionkey', 'userkey'):
            ObjectChange.objects.filter(
                changed_object_type_id=old_cts[name].id
            ).update(changed_object_type_id=new_cts[name].id)
    except ContentType.DoesNotExist:
        # The secrets app was never installed; nothing to migrate.
        pass
def nbsecrets_to_secrets(apps, schema_editor):
    """Reverse migration: re-point ObjectChange rows back from ``netbox_secretstore`` to ``secrets``."""
    model_names = ('secret', 'secretrole', 'userkey', 'sessionkey')
    try:
        ObjectChange = apps.get_model('extras', 'ObjectChange')
        ContentType = apps.get_model('contenttypes', 'ContentType')
        # Resolve every content type before touching any rows so a missing
        # type aborts cleanly.
        old_cts = {name: ContentType.objects.get(app_label='secrets', model=name) for name in model_names}
        new_cts = {name: ContentType.objects.get(app_label='netbox_secretstore', model=name) for name in model_names}
        for name in ('secret', 'secretrole', 'sessionkey', 'userkey'):
            ObjectChange.objects.filter(
                changed_object_type_id=new_cts[name].id
            ).update(changed_object_type_id=old_cts[name].id)
    except ContentType.DoesNotExist:
        # Nothing to restore when the content types are absent.
        pass
class Migration(migrations.Migration):
    """Data migration moving ObjectChange content-type references to the plugin.

    Forward: ``secrets`` -> ``netbox_secretstore``; reverse restores them.
    """
    dependencies = [
        ('netbox_secretstore', '0002_rename_table'),
    ]
    operations = [
        # RunPython pair so the migration is fully reversible.
        migrations.RunPython(code=secrets_to_nbsecrets, reverse_code=nbsecrets_to_secrets)
    ]
|
# Data Parallel Control (dpctl)
#
# Copyright 2020-2021 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pytest
import dpctl.tensor as dpt
class Dummy:
    """Minimal array-namespace stand-in used by the dunder smoke tests.

    Binary operations prefer the ``usm_ndarray`` operand so the dunder under
    test always receives an array-like result back.
    """

    @staticmethod
    def abs(a):
        # Identity is sufficient: the tests only require the attribute to exist.
        return a

    @staticmethod
    def add(a, b):
        return a if isinstance(a, dpt.usm_ndarray) else b

    @staticmethod
    def subtract(a, b):
        return a if isinstance(a, dpt.usm_ndarray) else b

    @staticmethod
    def multiply(a, b):
        return a if isinstance(a, dpt.usm_ndarray) else b
@pytest.mark.parametrize("namespace", [None, Dummy()])
def test_fp_ops(namespace):
    """Invoke each floating-point dunder once and check none of them raises."""
    X = dpt.usm_ndarray(1, "d")
    X._set_namespace(namespace)
    assert X.__array_namespace__() is namespace
    X[0] = -2.5
    # (dunder name, positional args) in the original call order.
    dunder_calls = [
        ("__abs__", ()),
        ("__add__", (1.0,)),
        ("__radd__", (1.0,)),
        ("__sub__", (1.0,)),
        ("__rsub__", (1.0,)),
        ("__mul__", (1.0,)),
        ("__rmul__", (1.0,)),
        ("__truediv__", (1.0,)),
        ("__rtruediv__", (1.0,)),
        ("__floordiv__", (1.0,)),
        ("__rfloordiv__", (1.0,)),
        ("__pos__", ()),
        ("__neg__", ()),
        ("__eq__", (-2.5,)),
        ("__ne__", (-2.5,)),
        ("__le__", (-2.5,)),
        ("__ge__", (-2.5,)),
        ("__gt__", (-2.0,)),
        ("__iadd__", (X,)),
        ("__isub__", (X,)),
        ("__imul__", (X,)),
        ("__itruediv__", (1.0,)),
        ("__ifloordiv__", (1.0,)),
    ]
    for name, args in dunder_calls:
        getattr(X, name)(*args)
@pytest.mark.parametrize("namespace", [None, Dummy()])
def test_int_ops(namespace):
    """Invoke each integer/bitwise dunder once and check none of them raises."""
    X = dpt.usm_ndarray(1, "i4")
    X._set_namespace(namespace)
    assert X.__array_namespace__() is namespace
    # (dunder name, positional args) in the original call order.
    dunder_calls = [
        ("__lshift__", (2,)),
        ("__rshift__", (2,)),
        ("__rlshift__", (2,)),
        ("__rrshift__", (2,)),
        ("__ilshift__", (2,)),
        ("__irshift__", (2,)),
        ("__and__", (X,)),
        ("__rand__", (X,)),
        ("__iand__", (X,)),
        ("__or__", (X,)),
        ("__ror__", (X,)),
        ("__ior__", (X,)),
        ("__xor__", (X,)),
        ("__rxor__", (X,)),
        ("__ixor__", (X,)),
        ("__invert__", ()),
        ("__mod__", (5,)),
        ("__rmod__", (5,)),
        ("__imod__", (5,)),
        ("__pow__", (2,)),
        ("__rpow__", (2,)),
        ("__ipow__", (2,)),
    ]
    for name, args in dunder_calls:
        getattr(X, name)(*args)
@pytest.mark.parametrize("namespace", [None, Dummy()])
def test_mat_ops(namespace):
    """Invoke the matmul dunders on a 3x3 identity and check none of them raises."""
    identity = dpt.from_numpy(np.eye(3, 3, dtype="d"))
    identity._set_namespace(namespace)
    assert identity.__array_namespace__() is namespace
    for dunder in ("__matmul__", "__imatmul__", "__rmatmul__"):
        getattr(identity, dunder)(identity)
|
import subprocess
import traceback
# for python 3.x
def runtime_exec(command):
    """Run *command* with ``subprocess.run`` and collect its output.

    :param command: the command to execute; a list of arguments is safest
        (no shell involved), a plain string is passed through unchanged.
    :return: dict with ``stdout`` (always, when the process was spawned),
        ``stderr`` (only when the command exited non-zero), or ``Error``
        when subprocess raised ``CalledProcessError``.
    """
    msg = {}
    try:
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)
        if int(result.returncode) != 0:
            print('script output: {0}'.format(result.stdout))
            print('script error: {0}'.format(result.stderr))
            msg["stderr"] = str(result.stderr)
        else:
            # Bug fix: the success message used to be printed unconditionally,
            # even when the command exited with a non-zero status.
            print('script executed successfully {0}'.format(result.stdout))
        msg["stdout"] = result.stdout
        return msg
    except subprocess.CalledProcessError as e:
        # NOTE(review): only reachable if a caller adds check=True; kept for
        # backward compatibility. OSError (missing executable) still propagates,
        # as in the original.
        print("Calledprocerr")
        print(traceback.format_exc())
        msg["Error"] = "Error performing get_application_log " + str(e)
        return msg
import os

if __name__ == '__main__':
    # Portability fix: build the path with os.path.join instead of manually
    # joining with '\\', which only produced a valid path on Windows.
    command = os.path.join(os.getcwd(), 'dir.bat')
    runtime_exec(command)
#!/usr/bin/env python
"""Django DDP Presentation Project."""
import os.path
from setuptools import setup, find_packages
# Read the long description eagerly so the file handle is closed promptly;
# the previous inline open('README.rst').read() leaked the handle.
with open('README.rst') as readme:
    LONG_DESCRIPTION = readme.read()

setup(
    name='dddppp',
    version='0.0.1',
    description=__doc__,
    long_description=LONG_DESCRIPTION,
    author='Tyson Clugg',
    author_email='tyson@clugg.net',
    url='https://github.com/tysonclugg/dddppp',
    packages=find_packages(),
    include_package_data=True,
    install_requires=[
        'Django>=1.8.3',
        'django-ddp>=0.10.0',
        'django-mptt>=0.7.4',
        'django-mptt-admin>=0.2.1',
        'django-orderable>=3.1.0',
        'pybars3>=0.9.1',
        'whitenoise>=2.0',
    ],
    scripts=[
        'manage.py',
    ],
    classifiers=[
        "License :: OSI Approved :: MIT License",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 3",
        "Topic :: Internet :: WWW/HTTP",
    ],
)
|
# Generated by Django 2.2.2 on 2019-08-31 20:05
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the ``days_active`` and ``longevity`` integer columns to Statistic."""
    dependencies = [
        ('source_optics', '0020_statistic_days_before_joined'),
    ]
    operations = [
        # Both fields default to 0 so existing rows need no backfill.
        migrations.AddField(
            model_name='statistic',
            name='days_active',
            field=models.IntegerField(blank=True, default=0),
        ),
        migrations.AddField(
            model_name='statistic',
            name='longevity',
            field=models.IntegerField(blank=True, default=0),
        ),
    ]
|
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class LoadTestErrors(Model):
    """LoadTestErrors.
    :param count:
    :type count: int
    :param occurrences:
    :type occurrences: int
    :param types:
    :type types: list of :class:`object <microsoft.-visual-studio.-test-service.-web-api-model.v4_1.models.object>`
    :param url:
    :type url: str
    """
    # Maps attribute names to their wire-format keys/types for msrest serialization.
    # NOTE(review): generated file -- keep this in sync with __init__ if regenerated.
    _attribute_map = {
        'count': {'key': 'count', 'type': 'int'},
        'occurrences': {'key': 'occurrences', 'type': 'int'},
        'types': {'key': 'types', 'type': '[object]'},
        'url': {'key': 'url', 'type': 'str'}
    }
    def __init__(self, count=None, occurrences=None, types=None, url=None):
        super(LoadTestErrors, self).__init__()
        self.count = count
        self.occurrences = occurrences
        self.types = types
        self.url = url
|
import pandas as pd
import numpy as np
import re
import pickle as pk
np.random.seed(seed=7)  # fixed seed: deterministic sampling/shuffling across runs
data_path = './data-toxic-kaggle/train.csv'
pkl_path = './data-toxic-kaggle/toxic_comments_100.pkl'
perturbed_path = './data-toxic-kaggle/toxic_comments_100_perturbed.pkl' # perturbed by Edwin's script
# NOTE(review): the assignment below immediately overrides the previous one,
# so only the "mm_even_p1" pickle is ever used -- keep/remove one deliberately.
perturbed_path = './data-toxic-kaggle/toxic_comments_100_mm_even_p1.pkl' # perturbed by Steffen_even script
def clean_str(string, TREC=False):
    """
    Tokenization/string cleaning for all datasets except for SST.
    Every dataset is lower cased except for TREC.
    """
    # (pattern, replacement) pairs applied in order; order matters because the
    # contraction splits must run before punctuation is blanked out and the
    # whitespace squeeze must run last.
    substitutions = (
        (r"[^A-Za-z0-9(),!?\'\`]", " "),
        (r"[0-9]+", " "),
        (r"\'s", " 's"),
        (r"\'ve", " 've"),
        (r"n\'t", " n't"),
        (r"\'re", " 're"),
        (r"\'d", " 'd"),
        (r"\'ll", " 'll"),
        (r",", " "),
        (r"!", " "),
        (r"\(", " "),
        (r"\)", " "),
        (r"\?", " "),
        (r"\s{2,}", " "),
    )
    for pattern, replacement in substitutions:
        string = re.sub(pattern, replacement, string)
    return string.strip() if TREC else string.strip().lower()
def take_sample():
    """Sample 100 short toxic comments from the Kaggle train CSV, clean them
    and pickle the resulting list to ``pkl_path``."""
    df = pd.read_csv(data_path)
    toxic_comments = df.loc[df['toxic'] == 1]['comment_text'].tolist()
    print("The number of toxic comments: %d"%len(toxic_comments))
    toxic_comments_filterd = [comment for comment in toxic_comments if len(comment) < 50]
    print("The number of comments whose lengths are less than 50: %d"%len(toxic_comments_filterd))
    np.random.shuffle(toxic_comments_filterd)
    # take 100 samples from comments with length less than 50
    toxic_comments_filterd = toxic_comments_filterd[:100]
    # clean comments (TREC=True keeps the original casing)
    toxic_comments_cleand = [clean_str(comment, True) for comment in toxic_comments_filterd]
    print(toxic_comments_cleand)
    with open(pkl_path, 'wb') as f:
        pk.dump(toxic_comments_cleand, f)
def load_samples(perturbed_path, original_path, verbose=False):
    """Load the cleaned comments and their perturbed counterparts.

    Bug fix: the original implementation ignored ``original_path`` and always
    read the module-level ``pkl_path``; the parameter is now honoured.

    :param perturbed_path: pickle with the perturbed comments.
    :param original_path: pickle with the cleaned original comments.
    :param verbose: when True, print each original/perturbed pair.
    :return: tuple ``(originals, perturbed)`` of lists.
    """
    with open(original_path, 'rb') as f:
        toxic_comments_clean = pk.load(f)
    with open(perturbed_path, 'rb') as f:
        toxic_comments_perturbed = pk.load(f)
    if verbose:
        # zip instead of range(100): robust to lists shorter than 100 entries.
        for original, perturbed in zip(toxic_comments_clean, toxic_comments_perturbed):
            print("%s --> %s" % (original, perturbed))
    return toxic_comments_clean, toxic_comments_perturbed
def convert_conll_to_pkl(txt_path, pkl_path):
    """Convert a CoNLL-style file into a pickled list of sentence strings.

    Input format: one token per line, blank line between sentences. Each
    sentence becomes a single space-joined string.

    :param txt_path: input text file path.
    :param pkl_path: output pickle path (intentionally shadows the module-level name).
    """
    with open(txt_path, 'r') as f:
        lines = f.readlines()
    sentences = []
    sent = []
    for line in lines:
        if line != '\n':
            sent.append(line.strip())
        else:
            sentences.append(sent)
            sent = []
    # Bug fix: flush the final sentence when the file does not end with a
    # blank line; previously it was silently dropped.
    if sent:
        sentences.append(sent)
    out_lines = [' '.join(tokens) for tokens in sentences]
    with open(pkl_path, 'wb') as f:
        pk.dump(out_lines, f)
if __name__=="__main__":
    #load_samples(0.1,True)
    ### Use the below command if you would like to convert the output of the steffen's script to pkl
    # Convert each perturbation level's CoNLL output into its pickle counterpart.
    for p in [0.1, 0.2, 0.4, 0.6, 0.8]:
        convert_conll_to_pkl(txt_path= 'data-toxic-kaggle/toxic_comments_100_mm_even_p%.1f.txt'%p,
                             pkl_path= 'data-toxic-kaggle/toxic_comments_100_mm_even_p%.1f.pkl'%p)
|
#!/usr/bin/env python3
from typing import Optional
class CircularQueue:
    """Fixed-capacity FIFO queue backed by a ring buffer.

    One slot is kept permanently unused to distinguish "full" from "empty",
    hence the internal list holds ``capacity + 1`` cells.
    """

    def __init__(self, capacity: int = 10):
        self._capacity = capacity + 1
        self._items = [None] * self._capacity
        self._head = 0
        self._tail = 0

    @property
    def capacity(self) -> int:
        """Maximum number of elements the queue can hold."""
        return self._capacity - 1

    def is_empty(self) -> bool:
        return self._tail == self._head

    def is_full(self) -> bool:
        return (self._tail + 1) % self._capacity == self._head

    def enqueue(self, val: int) -> bool:
        """Append ``val``; return False (rather than raising) when full."""
        if self.is_full():
            return False
        self._items[self._tail] = val
        self._tail = (self._tail + 1) % self._capacity
        return True

    def dequeue(self) -> Optional[int]:
        """Pop and return the oldest element, or None when empty."""
        if self.is_empty():
            return None
        item = self._items[self._head]
        self._head = (self._head + 1) % self._capacity
        return item

    def _elements(self) -> list:
        """Live elements in FIFO order, handling head/tail wraparound."""
        if self._tail >= self._head:
            return self._items[self._head:self._tail]
        return self._items[self._head:] + self._items[:self._tail]

    def __str__(self):
        # Bug fix: the old slice ``self._items[head:tail]`` returned a wrong
        # (often empty) list once the tail wrapped around below the head.
        return f"{self._elements()}"

    def __repr__(self):
        return f"<CircularQueue items: {self._items}, capacity: {self._capacity}, head: {self._head}, tail: {self._tail}>"
def test_circular_queue():
    """Exercise the queue's happy path plus the empty and full edge cases."""
    queue = CircularQueue(10)
    assert queue.capacity == 10
    assert queue.is_empty()
    assert queue.dequeue() is None
    for value in (1, 2, 0):
        queue.enqueue(value)
    assert queue.dequeue() == 1
    assert queue.dequeue() == 2
    for value in range(1, 10):
        assert queue.enqueue(value)
    assert queue.is_full()
    assert not queue.enqueue(10)
|
from flask import Flask
from flask_migrate import Migrate
from .config import Config
from .models import db
from .handlers import routes
def create_app():
    """Application factory: configure Flask, bind the DB, migrations and blueprints."""
    app = Flask(__name__)
    app.config.from_object(Config)
    db.init_app(app)
    db.app = app
    # Constructing Migrate registers the flask-migrate machinery on the app;
    # the instance itself is not used afterwards.
    migrate = Migrate(app, db)  # noqa: F841
    for blueprint in routes:
        app.register_blueprint(blueprint)
    return app
|
# -*- coding: utf-8 -*-
"""Workchain to run Quantum ESPRESSO calculations that generate DoS and PDoS for a structure.
This requires four computations:
- SCF (pw.x), to generate the initial wavefunction.
- NSCF (pw.x), to generate eigenvalues, generally with a denser k-point mesh and tetrahedra occupations.
- Total DoS (dos.x), to generate total densities of state.
- Partial DoS (projwfc.x), to generate partial densities of state, by projecting wavefunctions onto atomic orbitals.
Additional functionality:
- Setting ``'align_to_fermi': True`` in the inputs will ensure that the energy range is centred around the Fermi
energy when `Emin` and `Emax` are provided for both the `dos` and `projwfc` inputs. This is useful when you are only
interested in a certain energy range around the Fermi energy. By default the energy range is extracted from the
NSCF calculation.
Storage memory management:
The wavefunction file(s) created by the nscf calculation can get very large (>100Gb).
These files must be copied to dos and projwfc calculations, so storage memory limits can easily be exceeded.
If this is an issue, setting the input ``serial_clean`` to ``True`` will not run these calculations in parallel,
but instead run in serial and clean directories when they are no longer required:
- Run the scf workchain
- Run the nscf workchain, then clean the scf calculation directories
- Run the dos calculation, then clean its directory
- Run the projwfc calculation, then clean its directory
Setting the input ``clean_workdir`` to ``True``, will clean any remaining directories, after the whole workchain has
terminated.
Also note that projwfc will fail if the scf/nscf calculations were run with a different number of procs/pools and
``wf_collect=.false.`` (this setting is deprecated in newer version of QE).
Related Resources:
- `Electronic structure calculations user guide <https://www.quantum-espresso.org/Doc/pw_user_guide/node10.html>`_
- `Density of States calculation blog <https://blog.levilentz.com/density-of-states-calculation/>`_
- `Quantum ESPRESSO tutorial slides <http://indico.ictp.it/event/7921/session/320/contribution/1261/material/0/0.pdf>`_
.. warning::
For QE v6.1, there is an issue using ``tetrahedra`` occupations, as is recommended for ``nscf``,
and both ``dos.x`` and ``projwfc.x`` will raise errors when reading the xml file
(see `this post <https://lists.quantum-espresso.org/pipermail/users/2017-November/039656.html>`_).
"""
from aiida import orm, plugins
from aiida.common import AttributeDict
from aiida.engine import ToContext, WorkChain, if_
from aiida.orm.nodes.data.base import to_aiida_type
import jsonschema
from aiida_quantumespresso.calculations.pw import PwCalculation
from aiida_quantumespresso.utils.mapping import prepare_process_inputs
from .protocols.utils import ProtocolMixin
def get_parameter_schema():
    """Return the ``PdosWorkChain`` input parameter schema (JSON Schema draft-07)."""
    properties = {
        'Emin': {
            'description': 'min energy (eV) for DOS plot',
            'type': 'number'
        },
        'Emax': {
            'description': 'max energy (eV) for DOS plot',
            'type': 'number'
        },
        'DeltaE': {
            'description': 'energy grid step (eV)',
            'type': 'number',
            'minimum': 0
        },
        'ngauss': {
            'description': 'Type of gaussian broadening.',
            'type': 'integer',
            'enum': [0, 1, -1, -99]
        },
        'degauss': {
            'description': 'gaussian broadening, Ry (not eV!)',
            'type': 'number',
            'minimum': 0
        },
    }
    return {
        '$schema': 'http://json-schema.org/draft-07/schema',
        'type': 'object',
        'required': ['DeltaE'],
        'additionalProperties': False,
        'properties': properties,
    }
def validate_inputs(value, _):
    """Validate the top level namespace.

    - Check that either the `scf` or `nscf.pw.parent_folder` inputs is provided.
    - Check that the `Emin`, `Emax` and `DeltaE` inputs are the same for the `dos` and `projwfc` namespaces.
    - Check that `Emin` and `Emax` are provided in case `align_to_fermi` is set to `True`.

    :return: an error message string when validation fails, otherwise None.
    """
    import warnings

    # Check that either the `scf` input or `nscf.pw.parent_folder` is provided.
    if 'scf' in value and 'parent_folder' in value['nscf']['pw']:
        warnings.warn(
            'Both the `scf` and `nscf.pw.parent_folder` inputs were provided. The SCF calculation will '
            'be run with the inputs provided in `scf` and the `nscf.pw.parent_folder` will be ignored.'
        )
    elif 'scf' not in value and 'parent_folder' not in value['nscf']['pw']:
        return 'Specifying either the `scf` or `nscf.pw.parent_folder` input is required.'
    # The shared energy-grid parameters must agree between the two codes.
    for par in ['Emin', 'Emax', 'DeltaE']:
        if value['dos']['parameters']['DOS'].get(par, None) != value['projwfc']['parameters']['PROJWFC'].get(par, None):
            # Typo fix: the message previously contained a stray double backtick.
            return f'The `{par}` parameter has to be equal for the `dos` and `projwfc` inputs.'
    if value.get('align_to_fermi', False):
        for par in ['Emin', 'Emax']:
            if value['dos']['parameters']['DOS'].get(par, None) is None:
                return f'The `{par}` parameter must be set in case `align_to_fermi` is set to `True`.'
def validate_scf(value, _):
    """Validate the scf parameters: the calculation type must be `scf`.

    :return: an error message string when validation fails, otherwise None.
    """
    parameters = value['pw']['parameters'].get_dict()
    if parameters.get('CONTROL', {}).get('calculation', 'scf') != 'scf':
        # Typo fix: the message previously read `CONTOL.calculation`.
        return '`CONTROL.calculation` in `scf.pw.parameters` is not set to `scf`.'
def validate_nscf(value, _):
    """Validate the nscf parameters: calculation type `nscf` and tetrahedra occupations.

    :return: an error message string when validation fails, otherwise None.
    """
    parameters = value['pw']['parameters'].get_dict()
    if parameters.get('CONTROL', {}).get('calculation', 'scf') != 'nscf':
        # Typo fix: the message previously read `CONTOL.calculation`.
        return '`CONTROL.calculation` in `nscf.pw.parameters` is not set to `nscf`.'
    if parameters.get('SYSTEM', {}).get('occupations', None) != 'tetrahedra':
        return '`SYSTEM.occupations` in `nscf.pw.parameters` is not set to `tetrahedra`.'
def validate_dos(value, _):
    """Validate dos.x input parameters against the shared parameter schema.
    - shared: Emin | Emax | DeltaE
    - dos.x only: ngauss | degauss | bz_sum
    - projwfc.x only: ngauss | degauss | pawproj | n_proj_boxes | irmin(3,n_proj_boxes) | irmax(3,n_proj_boxes)
    """
    # Raises jsonschema.ValidationError on invalid input; AiiDA surfaces this
    # to the caller as a failed input validation.
    jsonschema.validate(value['parameters'].get_dict()['DOS'], get_parameter_schema())
def validate_projwfc(value, _):
    """Validate projwfc.x input parameters against the shared parameter schema.
    - shared: Emin | Emax | DeltaE
    - dos.x only: ngauss | degauss | bz_sum
    - projwfc.x only: ngauss | degauss | pawproj | n_proj_boxes | irmin(3,n_proj_boxes) | irmax(3,n_proj_boxes)
    """
    # Raises jsonschema.ValidationError on invalid input; AiiDA surfaces this
    # to the caller as a failed input validation.
    jsonschema.validate(value['parameters'].get_dict()['PROJWFC'], get_parameter_schema())
def clean_calcjob_remote(node):
    """Clean the remote directory of a ``CalcJobNode``.

    :return: True when the remote folder was cleaned, False when cleaning
        failed or no ``remote_folder`` output exists.
    """
    try:
        node.outputs.remote_folder._clean()  # pylint: disable=protected-access
    except (IOError, OSError, KeyError):
        # Missing output (KeyError) or transport trouble: report, don't raise.
        return False
    return True
def clean_workchain_calcs(workchain):
    """Clean all remote directories of a workchain's descendant calculations.

    :return: list of pks whose remote folders were actually cleaned.
    """
    return [
        descendant.pk
        for descendant in workchain.called_descendants
        if isinstance(descendant, orm.CalcJobNode) and clean_calcjob_remote(descendant)
    ]
# Resolve the plugin classes lazily through the AiiDA entry-point factories.
PwBaseWorkChain = plugins.WorkflowFactory('quantumespresso.pw.base')
DosCalculation = plugins.CalculationFactory('quantumespresso.dos')
ProjwfcCalculation = plugins.CalculationFactory('quantumespresso.projwfc')
class PdosWorkChain(ProtocolMixin, WorkChain):
"""A WorkChain to compute Total & Partial Density of States of a structure, using Quantum Espresso."""
@classmethod
def define(cls, spec):
    # yapf: disable
    """Define the process specification."""
    super().define(spec)
    spec.input('structure', valid_type=orm.StructureData, help='The input structure.')
    spec.input(
        'serial_clean',
        valid_type=orm.Bool,
        serializer=to_aiida_type,
        required=False,
        help=('If ``True``, calculations will be run in serial, '
              'and work directories will be cleaned before the next step.')
    )
    spec.input(
        'clean_workdir',
        valid_type=orm.Bool,
        serializer=to_aiida_type,
        default=lambda: orm.Bool(False),
        help='If ``True``, work directories of all called calculation will be cleaned at the end of execution.'
    )
    spec.input(
        'dry_run',
        valid_type=orm.Bool,
        serializer=to_aiida_type,
        required=False,
        help='Terminate workchain steps before submitting calculations (test purposes only).'
    )
    spec.input(
        'align_to_fermi',
        valid_type=orm.Bool,
        serializer=to_aiida_type,
        default=lambda: orm.Bool(False),
        help=(
            'If true, Emin=>Emin-Efermi & Emax=>Emax-Efermi, where Efermi is taken from the `nscf` calculation. '
            'Note that it only makes sense to align `Emax` and `Emin` to the fermi level in case they are actually '
            'provided by in the `dos` and `projwfc` inputs, since otherwise the '
        )
    )
    # The scf namespace is optional: a parent_folder may be supplied to nscf instead.
    spec.expose_inputs(
        PwBaseWorkChain,
        namespace='scf',
        exclude=('clean_workdir', 'pw.structure', 'pw.parent_folder'),
        namespace_options={
            'help': 'Inputs for the `PwBaseWorkChain` of the `scf` calculation.',
            'validator': validate_scf,
            'required': False,
            'populate_defaults': False,
        }
    )
    spec.expose_inputs(
        PwBaseWorkChain,
        namespace='nscf',
        exclude=('clean_workdir', 'pw.structure'),
        namespace_options={
            'help': 'Inputs for the `PwBaseWorkChain` of the `nscf` calculation.',
            'validator': validate_nscf
        }
    )
    # Relax the pw sub-namespace validator so an external parent_folder is accepted.
    spec.inputs['nscf']['pw'].validator = PwCalculation.validate_inputs_base
    spec.expose_inputs(
        DosCalculation,
        namespace='dos',
        exclude=('parent_folder',),
        namespace_options={
            'help': ('Input parameters for the `dos.x` calculation. Note that the `Emin`, `Emax` and `DeltaE` '
                     'values have to match with those in the `projwfc` inputs.'),
            'validator': validate_dos
        }
    )
    spec.expose_inputs(
        ProjwfcCalculation,
        namespace='projwfc',
        exclude=('parent_folder',),
        namespace_options={
            'help': ('Input parameters for the `projwfc.x` calculation. Note that the `Emin`, `Emax` and `DeltaE` '
                     'values have to match with those in the `dos` inputs.'),
            'validator': validate_projwfc
        }
    )
    spec.inputs.validator = validate_inputs
    spec.outline(
        cls.setup,
        if_(cls.should_run_scf)(
            cls.run_scf,
            cls.inspect_scf,
        ),
        cls.run_nscf,
        cls.inspect_nscf,
        if_(cls.serial_clean)(
            cls.run_dos_serial,
            cls.inspect_dos_serial,
            cls.run_projwfc_serial,
            cls.inspect_projwfc_serial
        ).else_(
            cls.run_pdos_parallel,
            cls.inspect_pdos_parallel,
        ),
        cls.results,
    )
    spec.exit_code(202, 'ERROR_INVALID_INPUT_KPOINTS',
        message='Neither the `kpoints` nor the `kpoints_distance` input was specified for base or nscf namespaces.')
    spec.exit_code(401, 'ERROR_SUB_PROCESS_FAILED_SCF',
        message='the SCF sub process failed')
    spec.exit_code(402, 'ERROR_SUB_PROCESS_FAILED_NSCF',
        message='the NSCF sub process failed')
    spec.exit_code(403, 'ERROR_SUB_PROCESS_FAILED_DOS',
        message='the DOS sub process failed')
    spec.exit_code(404, 'ERROR_SUB_PROCESS_FAILED_PROJWFC',
        message='the PROJWFC sub process failed')
    # Bug fix: this exit code was previously registered as 404 as well,
    # clashing with ERROR_SUB_PROCESS_FAILED_PROJWFC; it now has its own status.
    spec.exit_code(405, 'ERROR_SUB_PROCESS_FAILED_BOTH',
        message='both the DOS and PROJWFC sub process failed')
    spec.expose_outputs(PwBaseWorkChain, namespace='nscf')
    spec.expose_outputs(DosCalculation, namespace='dos')
    spec.expose_outputs(ProjwfcCalculation, namespace='projwfc')
@classmethod
def get_protocol_filepath(cls):
    """Return ``pathlib.Path`` to the ``.yaml`` file that defines the protocols."""
    # importlib_resources backport keeps compatibility with older Python versions.
    from importlib_resources import files
    from . import protocols
    return files(protocols) / 'pdos.yaml'
@classmethod
def get_builder_from_protocol(
    cls, pw_code, dos_code, projwfc_code, structure, protocol=None, overrides=None, **kwargs
):
    """Return a builder prepopulated with inputs selected according to the chosen protocol.
    :param pw_code: the ``Code`` instance configured for the ``quantumespresso.pw`` plugin.
    :param dos_code: the ``Code`` instance configured for the ``quantumespresso.dos`` plugin.
    :param projwfc_code: the ``Code`` instance configured for the ``quantumespresso.projwfc`` plugin.
    :param structure: the ``StructureData`` instance to use.
    :param protocol: protocol to use, if not specified, the default will be used.
    :param overrides: optional dictionary of inputs to override the defaults of the protocol.
    :param kwargs: additional keyword arguments that will be passed to the ``get_builder_from_protocol`` of all the
        sub processes that are called by this workchain.
    :return: a process builder instance with all inputs defined ready for launch.
    """
    inputs = cls.get_protocol_inputs(protocol, overrides)
    args = (pw_code, structure, protocol)
    # Delegate the pw namespaces to the base workchain's protocol machinery;
    # structure and clean_workdir are set at the PdosWorkChain level instead.
    scf = PwBaseWorkChain.get_builder_from_protocol(*args, overrides=inputs.get('scf', None), **kwargs)
    scf['pw'].pop('structure', None)
    scf.pop('clean_workdir', None)
    nscf = PwBaseWorkChain.get_builder_from_protocol(*args, overrides=inputs.get('nscf', None), **kwargs)
    nscf['pw'].pop('structure', None)
    # The nscf step uses tetrahedra occupations (see validate_nscf), so drop
    # smearing settings inherited from the protocol defaults.
    nscf['pw']['parameters']['SYSTEM'].pop('smearing', None)
    nscf['pw']['parameters']['SYSTEM'].pop('degauss', None)
    nscf.pop('clean_workdir', None)
    builder = cls.get_builder()
    builder.structure = structure
    builder.clean_workdir = orm.Bool(inputs['clean_workdir'])
    builder.scf = scf
    builder.nscf = nscf
    builder.dos.code = dos_code  # pylint: disable=no-member
    builder.dos.parameters = orm.Dict(dict=inputs.get('dos', {}).get('parameters'))  # pylint: disable=no-member
    builder.dos.metadata = inputs.get('dos', {}).get('metadata')  # pylint: disable=no-member
    builder.projwfc.code = projwfc_code  # pylint: disable=no-member
    builder.projwfc.parameters = orm.Dict(dict=inputs.get('projwfc', {}).get('parameters'))  # pylint: disable=no-member
    builder.projwfc.metadata = inputs.get('projwfc', {}).get('metadata')  # pylint: disable=no-member
    return builder
def setup(self):
    """Initialize context variables that are used during the logical flow of the workchain."""
    inputs = self.inputs
    # Both inputs are optional; absent means False.
    self.ctx.serial_clean = 'serial_clean' in inputs and inputs.serial_clean.value
    self.ctx.dry_run = 'dry_run' in inputs and inputs.dry_run.value
def serial_clean(self):
    """Return whether dos and projwfc calculations should be run in serial.
    The calculation remote folders will be cleaned before the next process step.
    """
    # Set in ``setup`` from the optional ``serial_clean`` input (False when absent).
    return self.ctx.serial_clean
def should_run_scf(self):
    """Return whether the work chain should run an SCF calculation."""
    # When `scf` is absent, validate_inputs guarantees nscf.pw.parent_folder was given.
    return 'scf' in self.inputs
def run_scf(self):
    """Run an SCF calculation, to generate the wavefunction."""
    scf_inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, 'scf'))
    scf_inputs.pw.structure = self.inputs.structure
    scf_inputs.metadata.call_link_label = 'scf'
    scf_inputs = prepare_process_inputs(PwBaseWorkChain, scf_inputs)
    # Dry-run mode returns the prepared inputs instead of submitting.
    if self.ctx.dry_run:
        return scf_inputs
    node = self.submit(PwBaseWorkChain, **scf_inputs)
    self.report(f'launching SCF PwBaseWorkChain<{node.pk}>')
    return ToContext(workchain_scf=node)
def inspect_scf(self):
    """Verify that the SCF calculation finished successfully."""
    scf_workchain = self.ctx.workchain_scf
    if scf_workchain.is_finished_ok:
        # Keep the remote folder around so the nscf step can restart from it.
        self.ctx.scf_parent_folder = scf_workchain.outputs.remote_folder
        return None
    self.report(f'SCF PwBaseWorkChain failed with exit status {scf_workchain.exit_status}')
    return self.exit_codes.ERROR_SUB_PROCESS_FAILED_SCF
def run_nscf(self):
    """Run an NSCF calculation, to generate eigenvalues with a denser k-point mesh.
    This calculation modifies the base scf calculation inputs by:
    - Using the parent folder from the scf calculation.
    - Replacing the kpoints, if an alternative is specified for nscf.
    - Changing ``SYSTEM.occupations`` to 'tetrahedra'.
    - Changing ``SYSTEM.nosym`` to True, to avoid generation of additional k-points in low symmetry cases.
    - Replace the ``pw.metadata.options``, if an alternative is specified for nscf.
    """
    nscf_inputs = AttributeDict(self.exposed_inputs(PwBaseWorkChain, 'nscf'))
    # Restart from our own SCF step when one was run; otherwise the
    # user-supplied nscf.pw.parent_folder is already in the exposed inputs.
    if 'scf' in self.inputs:
        nscf_inputs.pw.parent_folder = self.ctx.scf_parent_folder
    nscf_inputs.pw.structure = self.inputs.structure
    nscf_inputs.metadata.call_link_label = 'nscf'
    nscf_inputs = prepare_process_inputs(PwBaseWorkChain, nscf_inputs)
    if self.ctx.dry_run:
        return nscf_inputs
    node = self.submit(PwBaseWorkChain, **nscf_inputs)
    self.report(f'launching NSCF PwBaseWorkChain<{node.pk}>')
    return ToContext(workchain_nscf=node)
def inspect_nscf(self):
    """Verify that the NSCF calculation finished successfully."""
    nscf = self.ctx.workchain_nscf
    if not nscf.is_finished_ok:
        self.report(f'NSCF PwBaseWorkChain failed with exit status {nscf.exit_status}')
        return self.exit_codes.ERROR_SUB_PROCESS_FAILED_NSCF
    if self.ctx.serial_clean:
        # we no longer require the scf remote folder, so can clean it
        cleaned = clean_workchain_calcs(self.ctx.workchain_scf)
        if cleaned:
            self.report(f"cleaned remote folders of SCF calculations: {' '.join(map(str, cleaned))}")
    # Cache the quantities the dos/projwfc steps need.
    bands = nscf.outputs.output_band.get_array('bands')
    self.ctx.nscf_emin = bands.min()
    self.ctx.nscf_emax = bands.max()
    self.ctx.nscf_parent_folder = nscf.outputs.remote_folder
    self.ctx.nscf_fermi = nscf.outputs.output_parameters.dict.fermi_energy
def _generate_dos_inputs(self):
    """Prepare the inputs for the dos.x calculation (total DOS).

    When the parameter dict carries ``align_to_fermi``, shift Emin/Emax by
    the Fermi energy extracted from the nscf step.
    """
    dos_inputs = AttributeDict(self.exposed_inputs(DosCalculation, 'dos'))
    dos_inputs.parent_folder = self.ctx.nscf_parent_folder
    dos_parameters = self.inputs.dos.parameters.get_dict()
    if dos_parameters.pop('align_to_fermi', False):
        # Bug fix: Emin/Emax live inside the 'DOS' namelist (see the input
        # validators), but were previously read from the top level of the
        # dict, which raised KeyError whenever the alignment was requested.
        dos_parameters['DOS']['Emin'] = dos_parameters['DOS']['Emin'] + self.ctx.nscf_fermi
        dos_parameters['DOS']['Emax'] = dos_parameters['DOS']['Emax'] + self.ctx.nscf_fermi
    dos_inputs.parameters = orm.Dict(dict=dos_parameters)
    dos_inputs['metadata']['call_link_label'] = 'dos'
    return dos_inputs
def _generate_projwfc_inputs(self):
    """Prepare the inputs for the projwfc.x calculation (partial DOS).

    When the parameter dict carries ``align_to_fermi``, shift Emin/Emax by
    the Fermi energy extracted from the nscf step.
    """
    projwfc_inputs = AttributeDict(self.exposed_inputs(ProjwfcCalculation, 'projwfc'))
    projwfc_inputs.parent_folder = self.ctx.nscf_parent_folder
    projwfc_parameters = self.inputs.projwfc.parameters.get_dict()
    if projwfc_parameters.pop('align_to_fermi', False):
        # Bug fix: Emin/Emax live inside the 'PROJWFC' namelist (see the input
        # validators), but were previously read from the top level of the
        # dict, which raised KeyError whenever the alignment was requested.
        projwfc_parameters['PROJWFC']['Emin'] = projwfc_parameters['PROJWFC']['Emin'] + self.ctx.nscf_fermi
        projwfc_parameters['PROJWFC']['Emax'] = projwfc_parameters['PROJWFC']['Emax'] + self.ctx.nscf_fermi
    projwfc_inputs.parameters = orm.Dict(dict=projwfc_parameters)
    projwfc_inputs['metadata']['call_link_label'] = 'projwfc'
    return projwfc_inputs
def run_dos_serial(self):
    """Submit the dos.x calculation on its own (serial mode)."""
    inputs = self._generate_dos_inputs()
    if self.ctx.dry_run:
        return inputs
    node = self.submit(DosCalculation, **inputs)
    self.report(f'launching DosCalculation<{node.pk}>')
    return ToContext(calc_dos=node)
def inspect_dos_serial(self):
    """Verify that the DOS calculation finished successfully, then clean its remote directory."""
    dos_calc = self.ctx.calc_dos
    if not dos_calc.is_finished_ok:
        self.report(f'DosCalculation failed with exit status {dos_calc.exit_status}')
        return self.exit_codes.ERROR_SUB_PROCESS_FAILED_DOS
    if self.ctx.serial_clean:
        # we no longer require the dos remote folder, so can clean it
        if clean_calcjob_remote(dos_calc):
            self.report(f'cleaned remote folder of DosCalculation<{dos_calc.pk}>')
def run_projwfc_serial(self):
    """Submit the projwfc.x calculation on its own (serial mode)."""
    inputs = self._generate_projwfc_inputs()
    if self.ctx.dry_run:
        return inputs
    node = self.submit(ProjwfcCalculation, **inputs)
    self.report(f'launching ProjwfcCalculation<{node.pk}>')
    return ToContext(calc_projwfc=node)
def inspect_projwfc_serial(self):
    """Check the Projwfc calculation outcome and, in serial-clean mode, wipe its remote folder."""
    projwfc_calc = self.ctx.calc_projwfc
    if not projwfc_calc.is_finished_ok:
        self.report(f'ProjwfcCalculation failed with exit status {projwfc_calc.exit_status}')
        return self.exit_codes.ERROR_SUB_PROCESS_FAILED_PROJWFC
    # The Projwfc step succeeded, so its remote working directory is no longer needed.
    if self.ctx.serial_clean and clean_calcjob_remote(projwfc_calc):
        self.report(f'cleaned remote folder of ProjwfcCalculation<{projwfc_calc.pk}>')
def run_pdos_parallel(self):
    """Submit the DOS and Projwfc calculations simultaneously."""
    dos_inputs = self._generate_dos_inputs()
    projwfc_inputs = self._generate_projwfc_inputs()
    if self.ctx.dry_run:
        return dos_inputs, projwfc_inputs
    dos_node = self.submit(DosCalculation, **dos_inputs)
    self.report(f'launching DosCalculation<{dos_node.pk}>')
    self.to_context(calc_dos=dos_node)
    projwfc_node = self.submit(ProjwfcCalculation, **projwfc_inputs)
    self.report(f'launching ProjwfcCalculation<{projwfc_node.pk}>')
    self.to_context(calc_projwfc=projwfc_node)
def inspect_pdos_parallel(self):
    """Verify that both parallel child calculations finished successfully."""
    failures = []
    checks = (
        ('DosCalculation', self.ctx.calc_dos, self.exit_codes.ERROR_SUB_PROCESS_FAILED_DOS),
        ('ProjwfcCalculation', self.ctx.calc_projwfc, self.exit_codes.ERROR_SUB_PROCESS_FAILED_PROJWFC),
    )
    for label, calc, exit_code in checks:
        if not calc.is_finished_ok:
            self.report(f'{label} failed with exit status {calc.exit_status}')
            failures.append(exit_code)
    # Both failing maps onto a dedicated combined exit code.
    if len(failures) > 1:
        return self.exit_codes.ERROR_SUB_PROCESS_FAILED_BOTH
    if failures:
        return failures[0]
def results(self):
    """Expose the outputs of the finished child processes on the workchain."""
    self.report('workchain successfully completed')
    exposed = (
        (self.ctx.workchain_nscf, PwBaseWorkChain, 'nscf'),
        (self.ctx.calc_dos, DosCalculation, 'dos'),
        (self.ctx.calc_projwfc, ProjwfcCalculation, 'projwfc'),
    )
    for child, process_class, namespace in exposed:
        self.out_many(self.exposed_outputs(child, process_class, namespace=namespace))
def on_terminated(self):
    """After termination, optionally wipe the remote folders of all child calculations."""
    super().on_terminated()
    if self.inputs.clean_workdir.value is False:
        self.report('remote folders will not be cleaned')
        return
    cleaned = clean_workchain_calcs(self.node)
    if cleaned:
        joined_pks = ' '.join(map(str, cleaned))
        self.report(f'cleaned remote folders of calculations: {joined_pks}')
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
ground.py
File containing functions used on ground
Distributed under the 3-Clause BSD License (below)
Copyright 2019 Rensselaer Polytechnic Institute
(Dr. John Christian, Devin Renshaw, Grace Quintero)
Redistribution and use in source and binary forms,
with or without modification, are permitted provided
that the following conditions are met:
1. Redistributions of source code must retain the above
copyright notice, this list of conditions and the
following disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
3. Neither the name of the copyright holder nor the names
of its contributors may be used to endorse or promote
products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND
CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES,
INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
################################
#LOAD LIBRARIES
################################
from array_transformations import check_axis_decorator
################################
#SUPPORT FUNCTIONS
################################
def kvector(input_cat):
    """Build a k-vector index over ``input_cat``.

    Returns ``(k, m, q, sorted_cat)``: the k-vector array ``k`` (nx1 ints),
    the slope ``m`` and intercept ``q`` of the indexing line, and the
    catalog sorted by the indexed value.  ``m``, ``q`` and ``sorted_cat``
    come from ``rpi_core.kvec_values`` -- presumably the standard k-vector
    range-search construction; TODO confirm against rpi_core.
    """
    import numpy as np
    import rpi_core
    m, q, sorted_cat = rpi_core.kvec_values(input_cat)
    nrow = len(sorted_cat)
    # First and last entries are pinned to 0 and nrow by construction.
    k = np.ones((nrow, 1), dtype=int)
    k[0] = 0
    k[-1] = nrow
    for x in np.arange(1, nrow-1, 1, dtype=int):
        # Create k-vector catalog by finding the number of
        # items in the original catalog that are below the
        # value of the line
        l = k[x-1][0]  # grab the previous (smaller) element in the array
        z = m*(x+1) + q  # eqn 1
        # Start counting from the previous k entry: the catalog is sorted, so
        # everything below k[x-1] is already known to be below the line.
        for y in np.arange(k[x-1], nrow-1, 1, dtype=int):
            # If the calculated z matches/exceeds that of the current catalog
            # entry, increment l by 1
            if z >= sorted_cat[y, 1]:  # (eqn 2)
                l += 1
            else:
                break
        k[x] = l
    return k, m, q, sorted_cat
@check_axis_decorator(2)
def equatorial2vector(ra_de, axis=None):
    """Convert equatorial coordinates (RA/DE in degrees) to unit line-of-sight vectors.

    Parameters
    ----------
    ra_de : array_like
        Right ascension / declination in degrees.  Either a flat (RA, DE)
        pair, a 2xn array (``axis=0``, each column one star) or an nx2
        array (``axis=1``, each row one star).
    axis : int or None
        Layout selector for 2-D input; see above.

    Returns
    -------
    tuple
        ``(los_vector, ra, de)`` -- 3xn LOS unit vectors plus RA and DE in radians.

    Raises
    ------
    ValueError
        If ``axis`` is neither 0 nor 1 for a 2-D input.
    """
    import numpy as np
    ra_de_rad = np.radians(ra_de)
    if ra_de_rad.ndim == 1:
        # Single star supplied as a flat (RA, DE) pair.
        ra = np.array(ra_de_rad[0])
        de = np.array(ra_de_rad[1])
    else:
        if axis == 0:
            # axis = 0 2x3
            # | RA RA RA |
            # | DE DE DE |
            ra = ra_de_rad[0, :]
            de = ra_de_rad[1, :]
        elif axis == 1:
            # axis = 1 4x2
            # | RA DE |  (one row per star)
            ra = ra_de_rad[:, 0]
            de = ra_de_rad[:, 1]
        else:
            # BUG FIX: a bare `raise` here had no active exception and produced
            # "RuntimeError: No active exception to re-raise" instead of a useful error.
            raise ValueError(
                "ERROR [" + str(__name__) + "]: axis must be 0 or 1, got {0}".format(axis))
    # Standard spherical-to-Cartesian conversion for the line of sight.
    los_vector = np.array([np.cos(de)*np.cos(ra),
                           np.cos(de)*np.sin(ra),
                           np.sin(de)])
    return los_vector, ra, de
@check_axis_decorator(3)
def vector2equatorial(v, axis=None):
    """Convert unit vectors to equatorial coordinates in degrees.

    ``axis=0``: *v* is 3xn (one star per column) -> returns a 2xn array.
    ``axis=1``: *v* is nx3 (one star per row)    -> returns an nx2 array.

    Raises ValueError for any other axis value.
    """
    import numpy as np
    if axis == 0:
        de = np.arcsin(v[2, :])
        ra = np.arctan2(v[1, :], v[0, :])
        ra_de = np.vstack((ra, de))
    elif axis == 1:
        de = np.arcsin(v[:, 2])
        ra = np.arctan2(v[:, 1], v[:, 0])
        # BUG FIX: np.hstack concatenated ra and de into a flat length-2n vector,
        # which does not mirror the nx3 row-per-star input; column_stack yields nx2.
        ra_de = np.column_stack((ra, de))
    else:
        # BUG FIX: previously fell through with ra_de unbound (UnboundLocalError).
        raise ValueError(
            "ERROR [" + str(__name__) + "]: axis must be 0 or 1, got {0}".format(axis))
    return np.degrees(ra_de)
@check_axis_decorator(2)
def lpq_orthonormal_basis(ra_de_deg, axis=None):
    """Return the astrometric local triad {l, p, q} for each star.

    ``l`` is the line-of-sight unit vector, ``p`` points toward increasing
    right ascension and ``q`` toward increasing declination, so {p, q, l}
    forms a right-handed orthonormal basis.

    Returns ``(los, p_hat, q_hat)``, each 3xn.
    """
    import numpy as np
    los, ra_rad, de_rad = equatorial2vector(ra_de_deg, axis=axis)
    # p = (-sin a, cos a, 0)
    p_hat = np.array([
        -np.sin(ra_rad),
        np.cos(ra_rad),
        np.zeros_like(ra_rad)])
    # BUG FIX: q = (-sin d * cos a, -sin d * sin a, cos d).  The azimuthal
    # factors were previously written with the declination (de_rad) instead
    # of the right ascension, producing a non-orthogonal, non-unit q.
    q_hat = np.array([
        -np.sin(de_rad) * np.cos(ra_rad),
        -np.sin(de_rad) * np.sin(ra_rad),
        np.cos(de_rad)])
    return los, p_hat, q_hat
@check_axis_decorator(2)
def proper_motion_correction(ra_de_deg, pm_rade_mas, plx_mas, rB, t, t_ep, axis=None):
    """Propagate catalog star directions from the catalog epoch to epoch *t*.

    Applies proper motion and annual parallax:
        u_i = l_i + (t - t_ep)*(mu_ra_i*p + mu_de_i*q) - plx_i*rObs/AU

    Parameters
    ----------
    ra_de_deg : ndarray    RA/DE in degrees (2xn or nx2, see *axis*).
    pm_rade_mas : ndarray  Proper motions in mas/yr, same layout as *ra_de_deg*.
    plx_mas : ndarray      Parallaxes in mas (length n).
    rB : ndarray or None   BCRF observer position in km; defaults to an
                           Earth-like position of 149597870.693 km on the x-axis.
    t, t_ep : float        Target epoch and catalog epoch (Besselian years).

    Returns
    -------
    tuple  ``(u_normalized, los)`` -- corrected unit vectors and the raw LOS vectors.
    """
    import math
    import numpy as np
    import array_transformations as xforms
    if ra_de_deg.shape != pm_rade_mas.shape:
        raise ValueError("ERROR ["+str(__name__)+"]: Dimensions for RA/DEC and proper motion do not agree")
    # Unit conversion: milliarcseconds -> radians.
    mas2rad = (1/1000) * (1/3600) * (math.pi/180)
    # One Astronomical Unit in kilometers (previously defined twice as AU and au).
    au_km = 1.496e8
    # {p, q, l} right-handed orthonormal basis per star (3xn each).
    los, p_hat, q_hat = lpq_orthonormal_basis(ra_de_deg, axis=axis)
    # Proper motion in rad/yr, 2xn.
    pm_rade_rad = mas2rad*pm_rade_mas
    mu_ra = pm_rade_rad[0, :]
    mu_de = pm_rade_rad[1, :]
    pm = (t - t_ep)*(mu_ra*p_hat + mu_de*q_hat)
    # Parallax in radians, 1xn.
    plx_rad = mas2rad*plx_mas
    # If no observer position is provided, assume the s/c orbits Earth.
    if rB is None:
        rB = np.array([[149597870.693], [0], [0]])
    rObs_au = rB/au_km
    # Replicate the observer position for every star (numpy.matlib.repmat is
    # deprecated; np.tile is the documented equivalent).
    r_au_mat = np.tile(rObs_au, (1, len(plx_rad)))
    plx = -plx_rad*r_au_mat
    # ui = l_i + (t-t_ep)*(mua_i*p + mud_i*q) - (w_i*rObs)/AU
    u = los + pm + plx
    return xforms.normalize_vector_array(u), los
def get_Hipparcos_data(starcat_file, magnitude_name, excess_rows,
                       brightness_thresh=None, index_col=2):
    """Load a Hipparcos-style tab-separated catalog into a DataFrame.

    Skips *excess_rows* header lines, treats '#' lines as comments and uses
    column *index_col* as the index.  When *brightness_thresh* is given, only
    stars with *magnitude_name* strictly below the threshold are returned.
    """
    import pandas as pd
    catalog = pd.read_csv(starcat_file, skiprows=excess_rows, sep='\t',
                          comment='#', index_col=index_col)
    if brightness_thresh is None:
        return catalog
    return catalog[catalog[magnitude_name] < brightness_thresh]
def read_star_catalog(starcat_file, brightness_thresh, t=None, cat_ep=None,
                      rB=None, excess_rows=None, index_col=2):
    """Read the Hipparcos catalog and return proper-motion-corrected unit vectors.

    Returns ``(u, starcat)``: 3xn unit vectors to each star at epoch *t*
    (default: now) and the filtered catalog DataFrame.
    """
    from astropy.time import Time
    import numpy as np
    import ground
    # Target epoch in Besselian years; default to the current time.
    epoch = Time.now().byear if t is None else Time(t).byear
    # Hipparcos reference epoch B1991.25 unless the caller overrides it.
    if cat_ep is None:
        cat_ep = 1991.250
    catalog_epoch = Time(cat_ep, format='byear').byear
    # Column header names in Hipparcos star catalog
    HIP_ID = 'HIP'
    MAG = 'Hpmag'
    RA = 'RArad'
    DE = 'DErad'
    PLX = 'Plx'
    PM_RA = 'pmRA'
    PM_DE = 'pmDE'
    starcat = get_Hipparcos_data(starcat_file, MAG, excess_rows,
                                 brightness_thresh=brightness_thresh,
                                 index_col=index_col)
    ra_de = np.vstack((starcat[RA].values, starcat[DE].values))
    pm_rade = np.vstack((starcat[PM_RA].values, starcat[PM_DE].values))
    plx = np.asarray(starcat[PLX].values)
    # 'u' is the array of epoch-corrected unit vectors to each star.
    u, _ = ground.proper_motion_correction(ra_de, pm_rade, plx, rB, epoch, catalog_epoch)
    return u, starcat
def create_star_catalog(starcat_file, brightness_thresh, cat_ep=None, t=None, rB=None,
                        excess_rows=None, index_col=2, save_vals=False, fov=None, save_dir=None):
    """Build the star-pair / k-vector catalog used for star identification.

    Reads the star catalog, forms all unique star pairs, computes their
    interstar angles, optionally discards pairs wider than twice *fov*, and
    indexes the result with a k-vector.

    Returns ``(k, m, q, u, isa_cat_idx)`` where ``isa_cat_idx`` is the
    star-pair index array augmented with the sorted interstar-angle catalog.
    When *save_vals* is true and *save_dir* is given, the arrays are also
    written to ``save_dir`` as .npy files.
    """
    import os
    import itertools as it
    import numpy as np
    import rpi_core
    # Correct catalog entries for proper motion.
    u, _ = read_star_catalog(
        starcat_file, brightness_thresh, excess_rows=excess_rows,
        cat_ep=cat_ep, t=t, rB=rB, index_col=index_col)
    # All unique star pairs (n choose 2).
    star_idx = np.arange(0, u.shape[1])
    star_pairs = np.array(list(it.combinations(star_idx, 2)))
    # Form star pair unit vectors into an nx6 array (both members stacked).
    u_starpairs = np.vstack((u[:, star_pairs[:, 0]],
                             u[:, star_pairs[:, 1]]))
    # Calculate interstar angles.
    istar_angle = rpi_core.interstar_angle(u_starpairs)
    # Remove star pairs that fall outside the field-of-view angle.
    if fov is not None:
        sp_fov = np.where(istar_angle < 2.0*fov)[0]
        istar_angle = istar_angle[sp_fov]
        star_pairs = star_pairs[sp_fov, :]
        del sp_fov, fov
    # Create star-pair catalog indexed by interstar angle via the k-vector.
    k, m, q, isa_cat = kvector(istar_angle)
    isa_cat_idx = np.hstack((star_pairs, isa_cat))
    if save_vals:
        # FIX (idiom): compare against None with `is`, not `==`.
        if save_dir is None:
            print("[CREATE_STAR_CATALOG]: no directory provided, exiting without saving")
            return k, m, q, u, isa_cat_idx
        np.save(os.path.join(save_dir, 'k'), k)
        np.save(os.path.join(save_dir, 'm'), m)
        np.save(os.path.join(save_dir, 'q'), q)
        np.save(os.path.join(save_dir, 'u'), u)
        np.save(os.path.join(save_dir, 'indexed_star_pairs'), isa_cat_idx)
    return k, m, q, u, isa_cat_idx
def create_darkframe(img_list, numImages):
    """Median-combine up to *numImages* grayscale frames into a dark frame.

    Returns the per-pixel median image cast back to the sample frame's dtype,
    or None when no images are available.

    Raises ValueError if any listed image fails to load.
    """
    import cv2 as cv
    import numpy as np
    # TODO: use GLOB to read images better
    # TODO: explicitly use filenames instead of grabbing the first n images?
    # TODO: use context manager to open images
    n_images = len(img_list)
    if n_images < numImages:
        print("Length of image list {0} is less than requested "
              "number of images {1}".format(n_images, numImages))
    numImages = min(numImages, n_images)
    if len(img_list) > numImages:
        img_list = img_list[0:numImages]
    if numImages == 0:
        print('No images found for dark frame creation.')
        return None
    # Use the first frame to fix the stack's size and dtype.
    img_sample = cv.imread(img_list[0], cv.IMREAD_GRAYSCALE)
    img_size = img_sample.shape[0:2]
    img_dtype = img_sample.dtype
    images = np.zeros((img_size[0], img_size[1], numImages))
    for count, filename in enumerate(img_list):
        img = cv.imread(filename, cv.IMREAD_GRAYSCALE)
        if img is None:
            # BUG FIX: the original `raise('...')` raised a string, which is a
            # TypeError in Python 3 -- exceptions must derive from BaseException.
            raise ValueError('Image {0} (count={1}) is None'.format(filename, count))
        images[:, :, count] = img
    # (Removed dead `if images is None` check: `images` is always an ndarray here.)
    images_median = np.median(images, axis=2)
    return images_median.astype(img_dtype)
def create_distortion_map(camera_json, distortion_map_path, save_dist_map=True):
    """Precompute a Brown-model undistortion map with bilinear-interpolation weights.

    For every pixel of the undistorted image, finds the corresponding location
    in the raw (distorted) image and stores the four surrounding pixel indices
    plus their bilinear weights, then saves everything to *distortion_map_path*
    via np.savez.

    Returns ``(c, img_size, dist_coefs, idxUndistorted, idxDistorted, interpWeights)``.

    NOTE(review): *save_dist_map* is accepted but never used -- the map is
    always saved; confirm whether the flag should gate the np.savez call.
    """
    import numpy as np
    import cam_matrix
    import array_transformations as xforms
    # this function only allows Brown distortion at the moment
    c, img_size, dist_coefs = cam_matrix.read_cam_json(camera_json)
    c_inv = cam_matrix.cam_matrix_inv(c)
    # img_size must be the reverse of that listed in MATLAB for sub2ind and ind2sub to play nice
    # img_size = array([img_size[1], img_size[0]])
    #dist_coefs = array([k_1, k_2, p_1, p_2, k_3])
    k1 = dist_coefs[0]
    k2 = dist_coefs[1]
    k3 = dist_coefs[4]
    p1 = dist_coefs[2]
    p2 = dist_coefs[3]
    # cam_resolution, [ncols, nrows]
    nrows = img_size[1]
    ncols = img_size[0]
    # img_size = flip(img_size)
    # Initalize holding arrays for interpolation variables
    idxUndistorted = np.zeros([nrows*ncols, 1], dtype=int)
    idxDistorted = np.zeros([nrows*ncols, 4], dtype=int)
    interpWeights = np.zeros([nrows*ncols, 4])
    # Loop through every pixel in undistorted image and obtain bilinear interpolation weights
    # Bilinear interpolation follows Eqs. 3.6.1-3.6.5 from [Press et al, 2007]
    #Set tolerance for repeated pixels
    #subpix_tol = 0.0001;
    #Initalize pixel counter
    # DO NOT use 'enumerate' in this case since count requires that certain
    # parameters are met to increment
    count = 0;
    # Using 0-indexing
    indexing = 0
    for row in np.arange(indexing, nrows+indexing):
        for col in np.arange(indexing, ncols+indexing):
            # Get current pixel coordinates in undistorted image.
            # Written in homogeneous coordinates.
            uv = np.array( [[col], [row]] ) + 1-indexing
            uv_h = xforms.camera2homogeneous(uv)
            # uv1 = array([[col], [row], [1.0]])
            # Convert undistorted pixel coord to xy coord
            # The inverse of Eq. 7 in [Christian et al, 2016].
            xy = xforms.vector_array_transform(c_inv, uv_h)
            # Isolate x and y coordinates in undistorted image
            x = xy[0, 0];
            y = xy[1, 0];
            # Compute r^2 in undistorted coordinates
            r2 = x**2 + y**2;
            # Additional even power of r needed for Brown model
            r4 = r2*r2;
            r6 = r4*r2;
            # See pp 375-376 of [Bradski & Kaehler, 2008].
            # Also see Eq. 6 of [Christian et al, 2016].
            # Radial term of the Brown model, then the tangential term.
            xy_dist_pre = (1 + k1*r2 + k2*r4 + k3*r6) * np.array([x, y])
            xy_distorted_post = np.array([2*p1*x*y + p2*(r2+2*x**2), p1*(r2+2*y**2) + 2*p2*x*y])
            xy_distorted = xforms.camera2homogeneous(xy_dist_pre + xy_distorted_post)
            # Get distorted uv coordinate
            # See Eq. 7 of [Christian et al, 2016].
            uvDistorted = xforms.vector_array_transform(c, xy_distorted)-1
            # Find indices for the four pixels that surround the query pixel the raw (distorted) image
            # Remember that [1,1] is upper lefthand corner of image.
            # Column number is u-direction. Row number is v-direction.
            colLeft = np.floor(uvDistorted[0]).astype('int') # From [Press et al, 2007]: x_{1i}
            colRight = np.ceil( uvDistorted[0]).astype('int') # From [Press et al, 2007]: x_{1(i+1)}
            rowUp = np.floor(uvDistorted[1]).astype('int') # From [Press et al, 2007]: x_{2j}
            rowDown = np.ceil( uvDistorted[1]).astype('int') # From [Press et al, 2007]: x_{2(j+1)}
            # Check to make sure all four of the query pixels in the new image
            # lie inside of the original (distorted) image.
            if rowUp >= 0 and colLeft >= 0 and rowDown < nrows and colRight < ncols:
                # Store index of current pixel
                order = 'F'
                output_fmt = 'P'
                idxUndistorted[count, 0] = xforms.sub2ind(
                    img_size, row, col, order=order, output_fmt=output_fmt)
                # Get the indices of corresponding surrounding pixels
                # NOTE(review): only this first sub2ind call passes indexing=indexing;
                # the other three rely on the default -- confirm the default is 0.
                idxUpperLeft = xforms.sub2ind(
                    img_size,rowUp,colLeft,order=order, indexing=indexing, output_fmt=output_fmt)
                idxUpperRight = xforms.sub2ind(
                    img_size,rowUp,colRight, order=order, output_fmt=output_fmt)
                idxLowerRight = xforms.sub2ind(
                    img_size,rowDown,colRight, order=order, output_fmt=output_fmt)
                idxLowerLeft = xforms.sub2ind(
                    img_size,rowDown,colLeft, order=order, output_fmt=output_fmt)
                # Store indicies from distorted image in clockwise order, starting from upper left
                idxDistorted[count, :] = [idxUpperLeft,
                                          idxUpperRight,
                                          idxLowerRight,
                                          idxLowerLeft]
                # Compute distance from distorded point to surrounding pixel centers
                # See Eq. 3.6.4 from [Press et al, 2007]. Note there is no
                # denominator since the distance between adjacent pixels is
                # always unity (we devide by 1). Baseline equation is augmented
                # with check to protect against case where distorted point lies
                # exactly on an integer pixel value. This leads to floor() and
                # ceil() producing the same value and must be handled differently.
                if rowDown==rowUp: # abs(colRight-colLeft)<subpix_tol
                    s = 0.5
                else:
                    # Note: What we call "s" here is called "u" in [Press et al,
                    # 2007]. We choose to use "s" to avoid possibility of
                    # confusion with pixel [u,v] coordinates.
                    s = uvDistorted[1] - rowUp
                if colRight==colLeft: # abs(colRight-colLeft)<subpix_tol
                    t = 0.5
                else:
                    t = uvDistorted[0] - colLeft
                # Get weights based on distances. What we call the interpolation
                # "weights" are coefficients in Eq. 3.6.5 from [Press et al, 2007].
                wtUpperLeft = (1-t)*(1-s)
                wtUpperRight = t*(1-s)
                wtLowerRight = t*s
                wtLowerLeft = (1-t)*s
                # Store indicies from distorted image in clockwise order, starting from upper left
                # This ordering scheme is consistent with index storage for idxDistorted
                interpWeights[count,:] = [ wtUpperLeft, wtUpperRight, wtLowerRight, wtLowerLeft ]
                # Increment counter
                count += 1
    # Keep only the usable elements
    idxUndistorted = idxUndistorted[0:count, :]
    idxDistorted = idxDistorted[0:count, :]
    interpWeights = interpWeights[0:count, :]
    np.savez(distortion_map_path, c=c, cam_resolution=img_size, dist_coefs=dist_coefs, idxUndistorted=idxUndistorted,idxDistorted=idxDistorted,interpWeights=interpWeights)
    return c, img_size, dist_coefs, idxUndistorted, idxDistorted, interpWeights
def find_file(name, path):
    """Return the full path to *name* anywhere under *path* (recursive search).

    Raises FileNotFoundError when the file is not present in the whole tree.
    """
    import os
    for root, dirs, files in os.walk(path):
        if name in files:
            return os.path.join(root, name)
    # BUG FIX: the original `else:` was attached to the `if` inside the loop, so the
    # function raised on the FIRST directory that did not contain the file.  The
    # error must only be raised after the entire tree has been walked.
    raise FileNotFoundError("ERROR ["+str(__name__)+"]: File {0} not found.".format(name))
def find_files_pattern(pattern, path, exclude=None):
    """Collect every file path under *path* whose basename matches the glob *pattern*.

    When *exclude* is given, drop any result whose full path contains that substring.
    """
    import fnmatch
    import os
    matches = []
    for root, _dirs, files in os.walk(path):
        matches.extend(
            os.path.join(root, fname)
            for fname in files
            if fnmatch.fnmatch(fname, pattern))
    if exclude is not None:
        matches = [p for p in matches if exclude not in p]
    return matches
|
#!/usr/bin/env python3.8
# Copyright 2020 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import textwrap
import unittest
from unittest import mock
from typing import AbstractSet, Iterable
import action_tracer
class ToolCommandTests(unittest.TestCase):
    """Checks ToolCommand's split of tokens into env assignments, tool and args."""

    def test_empty_command(self):
        """With no tokens there is no tool; accessing it raises IndexError."""
        with self.assertRaises(IndexError):
            action_tracer.ToolCommand().tool

    def test_command_no_args(self):
        cmd = action_tracer.ToolCommand(tokens=['echo'])
        self.assertEqual(cmd.env_tokens, [])
        self.assertEqual(cmd.tool, 'echo')
        self.assertEqual(cmd.args, [])

    def test_command_with_env(self):
        cmd = action_tracer.ToolCommand(tokens=['TMPDIR=/my/tmp', 'echo'])
        self.assertEqual(cmd.env_tokens, ['TMPDIR=/my/tmp'])
        self.assertEqual(cmd.tool, 'echo')
        self.assertEqual(cmd.args, [])

    def test_command_with_args(self):
        cmd = action_tracer.ToolCommand(tokens=['ln', '-f', '-s', 'foo', 'bar'])
        self.assertEqual(cmd.env_tokens, [])
        self.assertEqual(cmd.tool, 'ln')
        self.assertEqual(cmd.args, ['-f', '-s', 'foo', 'bar'])

    def test_unwrap_no_change(self):
        original = ['ln', '-f', '-s', 'foo', 'bar']
        cmd = action_tracer.ToolCommand(tokens=original)
        self.assertEqual(cmd.unwrap().tokens, original)

    def test_unwrap_one_level(self):
        cmd = action_tracer.ToolCommand(
            tokens=['wrapper', '--opt', 'foo', '--', 'bar.sh', 'arg'])
        self.assertEqual(cmd.unwrap().tokens, ['bar.sh', 'arg'])

    def test_unwrap_one_of_many_level(self):
        """Each unwrap() peels exactly one wrapper layer at the first '--'."""
        cmd = action_tracer.ToolCommand(
            tokens=['wrapper', '--opt', 'foo', '--', 'bar.sh', 'arg', '--', 'inner.sh'])
        inner = cmd.unwrap()
        self.assertEqual(inner.tokens, ['bar.sh', 'arg', '--', 'inner.sh'])
        self.assertEqual(inner.unwrap().tokens, ['inner.sh'])
class IsKnownWrapperTests(unittest.TestCase):
    """Checks is_known_wrapper's recognition of action_tracer.py invocations."""

    def test_action_tracer_is_not_wrapper(self):
        cmd = action_tracer.ToolCommand(
            tokens=['path/to/python3.x', 'path/to/not_a_wrapper.py', '--opt1', 'arg1'])
        self.assertFalse(action_tracer.is_known_wrapper(cmd))

    def test_action_tracer_is_not_wrapper_implicit_interpreter(self):
        cmd = action_tracer.ToolCommand(
            tokens=['path/to/not_a_wrapper.py', '--opt1', 'arg1'])
        self.assertFalse(action_tracer.is_known_wrapper(cmd))

    def test_action_tracer_is_wrapper(self):
        cmd = action_tracer.ToolCommand(
            tokens=['path/to/python3.x', 'path/to/action_tracer.py', '--', 'foo.sh', 'arg1', 'arg2'])
        self.assertTrue(action_tracer.is_known_wrapper(cmd))

    def test_action_tracer_is_wrapper_extra_python_flag(self):
        cmd = action_tracer.ToolCommand(
            tokens=['path/to/python3.x', '-S', 'path/to/action_tracer.py', '--', 'foo.sh', 'arg1', 'arg2'])
        self.assertTrue(action_tracer.is_known_wrapper(cmd))

    def test_action_tracer_is_wrapper_implicit_interpreter(self):
        cmd = action_tracer.ToolCommand(
            tokens=['path/to/action_tracer.py', '--', 'foo.sh', 'arg1', 'arg2'])
        self.assertTrue(action_tracer.is_known_wrapper(cmd))
class DepEdgesParseTests(unittest.TestCase):
    """Tests for action_tracer.parse_dep_edges (single depfile line -> DepEdges)."""

    def test_invalid_input(self):
        with self.assertRaises(ValueError):
            action_tracer.parse_dep_edges(
                "output.txt input1.txt")  # missing ":"

    def test_output_only(self):
        dep = action_tracer.parse_dep_edges("output.txt:")
        self.assertEqual(dep.ins, set())
        self.assertEqual(dep.outs, {"output.txt"})

    # FIX: method name typo corrected (was test_multipl_outputs_only).
    def test_multiple_outputs_only(self):
        dep = action_tracer.parse_dep_edges("output.txt output2.txt :")
        self.assertEqual(dep.ins, set())
        self.assertEqual(dep.outs, {"output.txt", "output2.txt"})

    def test_output_with_one_input(self):
        dep = action_tracer.parse_dep_edges("output.txt:input.cc")
        self.assertEqual(dep.ins, {"input.cc"})
        self.assertEqual(dep.outs, {"output.txt"})

    def test_output_with_multiple_inputs(self):
        dep = action_tracer.parse_dep_edges(
            "output.txt:input.cc includes/header.h")
        self.assertEqual(dep.ins, {"input.cc", "includes/header.h"})
        self.assertEqual(dep.outs, {"output.txt"})

    def test_output_with_multiple_inputs_unusual_spacing(self):
        dep = action_tracer.parse_dep_edges(
            "  output.txt :  input.cc   includes/header.h  ")
        self.assertEqual(dep.ins, {"input.cc", "includes/header.h"})
        self.assertEqual(dep.outs, {"output.txt"})

    def test_file_name_with_escaped_space(self):
        dep = action_tracer.parse_dep_edges(
            "output.txt: source\\ input.cc includes/header.h")
        self.assertEqual(dep.ins, {"source input.cc", "includes/header.h"})
        self.assertEqual(dep.outs, {"output.txt"})
class ParseDepFileTests(unittest.TestCase):
    """Tests for action_tracer.parse_depfile (deps list plus all_ins/all_outs sets)."""

    def test_empty(self):
        depfile = action_tracer.parse_depfile([])
        self.assertEqual(depfile.deps, [])
        self.assertEqual(depfile.all_ins, set())
        self.assertEqual(depfile.all_outs, set())

    def test_multiple_ins_multiple_outs(self):
        depfile = action_tracer.parse_depfile(["a b: c d"])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins={"c", "d"}, outs={"a", "b"}),
            ])
        self.assertEqual(depfile.all_ins, {"c", "d"})
        self.assertEqual(depfile.all_outs, {"a", "b"})

    def test_two_deps(self):
        depfile = action_tracer.parse_depfile([
            "A: B",
            "C: D E",
        ])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins={"B"}, outs={"A"}),
                action_tracer.DepEdges(ins={"D", "E"}, outs={"C"}),
            ])
        self.assertEqual(depfile.all_ins, {"B", "D", "E"})
        self.assertEqual(depfile.all_outs, {"A", "C"})

    def test_continuation(self):
        depfile = action_tracer.parse_depfile(
            [
                "a \\\n",
                "b: \\\n",
                "c \\\n",
                "d",
            ])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins={"c", "d"}, outs={"a", "b"}),
            ])
        self.assertEqual(depfile.all_ins, {"c", "d"})
        self.assertEqual(depfile.all_outs, {"a", "b"})

    def test_carriage_continuation(self):
        depfile = action_tracer.parse_depfile(
            [
                "a \\\r\n",
                "b: c \\\r\n",
                "d e",
            ])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins={"c", "d", "e"}, outs={"a", "b"}),
            ])
        self.assertEqual(depfile.all_ins, {"c", "d", "e"})
        self.assertEqual(depfile.all_outs, {"a", "b"})

    def test_space_in_filename(self):
        depfile = action_tracer.parse_depfile(["a\\ b: c d"])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins={"c", "d"}, outs={"a b"}),
            ])
        self.assertEqual(depfile.all_ins, {"c", "d"})
        self.assertEqual(depfile.all_outs, {"a b"})

    def test_consecutive_backslashes(self):
        with self.assertRaises(ValueError):
            action_tracer.parse_depfile(["a\\\\ b: c"])

    def test_trailing_escaped_whitespace(self):
        # An escaped space followed only by the line ending is malformed.
        with self.assertRaises(ValueError):
            action_tracer.parse_depfile([
                "a \\ \r\n",
                "b: c",
            ])
        # FIX: the original repeated this "\n" sub-case verbatim a second time;
        # the duplicate has been removed.
        with self.assertRaises(ValueError):
            action_tracer.parse_depfile([
                "a \\ \n",
                "b: c",
            ])

    def test_unfinished_line_continuation(self):
        with self.assertRaises(ValueError):
            action_tracer.parse_depfile([
                "a \\\n",
                "b: c \\\n",
            ])

    def test_blank_line(self):
        depfile = action_tracer.parse_depfile(
            [
                "a:b",
                "  ",
                "b:",
            ])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins={"b"}, outs={"a"}),
                action_tracer.DepEdges(ins=set(), outs={"b"}),
            ])
        self.assertEqual(depfile.all_ins, {"b"})
        self.assertEqual(depfile.all_outs, {"a", "b"})

    def test_comment(self):
        depfile = action_tracer.parse_depfile(
            [
                " # a:b",
                "b:",
            ])
        self.assertEqual(
            depfile.deps, [
                action_tracer.DepEdges(ins=set(), outs={"b"}),
            ])
        self.assertEqual(depfile.all_ins, set())
        self.assertEqual(depfile.all_outs, {"b"})

    def test_continuation_blank_line(self):
        with self.assertRaises(ValueError):
            action_tracer.parse_depfile([
                "a: \\\n",
                "",
                "b",
            ])

    def test_continuation_comment(self):
        with self.assertRaises(ValueError):
            action_tracer.parse_depfile([
                "a: \\\n",
                "# comment",
                "b",
            ])
class ParseFsatraceOutputTests(unittest.TestCase):
    """Tests for parse_fsatrace_output: 'op|path' lines -> Read/Write/Delete accesses."""

    def test_empty_stream(self):
        self.assertEqual(
            list(action_tracer.parse_fsatrace_output([])),
            [],
        )

    def test_ignore_malformed_line(self):
        # A line without the 'op|path' separator is silently dropped.
        self.assertEqual(
            list(action_tracer.parse_fsatrace_output(["invalid_line"])),
            [],
        )

    def test_read(self):
        self.assertEqual(
            list(action_tracer.parse_fsatrace_output(["r|README.md"])),
            [action_tracer.Read("README.md")],
        )

    def test_write(self):
        self.assertEqual(
            list(action_tracer.parse_fsatrace_output(["w|main.o"])),
            [action_tracer.Write("main.o")],
        )

    def test_touch(self):
        # Touches count as writes.
        self.assertEqual(
            list(action_tracer.parse_fsatrace_output(["t|file.stamp"])),
            [action_tracer.Write("file.stamp")],
        )

    def test_delete(self):
        self.assertEqual(
            list(action_tracer.parse_fsatrace_output(["d|remove-me.tmp"])),
            [action_tracer.Delete("remove-me.tmp")],
        )

    def test_move(self):
        # A move expands into a delete of the source plus a write of the destination.
        self.assertEqual(
            list(
                action_tracer.parse_fsatrace_output(["m|dest.txt|source.txt"])),
            [
                action_tracer.Delete("source.txt"),
                action_tracer.Write("dest.txt"),
            ],
        )

    def test_sequence(self):
        # Accesses come back in input order, with moves expanded in place.
        self.assertEqual(
            list(
                action_tracer.parse_fsatrace_output(
                    [
                        "m|dest.txt|source.txt",
                        "r|input.txt",
                        "w|output.log",
                    ])),
            [
                action_tracer.Delete("source.txt"),
                action_tracer.Write("dest.txt"),
                action_tracer.Read("input.txt"),
                action_tracer.Write("output.log"),
            ],
        )
class MatchConditionsTests(unittest.TestCase):
    """Checks MatchConditions.matches over prefix, suffix and path-component rules."""

    def test_no_conditions(self):
        conditions = action_tracer.MatchConditions()
        self.assertFalse(conditions.matches("foo/bar"))

    def test_prefix_matches(self):
        conditions = action_tracer.MatchConditions(prefixes={"fo"})
        self.assertTrue(conditions.matches("foo/bar"))

    def test_suffix_matches(self):
        conditions = action_tracer.MatchConditions(suffixes={"ar"})
        self.assertTrue(conditions.matches("foo/bar"))

    def test_component_matches(self):
        conditions = action_tracer.MatchConditions(components={"bar", "bq"})
        self.assertTrue(conditions.matches("foo/bar/baz.txt"))
class AccessShouldCheckTests(unittest.TestCase):
    """Tests for Access.should_check: required path prefix and ignore conditions.

    NOTE(review): several tests pass ``prefixes={}`` / ``suffixes={}`` -- those
    are empty DICTS, not sets; harmless for emptiness checks but worth confirming
    against MatchConditions' expected types.
    """

    def test_no_required_prefix(self):
        # With an empty required prefix, every access is checked.
        ignore_conditions = action_tracer.MatchConditions()
        self.assertTrue(
            action_tracer.Read("book").should_check(
                ignore_conditions=ignore_conditions, required_path_prefix=""))
        self.assertTrue(
            action_tracer.Write("block").should_check(
                ignore_conditions=ignore_conditions, required_path_prefix=""))

    def test_required_prefix_matches(self):
        ignore_conditions = action_tracer.MatchConditions()
        prefix = "/home/project"
        self.assertTrue(
            action_tracer.Read("/home/project/book").should_check(
                ignore_conditions=ignore_conditions,
                required_path_prefix=prefix))
        self.assertTrue(
            action_tracer.Write("/home/project/out/block").should_check(
                ignore_conditions=ignore_conditions,
                required_path_prefix=prefix))

    def test_required_prefix_no_match(self):
        # Paths outside the required prefix are exempt from checking.
        ignore_conditions = action_tracer.MatchConditions()
        prefix = "/home/project"
        self.assertFalse(
            action_tracer.Read("book").should_check(
                ignore_conditions=ignore_conditions,
                required_path_prefix=prefix))
        self.assertFalse(
            action_tracer.Write("output/log").should_check(
                ignore_conditions=ignore_conditions,
                required_path_prefix=prefix))

    def test_no_ignored_prefix(self):
        ignore_conditions = action_tracer.MatchConditions(prefixes={})
        self.assertTrue(
            action_tracer.Read("book").should_check(
                ignore_conditions=ignore_conditions))
        self.assertTrue(
            action_tracer.Write("output/log").should_check(
                ignore_conditions=ignore_conditions))

    def test_ignored_prefix_matches(self):
        ignore_conditions = action_tracer.MatchConditions(prefixes={"/tmp"})
        self.assertFalse(
            action_tracer.Read("/tmp/book").should_check(
                ignore_conditions=ignore_conditions))
        self.assertFalse(
            action_tracer.Write("/tmp/log").should_check(
                ignore_conditions=ignore_conditions))

    def test_ignored_prefix_no_match(self):
        ignore_conditions = action_tracer.MatchConditions(
            prefixes={"/tmp", "/no/look/here"})
        self.assertTrue(
            action_tracer.Read("book").should_check(
                ignore_conditions=ignore_conditions))
        self.assertTrue(
            action_tracer.Write("out/log").should_check(
                ignore_conditions=ignore_conditions))

    def test_no_ignored_suffix(self):
        ignore_conditions = action_tracer.MatchConditions(suffixes={})
        self.assertTrue(
            action_tracer.Read("book").should_check(
                ignore_conditions=ignore_conditions))
        self.assertTrue(
            action_tracer.Write("output/log").should_check(
                ignore_conditions=ignore_conditions))

    def test_ignored_suffix_matches(self):
        # e.g. from compiler --save-temps
        ignore_conditions = action_tracer.MatchConditions(suffixes={".ii"})
        self.assertFalse(
            action_tracer.Read("book.ii").should_check(
                ignore_conditions=ignore_conditions))
        self.assertFalse(
            action_tracer.Write("tmp/log.ii").should_check(
                ignore_conditions=ignore_conditions))

    def test_ignored_suffix_no_match(self):
        # e.g. from compiler --save-temps
        ignore_conditions = action_tracer.MatchConditions(
            suffixes={".ii", ".S"})
        self.assertTrue(
            action_tracer.Read("book.txt").should_check(
                ignore_conditions=ignore_conditions))
        self.assertTrue(
            action_tracer.Write("out/process.log").should_check(
                ignore_conditions=ignore_conditions))

    def test_ignored_path_components_no_match(self):
        ignore_conditions = action_tracer.MatchConditions(
            components={"__auto__", ".generated"})
        self.assertTrue(
            action_tracer.Read("book").should_check(
                ignore_conditions=ignore_conditions))
        self.assertTrue(
            action_tracer.Write("out/log").should_check(
                ignore_conditions=ignore_conditions))

    def test_ignored_path_components_matches(self):
        ignore_conditions = action_tracer.MatchConditions(
            components={"__auto__", ".generated"})
        self.assertFalse(
            action_tracer.Read("library/__auto__/book").should_check(
                ignore_conditions=ignore_conditions))
        self.assertFalse(
            action_tracer.Write(".generated/out/log").should_check(
                ignore_conditions=ignore_conditions))
class CheckAccessAllowedTests(unittest.TestCase):
    """Tests for FSAccess.allowed() over reads, writes, and deletes.

    Fix: the "forbidden" test names were misspelled as "forbiddden"
    (triple d); corrected so names read cleanly in test reports.
    """

    def test_allowed_read(self):
        self.assertTrue(
            action_tracer.Read("foo.txt").allowed(
                allowed_reads={"foo.txt"}, allowed_writes={}))

    def test_forbidden_read(self):
        self.assertFalse(
            action_tracer.Read("bar.txt").allowed(
                allowed_reads={}, allowed_writes={}))

    def test_allowed_write(self):
        self.assertTrue(
            action_tracer.Write("foo.txt").allowed(
                allowed_reads={}, allowed_writes={"foo.txt"}))

    def test_forbidden_write(self):
        self.assertFalse(
            action_tracer.Write("baz.txt").allowed(
                allowed_reads={}, allowed_writes={}))

    def test_allowed_delete(self):
        # Deletes are permitted on paths the action is allowed to write.
        self.assertTrue(
            action_tracer.Delete("foo.txt").allowed(
                allowed_reads={}, allowed_writes={"foo.txt"}))

    def test_forbidden_delete(self):
        self.assertFalse(
            action_tracer.Delete("baz.txt").allowed(
                allowed_reads={}, allowed_writes={}))
class FormatAccessSetTest(unittest.TestCase):
    """Tests for FSAccessSet.__str__: sorted, sectioned listing of accesses."""

    def test_empty(self):
        self.assertEqual(str(action_tracer.FSAccessSet()), "[empty accesses]")

    def test_reads(self):
        # Paths are printed sorted, regardless of set iteration order.
        self.assertEqual(
            str(action_tracer.FSAccessSet(reads={"c", "a", "b"})),
            textwrap.dedent(
                """\
                Reads:
                  a
                  b
                  c"""))

    def test_writes(self):
        self.assertEqual(
            str(action_tracer.FSAccessSet(writes={"e", "f", "d"})),
            textwrap.dedent(
                """\
                Writes:
                  d
                  e
                  f"""))

    def test_deletes(self):
        self.assertEqual(
            str(action_tracer.FSAccessSet(deletes={"r", "q", "p"})),
            textwrap.dedent(
                """\
                Deletes:
                  p
                  q
                  r"""))

    def test_reads_writes(self):
        # The same path may appear in multiple sections.
        files = {"c", "a", "b"}
        self.assertEqual(
            str(action_tracer.FSAccessSet(reads=files, writes=files)),
            textwrap.dedent(
                """\
                Reads:
                  a
                  b
                  c
                Writes:
                  a
                  b
                  c"""))

    def test_writes_deletes(self):
        files = {"c", "a", "b"}
        self.assertEqual(
            str(action_tracer.FSAccessSet(writes=files, deletes=files)),
            textwrap.dedent(
                """\
                Writes:
                  a
                  b
                  c
                Deletes:
                  a
                  b
                  c"""))
class FinalizeFileSystemAccessesTest(unittest.TestCase):
    """Tests for finalize_filesystem_accesses(): net effect of an access trace.

    Covers cancellation rules: a read of a file the action itself wrote is
    dropped, and a write followed by a delete nets out to nothing.
    """

    def test_no_accesses(self):
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses([]),
            action_tracer.FSAccessSet())

    def test_reads(self):
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Read("r1.txt"),
                    action_tracer.Read("r2.txt"),
                ]), action_tracer.FSAccessSet(reads={"r1.txt", "r2.txt"}))

    def test_writes(self):
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Write("wb.txt"),
                    action_tracer.Write("wa.txt"),
                ]), action_tracer.FSAccessSet(writes={"wa.txt", "wb.txt"}))

    def test_reads_writes_no_deletes(self):
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Read("r2.txt"),
                    action_tracer.Write("wb.txt"),
                    action_tracer.Write("wa.txt"),
                    action_tracer.Read("r1.txt"),
                ]),
            action_tracer.FSAccessSet(
                reads={"r1.txt", "r2.txt"}, writes={"wa.txt", "wb.txt"}))

    def test_read_after_write(self):
        # Reading back one's own output is not counted as a read.
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Write("temp.txt"),
                    action_tracer.Read("temp.txt"),
                ]), action_tracer.FSAccessSet(reads=set(), writes={"temp.txt"}))

    def test_delete(self):
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Delete("d1.txt"),
                    action_tracer.Delete("d2.txt"),
                ]), action_tracer.FSAccessSet(deletes={"d1.txt", "d2.txt"}))

    def test_delete_after_write(self):
        # A self-created temporary that is deleted leaves no net access.
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Write("temp.txt"),
                    action_tracer.Delete("temp.txt"),
                ]), action_tracer.FSAccessSet())

    def test_write_after_delete(self):
        # Delete-then-write nets out to a write (file is recreated).
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Delete("temp.txt"),
                    action_tracer.Write("temp.txt"),
                ]), action_tracer.FSAccessSet(writes={"temp.txt"}))

    def test_write_read_delete(self):
        self.assertEqual(
            action_tracer.finalize_filesystem_accesses(
                [
                    action_tracer.Write("temp.txt"),
                    action_tracer.Read("temp.txt"),
                    action_tracer.Delete("temp.txt"),
                ]), action_tracer.FSAccessSet())
class CheckAccessPermissionsTests(unittest.TestCase):
    """Tests for check_access_permissions(): returns the set of violations."""

    def test_no_accesses(self):
        self.assertEqual(
            action_tracer.check_access_permissions(
                action_tracer.FSAccessSet(), action_tracer.AccessConstraints()),
            action_tracer.FSAccessSet(),
        )

    def test_ok_read(self):
        # A permitted read produces no violations.
        self.assertEqual(
            action_tracer.check_access_permissions(
                action_tracer.FSAccessSet(reads={"readable.txt"}),
                action_tracer.AccessConstraints(
                    allowed_reads={"readable.txt"})),
            action_tracer.FSAccessSet(),
        )

    def test_forbidden_read(self):
        read = "unreadable.txt"
        self.assertEqual(
            action_tracer.check_access_permissions(
                action_tracer.FSAccessSet(reads={read}),
                action_tracer.AccessConstraints()),
            action_tracer.FSAccessSet(reads={read}),
        )

    def test_ok_write(self):
        self.assertEqual(
            action_tracer.check_access_permissions(
                action_tracer.FSAccessSet(writes={"writeable.txt"}),
                action_tracer.AccessConstraints(
                    allowed_writes={"writeable.txt"})),
            action_tracer.FSAccessSet(),
        )

    def test_forbidden_writes(self):
        # make sure multiple violations accumulate
        bad_writes = {
            "unwriteable.txt",
            "you-shall-not-pass.txt",
        }
        self.assertEqual(
            action_tracer.check_access_permissions(
                action_tracer.FSAccessSet(writes=bad_writes),
                action_tracer.AccessConstraints()),
            action_tracer.FSAccessSet(writes=bad_writes),
        )

    def test_read_from_temporary_writes_ok(self):
        # Reading a file the action itself wrote is not a read violation,
        # even though the write itself may be one.
        temp_file = "__file.tmp"
        reads = {temp_file}
        writes = {
            "unwriteable.txt",
            temp_file,
        }
        self.assertEqual(
            action_tracer.check_access_permissions(
                action_tracer.FSAccessSet(reads=reads, writes=writes),
                action_tracer.AccessConstraints()),
            action_tracer.FSAccessSet(reads=set(), writes=writes),
        )
class CheckMissingWritesTests(unittest.TestCase):
    """Tests for check_missing_writes(): declared outputs that were not written."""

    def test_no_accesses(self):
        self.assertEqual(
            action_tracer.check_missing_writes([], {}),
            {},
        )

    def test_only_reads(self):
        self.assertEqual(
            action_tracer.check_missing_writes(
                [action_tracer.Read("newspaper.pdf")],
                {},
            ),
            {},
        )

    def test_excess_write(self):
        # Writing a file that was not declared is not a *missing* write.
        self.assertEqual(
            action_tracer.check_missing_writes(
                [action_tracer.Write("side-effect.txt")],
                {},
            ),
            {},
        )

    def test_fulfilled_write(self):
        self.assertEqual(
            action_tracer.check_missing_writes(
                [action_tracer.Write("compiled.o")],
                {"compiled.o"},
            ),
            set(),
        )

    def test_missing_write(self):
        self.assertEqual(
            action_tracer.check_missing_writes(
                [],
                {"write-me.out"},
            ),
            {"write-me.out"},
        )

    def test_missing_and_fulfilled_write(self):
        self.assertEqual(
            action_tracer.check_missing_writes(
                [action_tracer.Write("compiled.o")],
                {
                    "write-me.out",
                    "compiled.o",
                },
            ),
            {"write-me.out"},
        )

    def test_written_then_deleted(self):
        # A required output that the action deleted again counts as missing.
        self.assertEqual(
            action_tracer.check_missing_writes(
                [
                    action_tracer.Write("compiled.o"),
                    action_tracer.Delete("compiled.o"),
                ],
                {"compiled.o"},
            ),
            {"compiled.o"},
        )

    def test_deleted_then_written(self):
        # Order matters: deleting first and then writing fulfills the output.
        self.assertEqual(
            action_tracer.check_missing_writes(
                [
                    action_tracer.Delete("compiled.o"),
                    action_tracer.Write("compiled.o"),
                ],
                {"compiled.o"},
            ),
            set(),
        )
def abspaths(container: Iterable[str]) -> AbstractSet[str]:
    """Return the set of absolute paths for every path in `container`."""
    return set(map(os.path.abspath, container))
class AccessConstraintsTests(unittest.TestCase):
    """Tests for Action.access_constraints(): allowed/required path sets.

    Filesystem and depfile interactions are mocked so no real files are read.
    """

    def test_empty_action(self):
        # Inputs (the script itself) are always readable.
        action = action_tracer.Action(inputs=["script.sh"])
        self.assertEqual(
            action.access_constraints(),
            action_tracer.AccessConstraints(
                allowed_reads=abspaths({"script.sh"})))

    def test_have_inputs(self):
        action = action_tracer.Action(
            inputs=["script.sh", "input.txt", "main.cc"])
        self.assertEqual(
            action.access_constraints(),
            action_tracer.AccessConstraints(
                allowed_reads=abspaths({"script.sh", "input.txt", "main.cc"})))

    def test_have_outputs(self):
        # Outputs are readable, writeable, and required to be written.
        action = action_tracer.Action(inputs=["script.sh"], outputs=["main.o"])
        self.assertEqual(
            action.access_constraints(),
            action_tracer.AccessConstraints(
                allowed_reads=abspaths({"script.sh", "main.o"}),
                allowed_writes=abspaths({"main.o"}),
                required_writes=abspaths({"main.o"})))

    def test_have_depfile_writeable_inputs(self):
        action = action_tracer.Action(inputs=["script.sh"], depfile="foo.d")
        # Pretend the depfile exists and contains one dep line.
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch("builtins.open", mock.mock_open(
                    read_data="foo.o: foo.cc foo.h\n")) as mock_file:
                constraints = action.access_constraints(
                    writeable_depfile_inputs=True)
        mock_exists.assert_called_once()
        mock_file.assert_called_once()
        self.assertEqual(
            constraints,
            action_tracer.AccessConstraints(
                allowed_reads=abspaths(
                    {"script.sh", "foo.d", "foo.o", "foo.cc", "foo.h"}),
                allowed_writes=abspaths({"foo.d", "foo.o", "foo.cc", "foo.h"})))

    def test_have_depfile_nonwritable_inputs(self):
        action = action_tracer.Action(inputs=["script.sh"], depfile="foo.d")
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch("builtins.open", mock.mock_open(
                    read_data="foo.o: foo.cc foo.h\n")) as mock_file:
                constraints = action.access_constraints(
                    writeable_depfile_inputs=False)
        mock_exists.assert_called_once()
        mock_file.assert_called_once()
        # Depfile *inputs* (foo.cc, foo.h) are readable but not writeable here.
        self.assertEqual(
            constraints,
            action_tracer.AccessConstraints(
                allowed_reads=abspaths(
                    {"script.sh", "foo.d", "foo.o", "foo.cc", "foo.h"}),
                allowed_writes=abspaths({"foo.d", "foo.o"})))

    def test_links_are_followed(self):
        # Symlinks in readable paths are resolved via os.path.realpath.
        def fake_realpath(s: str) -> str:
            return f'test/realpath/{s}'

        action = action_tracer.Action(inputs=["script.sh"], depfile="foo.d")
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch("builtins.open", mock.mock_open(
                    read_data="foo.o: foo.cc foo.h\n")) as mock_file:
                with mock.patch.object(os.path, 'realpath',
                                       wraps=fake_realpath) as mock_realpath:
                    constraints = action.access_constraints(
                        writeable_depfile_inputs=False)
        mock_exists.assert_called_once()
        mock_file.assert_called_once()
        mock_realpath.assert_called()
        self.assertEqual(
            constraints,
            action_tracer.AccessConstraints(
                allowed_reads=abspaths(
                    {
                        "test/realpath/script.sh",
                        "test/realpath/foo.d",
                        "test/realpath/foo.o",
                        "test/realpath/foo.cc",
                        "test/realpath/foo.h",
                    }),
                allowed_writes=abspaths({"foo.d", "foo.o"})))

    def test_have_nonexistent_depfile(self):
        # A depfile that does not exist (yet) is still readable/writeable.
        action = action_tracer.Action(depfile="foo.d")
        with mock.patch.object(os.path, 'exists',
                               return_value=False) as mock_exists:
            constraints = action.access_constraints()
        mock_exists.assert_called()
        self.assertEqual(
            constraints,
            action_tracer.AccessConstraints(
                allowed_writes=abspaths({"foo.d"}),
                allowed_reads=abspaths({"foo.d"})))
class DiagnoseStaleOutputsTest(unittest.TestCase):
    """Tests for diagnose_stale_outputs(): nonexistent and out-of-date outputs.

    Timestamps and existence checks are mocked; no real files are touched.
    """

    def test_no_accesses_no_constraints(self):
        output_diagnostics = action_tracer.diagnose_stale_outputs(
            accesses=[],
            access_constraints=action_tracer.AccessConstraints(),
        )
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(),
        )

    def test_missing_write_no_inputs(self):
        # An untouched required output that does not exist is "nonexistent".
        required_writes = {"write.me"}
        output_diagnostics = action_tracer.diagnose_stale_outputs(
            accesses=[],
            access_constraints=action_tracer.AccessConstraints(
                required_writes=required_writes),
        )
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(
                required_writes=required_writes,
                nonexistent_outputs={"write.me"}),
        )

    def test_missing_write_with_used_input(self):
        used_input = "read.me"
        required_writes = {"write.me"}
        output_diagnostics = action_tracer.diagnose_stale_outputs(
            accesses=[action_tracer.Read(used_input)],
            access_constraints=action_tracer.AccessConstraints(
                allowed_reads={used_input},
                required_writes=required_writes,
            ),
        )
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(
                required_writes=required_writes,
                nonexistent_outputs={"write.me"}),
        )

    def test_stale_output_no_inputs(self):
        # With no inputs read, an existing untouched output is not stale.
        required_writes = {"write.me"}
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            output_diagnostics = action_tracer.diagnose_stale_outputs(
                accesses=[],
                access_constraints=action_tracer.AccessConstraints(
                    required_writes=required_writes),
            )
        mock_exists.assert_called_once()
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(required_writes=required_writes),
        )

    def test_stale_output_with_used_input(self):
        # Input (ctime 200) is newer than the output (ctime 100) => stale.
        def fake_read_ctime(path: str):
            if path.startswith("read"):
                return 200
            raise ValueError(f'Unexpected path: {path}')

        def fake_write_ctime(path: str):
            if path.startswith("write"):
                return 100
            raise ValueError(f'Unexpected path: {path}')

        used_input = "read.me"
        required_writes = {"write.me"}
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch.object(os.path, 'getctime',
                                   wraps=fake_read_ctime) as mock_read_ctime:
                with mock.patch.object(
                        action_tracer, 'realpath_ctime',
                        wraps=fake_write_ctime) as mock_write_ctime:
                    output_diagnostics = action_tracer.diagnose_stale_outputs(
                        accesses=[action_tracer.Read(used_input)],
                        access_constraints=action_tracer.AccessConstraints(
                            allowed_reads={used_input},
                            required_writes=required_writes),
                    )
        mock_exists.assert_called_once()
        mock_read_ctime.assert_called()
        mock_write_ctime.assert_called()
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(
                required_writes=required_writes,
                newest_input=used_input,
                stale_outputs={"write.me"}),
        )

    def test_stale_output_with_multiple_used_inputs(self):
        def fake_read_ctime(path: str):
            if path == "read.me":
                return 200
            if path == "read.me.newer":
                return 300
            raise Exception(f'fake_read_ctime for unexpected path: {path}')

        def fake_write_ctime(path: str):
            if path.startswith("write"):
                return 250
            raise Exception(f'fake_write_ctime for unexpected path: {path}')

        used_input = "read.me"
        # Make sure the timestamp of the newest input is used for comparison.
        used_input_newer = "read.me.newer"
        required_writes = {"write.me"}
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch.object(os.path, 'getctime',
                                   wraps=fake_read_ctime) as mock_read_ctime:
                with mock.patch.object(
                        action_tracer, 'realpath_ctime',
                        wraps=fake_write_ctime) as mock_write_ctime:
                    output_diagnostics = action_tracer.diagnose_stale_outputs(
                        accesses=[
                            action_tracer.Read(used_input),
                            action_tracer.Read(used_input_newer),
                        ],
                        access_constraints=action_tracer.AccessConstraints(
                            allowed_reads={used_input, used_input_newer},
                            required_writes=required_writes),
                    )
        mock_exists.assert_called_once()
        mock_read_ctime.assert_called()
        mock_write_ctime.assert_called()
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(
                required_writes=required_writes,
                # newer input is used for comparison
                newest_input=used_input_newer,
                stale_outputs={"write.me"}),
        )

    def test_fresh_output_with_used_input(self):
        def fake_getctime(path: str):
            if path.startswith("read"):
                return 100
            if path.startswith("write"):
                return 200
            return 0

        used_input = "read.me"
        written_output = "write.me"
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch.object(os.path, 'getctime',
                                   wraps=fake_getctime) as mock_ctime:
                output_diagnostics = action_tracer.diagnose_stale_outputs(
                    accesses=[
                        action_tracer.Read(used_input),
                        action_tracer.Write(written_output),
                    ],
                    access_constraints=action_tracer.AccessConstraints(
                        allowed_reads={used_input},
                        required_writes={written_output}),
                )
        # There are no untouched outputs, so getctime is never called.
        mock_exists.assert_not_called()
        mock_ctime.assert_not_called()
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(
                required_writes={written_output},
                # newest_input is not evaluated
                stale_outputs=set(),
            ),
        )

    def test_fresh_output_with_used_input_readable_output(self):
        def fake_getctime(path: str):
            if path.startswith("read"):
                return 100
            if path.startswith("write"):
                return 200
            return 0

        used_input = "read.me"
        written_output = "write.me"
        with mock.patch.object(os.path, 'exists',
                               return_value=True) as mock_exists:
            with mock.patch.object(os.path, 'getctime',
                                   wraps=fake_getctime) as mock_ctime:
                output_diagnostics = action_tracer.diagnose_stale_outputs(
                    accesses=[
                        action_tracer.Read(used_input),
                        action_tracer.Read(written_output),
                        action_tracer.Write(written_output),
                    ],
                    access_constraints=action_tracer.AccessConstraints(
                        allowed_reads={used_input, written_output},
                        required_writes={written_output}),
                )
        # There are no untouched outputs, so getctime is never called.
        mock_exists.assert_not_called()
        mock_ctime.assert_not_called()
        self.assertEqual(
            output_diagnostics,
            action_tracer.StalenessDiagnostics(
                required_writes={written_output},
                # newest_input is not evaluated
                stale_outputs=set(),
            ),
        )
class MainArgParserTests(unittest.TestCase):
    """Tests for main_arg_parser(): defaults and boolean flag pairs."""

    # These args are required, and there's nothing interesting about them to test.
    required_args = "--trace-output t.out --label //pkg:tgt "

    def test_only_required_args(self):
        parser = action_tracer.main_arg_parser()
        args = parser.parse_args(self.required_args.split())
        self.assertEqual(args.trace_output, "t.out")
        self.assertEqual(args.label, "//pkg:tgt")
        # Make sure some checks are enabled by default
        self.assertTrue(args.check_access_permissions)

    def test_check_access_permissions(self):
        parser = action_tracer.main_arg_parser()
        args = parser.parse_args(
            (self.required_args + "--check-access-permissions").split())
        self.assertTrue(args.check_access_permissions)

    def test_no_check_access_permissions(self):
        # The --no- prefix form disables the check.
        parser = action_tracer.main_arg_parser()
        args = parser.parse_args(
            (self.required_args + "--no-check-access-permissions").split())
        self.assertFalse(args.check_access_permissions)
# Allow running this test file directly (e.g. `python action_tracer_test.py`).
if __name__ == '__main__':
    unittest.main()
|
import matplotlib.pyplot as plt
import numpy as np
from numba import jit
from timeit import default_timer as timer
import random
start = timer()  # wall-clock start; elapsed time is reported at the end

# PARAMETERS TO CHANGE THE FRACTAL GENERATED
seq = "AB"  # sequence to alternate r values
probability = 75  # probability (in %) that a given value in the sequence will switch
a_lb = 2  # a lower bound
a_ub = 4  # a upper bound
b_lb = 2  # b lower bound
b_ub = 4  # b upper bound

# PARAMETERS REFINING ACCURACY OF FRACTAL PICTURE GENERATED
num_warmups = 1200  # number of "warmups" or throwaway iterations before computing lyapunov exponent
num_lyap_iterations = 300  # number of iterations used to compute the lyapunov exp
steps = 500  # steps between b1 and b2 values on axes -- higher it is, the better the picture
# CREATING RANDOM SEQUENCE WITH A LENGTH OF THE TOTAL NUMBER OF ITERATIONS
# EACH ITERATION THE PROBABILITY WILL BE JUDGED AGAINST THIS LIST
@jit
def getrandomseq():
    """Return one random int in [0, 99] per iteration (warmups included)."""
    return [random.randint(0, 99) for _ in range(num_lyap_iterations + num_warmups)]
# LOGISTIC MAP THAT GIVES US THE NEXT X
@jit
def F(x, curr_r):
    """Logistic-map step: x -> r * x * (1 - x)."""
    complement = 1 - x
    return curr_r * x * complement
# DERIVATIVE OF F -- USED TO COMPUTE THE LYAPUNOV EXPONENT
@jit
def Fprime(x, curr_r):
    """Derivative of F, with zeros and infinities clamped so log() stays finite.

    Expects `x` to be a numpy array (boolean-mask assignment is used).
    """
    deriv = curr_r * (1 - (2 * x))
    deriv[deriv == 0] = 0.0001
    deriv[deriv == -np.inf] = -1000
    deriv[deriv == np.inf] = 1000
    return deriv
# RETURNS THE CORRECT B-VALUE BASED ON THE CURRENT ITERATION
@jit
def getseqval(curr_iteration, a, b, probability, problist):
    """Pick a or b for this iteration per the pattern `seq`, with a chance to switch.

    The scheduled value (per `seq`) is kept when probability <= the pre-drawn
    random number for this iteration; otherwise the other value is used.
    """
    randnum = problist[curr_iteration]
    index = np.mod(curr_iteration, len(seq))
    keep_scheduled = probability <= randnum
    if seq[index] == 'A':
        return a if keep_scheduled else b
    return b if keep_scheduled else a
# RETURNS THE LYAPUNOV EXPONENT BASED ON THE SPECIFIED B1 AND B2 VALUES
@jit
def getlyapexponent(time_scale, probability, problist):
    """Estimate the Lyapunov exponent for each (b1, b2) pair in the meshgrid.

    time_scale: (b1, b2) tuple of meshgrid arrays.
    probability: percent chance the scheduled r value is switched.
    problist: pre-drawn random ints in [0, 99], one per iteration.
    """
    b1, b2 = time_scale
    lyap_prob = probability
    x = .5  # initial value of x
    lyapsum = 0  # initializing lyapunov sum for use later
    # do warmups, to discard the early values of the iteration to allow the orbit to settle down
    for i in range(num_warmups):
        x = F(x, getseqval(i, b1, b2, lyap_prob, problist))
    for i in range(num_warmups, num_lyap_iterations + num_warmups):
        # accumulate log|F'(x)| over the sampled iterations
        lyapsum += np.log( np.abs(Fprime(x, getseqval(i, b1, b2, lyap_prob, problist) ) ) )
        # get next x
        x = F(x, getseqval(i, b1, b2, lyap_prob, problist))
    return (lyapsum / num_lyap_iterations)
# CREATING FRACTAL IMAGE
a = np.linspace(a_lb, a_ub, steps)  # range of a values (x axis)
b = np.linspace(b_lb, b_ub, steps)  # range of b values (y axis)
aa, bb = np.meshgrid(a, b)

# COMPUTING AVERAGE IMAGE FROM MULTIPLE RANDOM SEQUENCES
lyap_exponents = []
for i in range(10):
    problist = getrandomseq()
    lyap_exponents.append(getlyapexponent((bb, aa), probability, problist))
fractal_grid = np.average(lyap_exponents, axis=0)

# CREATING AND ADJUSTING GRAPH
plt.figure()  # creating new window for each graph that is run
plt.subplots_adjust(top=0.825, bottom=0.1, left=0.11, right=0.9, hspace=0.2, wspace=0.2)
plt.suptitle("Probabilistic Lyanpuov fractal for logistic map")
plt.title("Pattern: " + seq + "\n Probability that a given value\n in the pattern will switch: " + str(probability) + "%")
plt.xlabel("a")
plt.ylabel("b")
# BUG FIX: the original passed `cmap=lyap_cmap`, but `lyap_cmap` is never
# defined anywhere in this script, so the call raised NameError. Use a
# built-in named colormap instead; substitute a custom one here if desired.
im = plt.imshow(fractal_grid, cmap="nipy_spectral", vmin=-2, vmax=0, origin="lower", extent=(a_lb, a_ub, b_lb, b_ub))
plt.colorbar(im)

end = timer()
print("elapsed time: " + str(end - start))
|
from mythx_models.response import AuthLogoutResponse
def test_auth_login_response_from_valid_json():
    """A default-constructed AuthLogoutResponse serializes to an empty dict."""
    # NOTE(review): the test name says "login" but the body exercises
    # AuthLogoutResponse -- looks like a copy/paste slip in the name; confirm.
    resp = AuthLogoutResponse()
    assert resp.dict() == {}
|
# -*- coding: utf-8 -*-
from django.utils.translation import ugettext_lazy as _
from django import forms
from django.contrib import admin
from publication_backbone.models import Publication
from .models import PromoPluginModel
from salmonella.widgets import SalmonellaIdWidget
from fluent_contents.forms import ContentItemForm
#==============================================================================
# PromoForm
#==============================================================================
class PromoForm(ContentItemForm):
    """Admin form for the promo content plugin.

    Adds an optional FK picker to a related Publication, rendered with a
    Salmonella raw-id style widget instead of a full dropdown.
    """
    # NOTE(review): ForeignKey.rel was removed in Django 2.0 (replaced by
    # `remote_field`) -- this code targets an older Django release; confirm.
    to_publication = forms.ModelChoiceField(queryset=Publication.objects.all(), required=False,
        widget=SalmonellaIdWidget(PromoPluginModel._meta.get_field("to_publication").rel, admin.site),
        label=_('Related publication'))
|
# Re-export the public names of every submodule at the package level.
from .anchor import *
from .constraints import *
from .edge import *
from .graph import *
from .multi_edge import *
from .node import *
from .tools import *

# Drop the submodule objects themselves so that only the star-imported
# names remain visible in the package namespace.
del anchor
del constraints
del edge
del graph
del multi_edge
del node
del tools
|
import os
import pytest
from iotedgedev.envvars import EnvVars
from iotedgedev.output import Output
from iotedgedev.utility import Utility
from .utility import assert_list_equal, assert_file_equal, assert_json_file_equal
pytestmark = pytest.mark.unit
tests_dir = os.path.join(os.getcwd(), "tests")
test_assets_dir = os.path.join(tests_dir, "assets")
test_file_1 = os.path.join(test_assets_dir, "deployment.template_1.json")
test_file_2 = os.path.join(test_assets_dir, "deployment.template_2.json")
test_file_4 = os.path.join(test_assets_dir, "deployment.template_4.json")
@pytest.fixture
def utility():
    """Provide a Utility instance backed by env vars loaded for the tests."""
    out = Output()
    env = EnvVars(out)
    env.load()
    return Utility(env, out)
def test_ensure_dir(request, utility):
    """ensure_dir is a no-op for existing dirs and creates missing ones."""
    before_ensure = os.listdir(tests_dir)
    utility.ensure_dir(test_assets_dir)
    after_ensure = os.listdir(tests_dir)
    # Existing directory: nothing should have been added or removed.
    assert_list_equal(before_ensure, after_ensure)

    new_dir = "new_dir"
    utility.ensure_dir(new_dir)
    assert os.path.exists(new_dir)

    def clean():
        # Remove the directory this test created (runs as a finalizer).
        if os.path.exists(new_dir):
            os.rmdir(new_dir)

    request.addfinalizer(clean)
def test_copy_from_template_dir(utility, tmpdir):
    """Copying from the template dir applies the %MODULE% replacement."""
    src_file = "deployment.template.json"
    dest_dir = tmpdir.strpath
    dest_file = tmpdir.join(src_file).strpath
    utility.copy_from_template_dir(src_file, dest_dir, replacements={"%MODULE%": "filtermodule"})
    assert_file_equal(dest_file, test_file_4)
def test_copy_template(utility, tmpdir):
    """copy_template substitutes module placeholders with expandvars disabled."""
    replacements = {
        "${MODULES.csharpmodule.amd64}": "localhost:5000/csharpmodule:0.0.1-amd64",
        "${MODULES.csharpfunction.amd64.debug}": "localhost:5000/csharpfunction:0.0.1-amd64.debug"
    }
    dest = tmpdir.join("deployment_template_1.dest.json").strpath
    utility.copy_template(test_file_1, dest, replacements=replacements, expandvars=False)
    assert_json_file_equal(test_file_2, dest)
def test_copy_template_expandvars(utility, tmpdir):
    """With expandvars=True, ${ENV_VAR} references in replacements are expanded."""
    replacements = {
        "${MODULES.csharpmodule.amd64}": "${CONTAINER_REGISTRY_SERVER}/csharpmodule:0.0.1-amd64",
        "${MODULES.csharpfunction.amd64.debug}": "${CONTAINER_REGISTRY_SERVER}/csharpfunction:0.0.1-amd64.debug"
    }
    # NOTE(review): this mutates os.environ for the rest of the test session.
    os.environ["CONTAINER_REGISTRY_SERVER"] = "localhost:5000"
    dest = tmpdir.join("deployment_template_2.dest.json").strpath
    utility.copy_template(test_file_1, dest, replacements=replacements, expandvars=True)
    assert_json_file_equal(test_file_2, dest)
def test_in_asterisk_list(utility):
    """A name present in a comma-separated list matches."""
    assert utility.in_asterisk_list("filtermodule", "pipemodule, filtermodule")

def test_in_asterisk_list_empty(utility):
    """An empty list matches nothing."""
    assert not utility.in_asterisk_list("filtermodule", "")

def test_in_asterisk_list_asterisk(utility):
    """The wildcard entry '*' matches any name."""
    assert utility.in_asterisk_list("filtermodule", "*")
def test_del_key(utility):
    """del_key removes a nested key path, returning the removed value or None."""
    dict_ = {
        "1": 0,
        "2": {
            "3": 0,
            "4": "foo",
            "5": {
                "6": 0
            }
        }
    }
    # Non-dict input and missing keys return None without raising.
    assert utility.del_key("not a dict", ["1", "2"]) is None
    assert utility.del_key(dict_, ["2", "5"]) == {"6": 0}
    assert utility.del_key(dict_, ["1", "non_existent_key"]) is None
    assert utility.del_key(dict_, ["2", "non_existent_key"]) is None
    # Only the ["2"]["5"] subtree was actually deleted.
    expected = {
        "1": 0,
        "2": {
            "3": 0,
            "4": "foo"
        }
    }
    assert dict_ == expected
def test_get_sha256_hash():
    """get_sha256_hash returns the hex SHA-256 digest of its input string."""
    assert Utility.get_sha256_hash("foo") == "2c26b46b68ffc68ff99b453c1d30413413422d706483bfa0f98a5e886266e7ae"
def test_get_deployment_manifest_name():
    """Manifest name varies by template name, schema version, and platform."""
    # Schema 0.0.1: platform suffix is omitted.
    assert Utility.get_deployment_manifest_name("config/deployment.template.json", "0.0.1", "amd64") == "deployment.json"
    assert Utility.get_deployment_manifest_name("deployment.template.json", "0.0.1", "amd64") == "deployment.json"
    assert Utility.get_deployment_manifest_name("deployment.debug.template.json", "0.0.1", "amd64") == "deployment.debug.json"
    # Schema 1.0.0: platform is embedded in the name.
    assert Utility.get_deployment_manifest_name("config/deployment.template.json", "1.0.0", "amd64") == "deployment.amd64.json"
    assert Utility.get_deployment_manifest_name("deployment.template.json", "1.0.0", "amd64") == "deployment.amd64.json"
    assert Utility.get_deployment_manifest_name("deployment.debug.template.json", "1.0.0", "amd64") == "deployment.debug.amd64.json"
    assert Utility.get_deployment_manifest_name("", "", "") == "deployment.json"
    # DEPLOYMENT_CONFIG_FILE overrides everything.
    # NOTE(review): this env var mutation leaks into later tests.
    os.environ["DEPLOYMENT_CONFIG_FILE"] = "foo.json"
    assert Utility.get_deployment_manifest_name("deployment.debug.template.json", "1.0.0", "amd64") == "foo.json"
|
from nwck_class_3 import Newick_Tree
import numpy as np
# Read two Newick tree strings from the data file (first line is discarded).
f = open('data/data.dat')
junk = f.readline()
s1 = f.readline().strip()
s2 = f.readline().strip()
f.close()
n = Newick_Tree(s1)
m = Newick_Tree(s2)
# X[i, j, A, B]: count table indexed by child slot i of node A (tree n) and
# child slot j of node B (tree m); slot index 2 holds the complement
# ("everything outside this subtree") -- filled in the passes below.
X = np.zeros((3, 3, max(n.tree.keys()) + 2, len(m.tree.keys()) + 2))
#Z=np.zeros((3,3,max(n.tree.keys())+2,len(m.tree.keys())+2))
def xchoose2(r):
    """Return r choose 2 (0 when r < 2)."""
    if r >= 2:
        return (r * (r - 1)) / 2
    return 0
def xchoose4(r):
    """Return r choose 4 as a float (0 when r < 4)."""
    if r >= 4:
        return (1.0 * r * (r - 1) * (r - 2) * (r - 3)) / 24
    return 0
# Nspan[x]: number of leaves in the subtree rooted at x (tree n),
# accumulated bottom-up via a post-order walk.
Nspan = {}
for x in n.post_order(n.root):
    if len(n.dtree[x]) == 0:
        Nspan[x] = 1  # leaf
    else:
        Nspan[x] = sum([Nspan[i] for i in n.dtree[x]])
# Mspan[x]: same leaf counts for tree m.
Mspan = {}
for x in m.post_order(m.root):
    if len(m.dtree[x]) == 0:
        Mspan[x] = 1  # leaf
    else:
        Mspan[x] = sum([Mspan[i] for i in m.dtree[x]])
#X=defaultdict(int)
# Seed X at matching leaf pairs: record a 1 at the parents' child-slot
# indices wherever a leaf of n and a leaf of m carry the same taxon.
for A in n.leaves_postordered:
    for B in m.leaves_postordered:
        Aindex = n.dtree[n.tree[A]].index(A)  # A's slot under its parent
        Bindex = m.dtree[m.tree[B]].index(B)  # B's slot under its parent
        Aparent = n.tree[A]
        Bparent = m.tree[B]
        if n.node_to_taxon[A] == m.node_to_taxon[B]:
            # X[0,0,A,B]=1
            # X[Aindex,0,Aparent,B]=1
            # X[0,Bindex,A,Bparent]=1
            X[Aindex, Bindex, Aparent, Bparent] = 1
Q = 0
# First pass: roll the child-slot counts up to each internal node's parent,
# and fill in the complement row/column (index 2) from the subtree leaf counts.
for A in n.post_order(n.dtree[n.root][0]):
    for B in m.post_order(m.dtree[m.root][0]):
        Aindex = n.dtree[n.tree[A]].index(A)
        Bindex = m.dtree[m.tree[B]].index(B)
        Aparent = n.tree[A]
        Bparent = m.tree[B]
        if len(n.dtree[A]) == 2 and len(m.dtree[B]) == 2:
            #X[Aindex,Bindex,Aparent,Bparent]=X[0,0,A,B]+X[0,1,A,B]+X[1,0,A,B]+X[1,1,A,B]
            # Marginal sums over A's children / B's children, propagated up.
            X[0, Bindex, A, Bparent] = X[0, 0, A, B] + X[0, 1, A, B]
            X[1, Bindex, A, Bparent] = X[1, 0, A, B] + X[1, 1, A, B]
            X[Aindex, 0, Aparent, B] = X[0, 0, A, B] + X[1, 0, A, B]
            X[Aindex, 1, Aparent, B] = X[0, 1, A, B] + X[1, 1, A, B]
            X[Aindex, Bindex, Aparent, Bparent] = X[0, 0, A, B] + X[0, 1, A, B] + X[1, 0, A, B] + X[1, 1, A, B]
            # Complement entries: leaves in one subtree but not the other.
            X[0, 2, A, B] = Nspan[n.dtree[A][0]] - X[0, 0, A, B] - X[0, 1, A, B]
            X[1, 2, A, B] = Nspan[n.dtree[A][1]] - X[1, 0, A, B] - X[1, 1, A, B]
            X[2, 0, A, B] = Mspan[m.dtree[B][0]] - X[1, 0, A, B] - X[0, 0, A, B]
            X[2, 1, A, B] = Mspan[m.dtree[B][1]] - X[0, 1, A, B] - X[1, 1, A, B]
            X[2, 2, A, B] = len(n.taxa) - Nspan[n.dtree[A][0]] - Nspan[n.dtree[A][1]] - X[2, 1, A, B] - X[2, 0, A, B]
            # Q+=(xchoose2(X[2,2,A,B])*(X[0,0,A,B]*X[1,1,A,B]+X[1,0,A,B]*X[0,1,A,B]))
#for A in n.post_order(n.dtree[n.root][0]):
#    for B in m.post_order(m.dtree[m.root][0]):
#        if len(n.dtree[A])==0 or len(m.dtree[B])==0:
#            continue
#        else:
#            X[0,2,A,B]=Nspan[n.dtree[A][0]]-X[0,0,A,B]-X[0,1,A,B]
#            X[1,2,A,B]=Nspan[n.dtree[A][1]]-X[1,0,A,B]-X[1,1,A,B]
#            X[2,0,A,B]=Mspan[m.dtree[B][0]]-X[1,0,A,B]-X[0,0,A,B]
#            X[2,1,A,B]=Mspan[m.dtree[B][1]]-X[0,1,A,B]-X[1,1,A,B]
#            X[2,2,A,B]=len(n.taxa)-Nspan[n.dtree[A][0]]-Nspan[n.dtree[A][1]]-X[2,1,A,B]-X[2,0,A,B]
# Second pass: for every internal node pair (A, B), accumulate quartet counts
# from four table configurations (X/Z/U/V) into Q.
ds = []
for A in n.post_order(n.dtree[n.root][0]):
    for B in m.post_order(m.dtree[m.root][0]):
        if len(n.dtree[A]) == 0 or len(m.dtree[B]) == 0:
            continue
        else:
            pA = n.tree[A]
            pB = m.tree[B]
            Aindex = n.dtree[pA].index(A)
            Bindex = m.dtree[pB].index(B)
            # Z view: counts seen from the parents' side (sibling/complement).
            Z00AB = X[1 - Aindex, 1 - Bindex, pA, pB]
            Z11AB = X[2, 2, pA, pB]
            Z01AB = X[1 - Aindex, 2, pA, pB]
            Z10AB = X[2, 1 - Bindex, pA, pB]
            # NOTE(review): ZZ02AB looks like a typo for Z02AB; it and
            # Z20AB/Z12AB/Z21AB are computed but never used below -- confirm.
            ZZ02AB = X[1 - Aindex, Bindex, pA, pB]
            Z20AB = X[Aindex, 1 - Bindex, pA, pB]
            Z12AB = X[2, Bindex, pA, pB]
            Z21AB = X[Aindex, 2, pA, pB]
            Z22AB = X[Aindex, Bindex, pA, pB]
            dqX = xchoose2(X[2, 2, A, B]) * (X[0, 0, A, B] * X[1, 1, A, B] + X[1, 0, A, B] * X[0, 1, A, B])
            dqZ = xchoose2(Z22AB) * (Z00AB * Z11AB + Z10AB * Z01AB)
            # U view: A's children against B's sibling/complement.
            U00AB = X[0, 1 - Bindex, A, pB]
            U10AB = X[1, 1 - Bindex, A, pB]
            U01AB = X[0, 2, A, pB]
            U11AB = X[1, 2, A, pB]
            U22AB = X[1 - Aindex, 0, pA, B] + X[2, 0, pA, B] + X[1 - Aindex, 1, pA, B] + X[2, 1, pA, B]
            dqU = xchoose2(U22AB) * (U00AB * U11AB + U10AB * U01AB)
            # V view: the mirror of U (A's sibling/complement vs B's children).
            V00AB = X[1 - Aindex, 0, pA, B]
            V10AB = X[1 - Aindex, 1, pA, B]
            V01AB = X[2, 0, pA, B]
            V11AB = X[2, 1, pA, B]
            V22AB = X[0, 1 - Bindex, A, pB] + X[0, 2, A, pB] + X[1, 1 - Bindex, A, pB] + X[1, 2, A, pB]
            dqV = xchoose2(V22AB) * (V00AB * V11AB + V10AB * V01AB)
            if dqX != 0 or dqZ != 0 or dqU != 0 or dqV != 0:
                ds.append((A, B, dqX, dqZ, dqU, dqV))
            #print A,B,dqX,dqZ
            Q += dqX + dqZ + dqU + dqV
# Python 2 print statement; appears to report 2*C(n,4) minus the shared count.
print 2*xchoose4(len(n.taxa))-Q
def tsummary(X, A, B):
    """Flatten the 3x3 sub-table X[:, :, A, B] into (i, j, value) triples."""
    cells = []
    for i in (0, 1, 2):
        for j in (0, 1, 2):
            cells.append((i, j, X[i, j, A, B]))
    return cells
import csv
import numpy as np
import os
import pandas as pd
import sys
import sklearn.model_selection
#path = sys.argv[1]
def read_patients(path='C:/Users/nealm/Dropbox (Brown)/MIMIC DATA/mimic-iii-clinical-database-1.4/mimic-iii-clinical-database-1.4'):
    """Load the MIMIC-III PATIENTS table, keeping id/gender/birth/death columns.

    DOB and DOD are parsed to datetimes; other columns are dropped.
    """
    patients = pd.read_csv(os.path.join(path, 'PATIENTS.csv.gz'), compression="gzip", header=0, index_col=0)
    patients = patients[['SUBJECT_ID', 'GENDER', 'DOB', 'DOD']]
    patients.DOB = pd.to_datetime(patients.DOB)
    patients.DOD = pd.to_datetime(patients.DOD)
    return patients
def read_admissions(path='C:/Users/nealm/Dropbox (Brown)/MIMIC DATA/mimic-iii-clinical-database-1.4/mimic-iii-clinical-database-1.4'):
    """Load the ADMISSIONS table with id, timing, ethnicity, and diagnosis columns.

    ADMITTIME/DISCHTIME/DEATHTIME are parsed to datetimes.
    """
    admits = pd.read_csv(os.path.join(path, 'ADMISSIONS.csv.gz'), compression="gzip", header=0, index_col=0)
    admits = admits[['SUBJECT_ID', 'HADM_ID', 'ADMITTIME', 'DISCHTIME', 'DEATHTIME', 'ETHNICITY', 'DIAGNOSIS']]
    admits.ADMITTIME = pd.to_datetime(admits.ADMITTIME)
    admits.DISCHTIME = pd.to_datetime(admits.DISCHTIME)
    admits.DEATHTIME = pd.to_datetime(admits.DEATHTIME)
    return admits
def read_events(subj_ids, path='C:/Users/nealm/Dropbox (Brown)/MIMIC DATA/mimic-iii-clinical-database-1.4/mimic-iii-clinical-database-1.4'):
    """Stream CHARTEVENTS in chunks, keep rows for subj_ids, write chartevents.csv.

    Reads at most 210 chunks of 100k rows (the table is too large to load
    whole), filters to the given subject ids, and persists the result to
    'chartevents.csv'. Returns 0 -- the data is written to disk, not returned.
    """
    file = os.path.join(path, 'CHARTEVENTS.csv.gz')
    chunks = []
    counter = 0
    for chunker in pd.read_csv(file, chunksize=100000, compression="gzip", header=0, index_col=0):
        if counter == 210:  # cap the amount of data read
            break
        chunks.append(chunker)
        counter += 1
        print(counter)  # progress indicator
    # BUG FIX: the original accumulated with DataFrame.append, which was
    # deprecated and removed in pandas 2.0; concatenate the chunks instead.
    # (Also drops an unused `df = pd.DataFrame()` initialization.)
    df = pd.concat(chunks) if chunks else pd.DataFrame()
    df = df[df['SUBJECT_ID'].isin(subj_ids)]
    print(df)
    df.to_csv('chartevents.csv')
    return 0
def read_icustays(path='C:/Users/nealm/Dropbox (Brown)/MIMIC DATA/mimic-iii-clinical-database-1.4/mimic-iii-clinical-database-1.4'):
    """Load the ICUSTAYS table with INTIME/OUTTIME parsed to datetimes."""
    stays = pd.read_csv(os.path.join(path, 'ICUSTAYS.csv.gz'), compression="gzip", header=0, index_col=0)
    stays.INTIME = pd.to_datetime(stays.INTIME)
    stays.OUTTIME = pd.to_datetime(stays.OUTTIME)
    return stays
def read_notes(path='C:/Users/nealm/Dropbox (Brown)/MIMIC DATA/mimic-iii-clinical-database-1.4/mimic-iii-clinical-database-1.4'):
    """Load NOTEEVENTS, dropping rows missing chart time, admission, subject, or text."""
    notes = pd.read_csv(os.path.join(path, 'NOTEEVENTS.csv.gz'), compression="gzip", header=0, index_col=0)
    notes['CHARTDATE'] = pd.to_datetime(notes['CHARTDATE'])
    notes['CHARTTIME'] = pd.to_datetime(notes['CHARTTIME'])
    notes['STORETIME'] = pd.to_datetime(notes['STORETIME'])
    # Keep only fully-populated rows needed downstream.
    notes = notes[notes['CHARTTIME'].notnull()]
    notes = notes[notes['HADM_ID'].notnull()]
    notes = notes[notes['SUBJECT_ID'].notnull()]
    notes = notes[notes['TEXT'].notnull()]
    notes = notes[['SUBJECT_ID', 'HADM_ID', 'CHARTTIME', 'TEXT']]
    return notes
def filter_stays(icu_stays):
    """Keep only stays without ward/care-unit transfers; project the id/time columns."""
    no_transfer = ((icu_stays.FIRST_WARDID == icu_stays.LAST_WARDID)
                   & (icu_stays.FIRST_CAREUNIT == icu_stays.LAST_CAREUNIT))
    kept = icu_stays[no_transfer]
    columns = ['SUBJECT_ID', 'HADM_ID', 'ICUSTAY_ID', 'LAST_CAREUNIT', 'DBSOURCE', 'INTIME', 'OUTTIME', 'LOS']
    return kept[columns]
def merge_tables(stays, admissions, patients):
    """Inner-join stays with admissions (subject + admission) and then with
    patients (subject)."""
    merged = stays.merge(admissions, how='inner', on=['SUBJECT_ID', 'HADM_ID'])
    return merged.merge(patients, how='inner', on=['SUBJECT_ID'])
def add_age_mortality(icu_stays):
    """Add an AGE column (INTIME - DOB) and a binary MORTALITY column.

    MORTALITY is 1 when either DOD or DEATHTIME falls inside the
    [ADMITTIME, DISCHTIME] window of the admission.  Mutates and returns
    the input frame.
    """
    icu_stays['INTIME'] = pd.to_datetime(icu_stays['INTIME']).dt.date
    icu_stays['DOB'] = pd.to_datetime(icu_stays['DOB']).dt.date
    # NOTE(review): dividing by np.timedelta64(1, 'h') yields age in HOURS,
    # not years -- confirm downstream consumers expect that unit.
    icu_stays['AGE'] = icu_stays.apply(lambda e: np.timedelta64(e['INTIME'] - e['DOB']) / np.timedelta64(1, 'h'), axis=1)
    # In-hospital death from either source column (DOD or DEATHTIME).
    mortality = icu_stays['DOD'].notnull() & (icu_stays['ADMITTIME'] <= icu_stays['DOD']) & (icu_stays['DISCHTIME'] >= icu_stays['DOD']) | (icu_stays['DEATHTIME'].notnull() & (icu_stays['ADMITTIME'] <= icu_stays['DEATHTIME']) & (icu_stays['DISCHTIME'] >= icu_stays['DEATHTIME']))
    icu_stays['MORTALITY'] = mortality.astype(int)
    return icu_stays
def save_data(X_train, X_test, y_train, y_test, notes):
    """Write the train/test splits and the notes table to CSV files in the
    current working directory."""
    outputs = (
        (X_train, 'X_train.csv'),
        (X_test, 'X_test.csv'),
        (y_train, 'y_train.csv'),
        (y_test, 'y_test.csv'),
        (notes, 'clinical_notes.csv'),
    )
    for frame, filename in outputs:
        frame.to_csv(filename)
def unique_hadm(icu_stays, notes):
    """Restrict notes to admissions that appear in icu_stays.

    Parameters
    ----------
    icu_stays : DataFrame with a HADM_ID column.
    notes : DataFrame with a HADM_ID column.

    Returns
    -------
    DataFrame
        The rows of *notes* whose HADM_ID occurs in *icu_stays*.
    """
    hadm_ids = np.unique(icu_stays['HADM_ID'])
    # Fix: `Series in array` does not test per-row membership (it raised /
    # misbehaved); the vectorized isin() is the correct boolean mask.
    return notes[notes['HADM_ID'].isin(hadm_ids)]
def main():
    """Extract the MIMIC-III cohort tables, build mortality labels, and save
    train/test splits plus the clinical notes to CSV files."""
    patients = read_patients()
    subj_id = np.unique(patients['SUBJECT_ID'])
    # Filters CHARTEVENTS to the cohort and writes chartevents.csv;
    # read_events returns 0, so `events` is just that status value.
    events = read_events(subj_id)
    print(events)
    notes = read_notes()
    admissions = read_admissions()
    icu_stays = read_icustays()
    icu_stays = filter_stays(icu_stays)
    icu_stays = merge_tables(icu_stays, admissions, patients)
    icu_stays = add_age_mortality(icu_stays)
    # NOTE(review): output filename has no .csv extension -- confirm intended.
    icu_stays.to_csv('icu_stays')
    labels = icu_stays['MORTALITY']
    icu_stays = icu_stays.drop(['MORTALITY'], axis=1)
    #notes = unique_hadm(icu_stays, notes)
    X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(icu_stays, labels, test_size=0.2, shuffle=True)
    save_data(X_train, X_test, y_train, y_test, notes)
# Script entry point: run the full extraction pipeline.
if __name__ == '__main__':
    main()
|
from copy import deepcopy
import torch
import os, sys
import_path = os.path.abspath('.')
sys.path.insert(1, import_path)
from vg_neural_network import NeuralNetwork
from vg_game_state import GameState
from vg_logger import Logger
import logger
import coach
# Build the initial 8x8 game position and the training harness around it.
initial_state = GameState.initial(8, 8)
logger_ = Logger()
trainer = coach.Coach(initial_state, 1000, logger_)
# Hyper-parameters.  Everything is set to 1 -- presumably a smoke-test
# configuration; TODO confirm before a real training run.
batch_size = 1
mcts_iterations = 1
train_temperature = 2.
eval_temperature = 4.
iterations = 1
train_iterations = 1
eval_iterations = 1
# Network over a [2, 8, 8] input, running on CPU.
current_best = NeuralNetwork([2, 8, 8], 8, torch.device('cpu'))
for _ in range(iterations):
    # Train a copy of the current champion, then keep whichever network the
    # pit evaluation returns as the winner.
    new_contestant = deepcopy(current_best)
    trainer.train(new_contestant, train_iterations, batch_size, train_temperature, mcts_iterations)
    current_best = trainer.pit([current_best, new_contestant], eval_iterations)
from scheme_runner import SchemeTestCase, Query, out
# Scheme interpreter regression cases for variadic parameters: both the
# explicit `(variadic x)` form and the dotted `. x` syntax, including the
# error cases where `variadic`/dot appears outside a parameter list.
cases = [
    SchemeTestCase(
        [
            Query(code=['(define (f (variadic x)) (cons 10 x))'], expected=out("f")),
            Query(code="f", expected=out("(f (variadic x)) [parent = Global]")),
            Query(code=['(f 2 3)'], expected=out("(10 2 3)")),
            Query(code=['(variadic x)'], expected=out("Error")),
            Query(code=['(variadic 2)'], expected=out("Error")),
            Query(code=['(define (f . x) (cons 10 x))'], expected=out("f")),
            Query(code=['(f 2 3)'], expected=out("(10 2 3)")),
            Query(code=['. x'], expected=out("Error")),
            Query(code=['\' . x'], expected=out("(variadic x)")),
            Query(code=["'(1 . x)"], expected=out("(1 (variadic x))")),
        ])]
|
# NOTE(review): `letters` is never used -- presumably the intent was to map a
# letter to its alphabet position; confirm what this script is meant to do.
letters="ABCDEFGHIJKLMNOPQRSTUVWXYZ"
numbers="1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26"
# NOTE(review): str.find returns the CHARACTER index of the first substring
# match inside the comma-separated string (e.g. "2" matches inside "1,2" at
# index 2, and "1" matches "1", "10", "11", ... at index 0).  If an ordinal
# position was intended, this is a bug.
pos=numbers.find (input("WRITE STUFF? "))
print(pos)
|
import numpy as np
import warnings
from pycompss.api.api import compss_delete_object
from pycompss.api.constraint import constraint
from pycompss.api.task import task
from dislib.data.array import Array, full, eye
from dislib.data.util import compute_bottom_right_shape, \
pad_last_blocks_with_zeros
from dislib.data.util.base import remove_last_rows, remove_last_columns
def qr(a: Array, mode='full', overwrite_a=False):
    """ QR Decomposition (blocked).
    Parameters
    ----------
    a : ds-arrays
        Input ds-array.
    mode : string
        Algorithm variant:
        'full' - full Q of size m x m and R of size m x n
        'economic' - Q of size m x n and R of size n x n
        'r' - only R, of size m x n
    overwrite_a : bool
        Overwriting the input matrix as R.
    Returns
    -------
    q : ds-array
        only for modes 'full' and 'economic'
    r : ds-array
        for all modes
    Raises
    ------
    ValueError
        If m < n for the provided matrix m x n
        or
        If blocks are not square
        or
        If top left shape is different than regular
        or
        If bottom right block is different than regular
    """
    _validate_ds_array(a)
    if mode not in ('full', 'economic', 'r'):
        raise ValueError("Unsupported mode: " + mode)
    if mode == 'economic' and overwrite_a:
        warnings.warn(
            "The economic mode does not overwrite the original matrix. "
            "Argument overwrite_a is changed to False.", UserWarning)
        overwrite_a = False
    a_obj = a if overwrite_a else a.copy()
    # Pad the trailing blocks with zeros so every block has the regular shape.
    pad_rows = 0
    pad_cols = 0
    tail_shape = compute_bottom_right_shape(a_obj)
    if tail_shape != a_obj._reg_shape:
        pad_rows = a_obj._reg_shape[0] - tail_shape[0]
        pad_cols = a_obj._reg_shape[1] - tail_shape[1]
        pad_last_blocks_with_zeros(a_obj)
    if mode == "economic":
        q, r = _qr_economic(a_obj)
        _undo_padding_economic(q, r, pad_rows, pad_cols)
        return q, r
    if mode == "full":
        q, r = _qr_full(a_obj)
        _undo_padding_full(q, r, pad_rows, pad_cols)
        return q, r
    # mode == "r": only the triangular factor is produced.
    r = _qr_r(a_obj)
    if pad_cols > 0:
        remove_last_columns(r, pad_cols)
    return r
# Block-type tags used to short-circuit arithmetic on structured blocks.
ZEROS = 0  # block known to be all zeros
IDENTITY = 1  # block known to be an identity block
OTHER = 2  # general dense block
def _qr_full(r):
    """Blocked QR, mode 'full': r is overwritten with R and a full m x m Q
    (as a blocked array plus a parallel block-type grid) is returned.
    """
    b_size = r._reg_shape
    # Q starts as the blocked identity; q_type tracks per-block structure.
    q, q_type = _gen_identity(
        r.shape[0],
        r.shape[0],
        r._reg_shape,
        r._n_blocks[0],
        r._n_blocks[0]
    )
    # All blocks of r start tagged as general dense blocks.
    r_type = full((r._n_blocks[0], r._n_blocks[1]), (1, 1), OTHER)
    for i in range(r._n_blocks[1]):
        # Factor the diagonal block; act_q is the (transposed) local Q.
        act_q_type, act_q, r_type_block, r_block = _qr(
            r._blocks[i][i], r_type._blocks[i][i], r._reg_shape, t=True
        )
        r_type.replace_block(i, i, r_type_block)
        r.replace_block(i, i, r_block)
        # Accumulate the local rotation into every block of column i of Q.
        for j in range(r._n_blocks[0]):
            q_type_block, q_block = _dot(
                q._blocks[j][i],
                q_type._blocks[j][i],
                act_q,
                act_q_type,
                b_size,
                transpose_b=True
            )
            q_type.replace_block(j, i, q_type_block)
            q.replace_block(j, i, q_block)
        # Apply the local rotation to the rest of block-row i of R.
        for j in range(i + 1, r._n_blocks[1]):
            r_type_block, r_block = _dot(
                act_q,
                act_q_type,
                r._blocks[i][j],
                r_type._blocks[i][j],
                b_size
            )
            r_type.replace_block(i, j, r_type_block)
            r.replace_block(i, j, r_block)
        compss_delete_object(act_q_type)
        compss_delete_object(act_q)
        sub_q = [[np.array([0]), np.array([0])],
                 [np.array([0]), np.array([0])]]
        sub_q_type = [[_type_block(OTHER), _type_block(OTHER)],
                      [_type_block(OTHER), _type_block(OTHER)]]
        # Update values of the respective column
        for j in range(i + 1, r._n_blocks[0]):
            # 2x2 "couple" QR zeroes block (j, i) against the diagonal block.
            sub_q[0][0], sub_q[0][1], sub_q[1][0], sub_q[1][1], \
                r_type_block1, r_block1, r_type_block2, r_block2 = _little_qr(
                    r._blocks[i][i],
                    r_type._blocks[i][i],
                    r._blocks[j][i],
                    r_type._blocks[j][i],
                    r._reg_shape,
                    transpose=True
                )
            r_type.replace_block(i, i, r_type_block1)
            r.replace_block(i, i, r_block1)
            r_type.replace_block(j, i, r_type_block2)
            r.replace_block(j, i, r_block2)
            # Update values of the row for the value updated in the column
            for k in range(i + 1, r._n_blocks[1]):
                [[r_type_block1], [r_type_block2]], \
                    [[r_block1], [r_block2]] = _multiply_blocked(
                        sub_q,
                        sub_q_type,
                        [[r._blocks[i][k]], [r._blocks[j][k]]],
                        [[r_type._blocks[i][k]], [r_type._blocks[j][k]]],
                        r._reg_shape
                    )
                r_type.replace_block(i, k, r_type_block1)
                r.replace_block(i, k, r_block1)
                r_type.replace_block(j, k, r_type_block2)
                r.replace_block(j, k, r_block2)
            # Fold the couple rotation into block-columns i and j of Q.
            for k in range(r._n_blocks[0]):
                [[q_type_block1, q_type_block2]], \
                    [[q_block1, q_block2]] = _multiply_blocked(
                        [[q._blocks[k][i], q._blocks[k][j]]],
                        [[q_type._blocks[k][i], q_type._blocks[k][j]]],
                        sub_q,
                        sub_q_type,
                        r._reg_shape,
                        transpose_b=True
                    )
                q_type.replace_block(k, i, q_type_block1)
                q.replace_block(k, i, q_block1)
                q_type.replace_block(k, j, q_type_block2)
                q.replace_block(k, j, q_block2)
            compss_delete_object(sub_q[0][0])
            compss_delete_object(sub_q[0][1])
            compss_delete_object(sub_q[1][0])
            compss_delete_object(sub_q[1][1])
    return q, r
def _qr_r(r):
    """Blocked QR, mode 'r': r is overwritten with R and returned; no Q is
    accumulated (same elimination order as _qr_full, minus the Q updates).
    """
    b_size = r._reg_shape
    r_type = full((r._n_blocks[0], r._n_blocks[1]), (1, 1), OTHER)
    for i in range(r._n_blocks[1]):
        # Factor the diagonal block and apply its rotation to row i.
        act_q_type, act_q, r_type_block, r_block = _qr(
            r._blocks[i][i], r_type._blocks[i][i], r._reg_shape, t=True
        )
        r_type.replace_block(i, i, r_type_block)
        r.replace_block(i, i, r_block)
        for j in range(i + 1, r._n_blocks[1]):
            r_type_block, r_block = _dot(
                act_q,
                act_q_type,
                r._blocks[i][j],
                r_type._blocks[i][j],
                b_size
            )
            r_type.replace_block(i, j, r_type_block)
            r.replace_block(i, j, r_block)
        compss_delete_object(act_q_type)
        compss_delete_object(act_q)
        sub_q = [[np.array([0]), np.array([0])],
                 [np.array([0]), np.array([0])]]
        sub_q_type = [[_type_block(OTHER), _type_block(OTHER)],
                      [_type_block(OTHER), _type_block(OTHER)]]
        # Update values of the respective column
        for j in range(i + 1, r._n_blocks[0]):
            # Couple QR zeroes block (j, i) against the diagonal block.
            sub_q[0][0], sub_q[0][1], sub_q[1][0], sub_q[1][1], \
                r_type_block1, r_block1, r_type_block2, r_block2 = _little_qr(
                    r._blocks[i][i],
                    r_type._blocks[i][i],
                    r._blocks[j][i],
                    r_type._blocks[j][i],
                    r._reg_shape,
                    transpose=True
                )
            r_type.replace_block(i, i, r_type_block1)
            r.replace_block(i, i, r_block1)
            r_type.replace_block(j, i, r_type_block2)
            r.replace_block(j, i, r_block2)
            # Update values of the row for the value updated in the column
            for k in range(i + 1, r._n_blocks[1]):
                [[r_type_block1], [r_type_block2]], \
                    [[r_block1], [r_block2]] = _multiply_blocked(
                        sub_q,
                        sub_q_type,
                        [[r._blocks[i][k]], [r._blocks[j][k]]],
                        [[r_type._blocks[i][k]], [r_type._blocks[j][k]]],
                        r._reg_shape
                    )
                r_type.replace_block(i, k, r_type_block1)
                r.replace_block(i, k, r_block1)
                r_type.replace_block(j, k, r_type_block2)
                r.replace_block(j, k, r_block2)
            compss_delete_object(sub_q[0][0])
            compss_delete_object(sub_q[0][1])
            compss_delete_object(sub_q[1][0])
            compss_delete_object(sub_q[1][1])
    return r
def _qr_economic(r):
    """Blocked QR, mode 'economic': returns m x n Q and n x n R.

    The per-column rotations are recorded (act_q_list / sub_q_list) during the
    elimination sweep and applied to Q in reverse afterwards, so Q can be kept
    at m x n instead of m x m.
    """
    a_shape = (r.shape[0], r.shape[1])
    a_n_blocks = (r._n_blocks[0], r._n_blocks[1])
    b_size = r._reg_shape
    q, q_type = _gen_identity(
        r.shape[0],
        a_shape[1],
        b_size,
        r._n_blocks[0],
        r._n_blocks[1]
    )
    r_type = full((r._n_blocks[0], r._n_blocks[1]), (1, 1), OTHER)
    act_q_list = []   # diagonal-block rotations, one per block-column
    sub_q_list = {}   # couple rotations keyed by (row, column) block index
    for i in range(a_n_blocks[1]):
        act_q_type, act_q, r_type_block, r_block = _qr(
            r._blocks[i][i], r_type._blocks[i][i], b_size, t=True
        )
        r_type.replace_block(i, i, r_type_block)
        r.replace_block(i, i, r_block)
        act_q_list.append((act_q_type, act_q))
        for j in range(i + 1, a_n_blocks[1]):
            r_type_block, r_block = _dot(
                act_q,
                act_q_type,
                r._blocks[i][j],
                r_type._blocks[i][j],
                b_size
            )
            r_type.replace_block(i, j, r_type_block)
            r.replace_block(i, j, r_block)
        # Update values of the respective column
        for j in range(i + 1, r._n_blocks[0]):
            sub_q = [[np.array([0]), np.array([0])],
                     [np.array([0]), np.array([0])]]
            sub_q_type = [[_type_block(OTHER), _type_block(OTHER)],
                          [_type_block(OTHER), _type_block(OTHER)]]
            sub_q[0][0], sub_q[0][1], sub_q[1][0], sub_q[1][1], \
                r_type_block1, r_block1, \
                r_type_block2, r_block2 = _little_qr(
                    r._blocks[i][i], r_type._blocks[i][i],
                    r._blocks[j][i], r_type._blocks[j][i],
                    b_size, transpose=True
                )
            r_type.replace_block(i, i, r_type_block1)
            r.replace_block(i, i, r_block1)
            r_type.replace_block(j, i, r_type_block2)
            r.replace_block(j, i, r_block2)
            sub_q_list[(j, i)] = (sub_q_type, sub_q)
            # Update values of the row for the value updated in the column
            for k in range(i + 1, a_n_blocks[1]):
                [[r_type_block1], [r_type_block2]], \
                    [[r_block1], [r_block2]] = _multiply_blocked(
                        sub_q,
                        sub_q_type,
                        [[r._blocks[i][k]], [r._blocks[j][k]]],
                        [[r_type._blocks[i][k]], [r_type._blocks[j][k]]],
                        b_size
                    )
                r_type.replace_block(i, k, r_type_block1)
                r.replace_block(i, k, r_block1)
                r_type.replace_block(j, k, r_type_block2)
                r.replace_block(j, k, r_block2)
    # Apply the recorded rotations to Q in reverse order.
    for i in reversed(range(len(act_q_list))):
        for j in reversed(range(i + 1, r._n_blocks[0])):
            for k in range(q._n_blocks[1]):
                [[q_type_block1], [q_type_block2]], \
                    [[q_block1], [q_block2]] = _multiply_blocked(
                        sub_q_list[(j, i)][1],
                        sub_q_list[(j, i)][0],
                        [[q._blocks[i][k]], [q._blocks[j][k]]],
                        [[q_type._blocks[i][k]], [q_type._blocks[j][k]]],
                        b_size,
                        transpose_a=True
                    )
                q_type.replace_block(i, k, q_type_block1)
                q.replace_block(i, k, q_block1)
                q_type.replace_block(j, k, q_type_block2)
                q.replace_block(j, k, q_block2)
            compss_delete_object(sub_q_list[(j, i)][0][0])
            compss_delete_object(sub_q_list[(j, i)][0][1])
            compss_delete_object(sub_q_list[(j, i)][1][0])
            compss_delete_object(sub_q_list[(j, i)][1][1])
            del sub_q_list[(j, i)]
        for k in range(q._n_blocks[1]):
            q_type_block, q_block = _dot(
                act_q_list[i][1],
                act_q_list[i][0],
                q._blocks[i][k],
                q_type._blocks[i][k],
                b_size,
                transpose_a=True
            )
            q_type.replace_block(i, k, q_type_block)
            q.replace_block(i, k, q_block)
        compss_delete_object(act_q_list[i][0])
        compss_delete_object(act_q_list[i][1])
    # removing last rows of r to make it n x n instead of m x n
    remove_last_rows(r, r.shape[0] - r.shape[1])
    return q, r
def _undo_padding_full(q, r, n_rows, n_cols):
    """Strip the zero padding added before a full-mode QR from q and r."""
    if n_rows > 0:
        # Q was padded to a larger square; trim both dimensions.
        remove_last_rows(q, n_rows)
        remove_last_columns(q, n_rows)
    if n_cols > 0:
        remove_last_columns(r, n_cols)
    # Keep R's row count consistent with Q's column count.
    excess_rows = max(r.shape[0] - q.shape[1], 0)
    remove_last_rows(r, excess_rows)
def _undo_padding_economic(q, r, n_rows, n_cols):
    """Strip the zero padding added before an economic-mode QR from q and r."""
    if n_rows > 0:
        remove_last_rows(q, n_rows)
    if n_cols > 0:
        # Padded columns affect R in both dimensions and Q's column count.
        remove_last_columns(r, n_cols)
        remove_last_rows(r, n_cols)
        remove_last_columns(q, n_cols)
def _validate_ds_array(a: Array):
    """Reject ds-arrays this blocked QR implementation cannot handle."""
    if a._n_blocks[0] < a._n_blocks[1]:
        raise ValueError("m > n is required for matrices m x n")
    if a._reg_shape[0] != a._reg_shape[1]:
        raise ValueError("Square blocks are required")
    if a._top_left_shape != a._reg_shape:
        raise ValueError(
            "Top left block needs to be of the same shape as regular ones"
        )
def _split_matrix(a, m_size):
b_size = int(len(a) / m_size)
split_matrix = [[None for m in range(m_size)] for m in range(m_size)]
for i in range(m_size):
for j in range(m_size):
split_matrix[i][j] = a[i * b_size:(i + 1) * b_size,
j * b_size:(j + 1) * b_size]
return split_matrix
def _gen_identity(n, m, b_size, n_size, m_size):
    """Build a blocked n x m identity ds-array together with its
    (1, 1)-blocked type grid (n_size x m_size)."""
    blocks = eye(n, m, b_size, dtype=None)
    types = eye(n_size, m_size, (1, 1), dtype=np.uint8)
    return blocks, types
@constraint(computing_units="${ComputingUnits}")
@task(returns=np.array)
def _dot_task(a, b, transpose_result=False, transpose_a=False,
              transpose_b=False):
    """Dense block product op(a) @ op(b), optionally transposing the result."""
    left = np.transpose(a) if transpose_a else a
    right = np.transpose(b) if transpose_b else b
    product = np.dot(left, right)
    return np.transpose(product) if transpose_result else product
@constraint(computing_units="${ComputingUnits}")
@task(returns=(np.array, np.array))
def _qr_task(a, a_type, b_size, mode='reduced', t=False):
    """Factor one block with numpy QR, materialising placeholder blocks first.

    a_type is a 1x1 tag array (ZEROS/IDENTITY/OTHER).  When t is True the
    returned Q factor is transposed.
    """
    from numpy.linalg import qr
    if a_type[0, 0] == OTHER:
        q, r = qr(a, mode=mode)
    elif a_type[0, 0] == ZEROS:
        # Placeholder zero block: factor an explicit zero matrix instead.
        q, r = qr(np.zeros(b_size), mode=mode)
    else:
        # IDENTITY placeholder block.
        q, r = qr(np.identity(max(b_size)), mode=mode)
    if t:
        q = np.transpose(q)
    return q, r
def _qr(a, a_type, b_size, mode='reduced', t=False):
    """Submit the QR task for one block; both factors come back tagged OTHER."""
    q_block, r_block = _qr_task(a, a_type, b_size, mode=mode, t=t)
    return _type_block(OTHER), q_block, _type_block(OTHER), r_block
def _type_block(value):
return np.full((1, 1), value, np.uint8)
def _empty_block(shape):
return np.full(shape, 0, dtype=np.uint8)
@constraint(computing_units="${ComputingUnits}")
@task(returns=(np.array, np.array))
def _dot(a, a_type, b, b_type, b_size, transpose_result=False,
         transpose_a=False, transpose_b=False):
    """Multiply two tagged blocks, short-circuiting on ZEROS/IDENTITY tags.

    Returns a (type, block) pair; a real matrix product (_dot_task) is only
    submitted when both operands are general dense blocks.
    """
    if a_type[0][0] == ZEROS:
        # 0 * B == 0 regardless of transposition.
        return _type_block(ZEROS), _empty_block(b_size)
    if a_type[0][0] == IDENTITY:
        # I * B reduces to B; an odd number of transpositions flips it.
        if transpose_b and transpose_result:
            return b_type, b
        if transpose_b or transpose_result:
            return _transpose_block(b, b_type)
        return b_type, b
    if b_type[0][0] == ZEROS:
        return _type_block(ZEROS), _empty_block(b_size)
    if b_type[0][0] == IDENTITY:
        # A * I reduces to A with the requested transpositions applied.
        if transpose_a:
            a_type, a = _transpose_block(a, a_type)
        if transpose_result:
            return _transpose_block(a, a_type)
        return a_type, a
    result = _dot_task(
        a,
        b,
        transpose_result=transpose_result,
        transpose_a=transpose_a,
        transpose_b=transpose_b
    )
    return _type_block(OTHER), result
@constraint(computing_units="${ComputingUnits}")
@task(returns=(np.array, np.array, np.array, np.array, np.array, np.array))
def _little_qr_task(a, type_a, b, type_b, b_size, transpose=False):
    """Complete QR of the two stacked blocks [[a], [b]].

    Returns the four quadrants of Q followed by the two row-blocks of R.
    """
    regular_b_size = b_size[0]
    ent_a = [type_a, a]
    ent_b = [type_b, b]
    # Materialise placeholder blocks before stacking.
    # NOTE(review): elsewhere in this file tags are 1x1 arrays; `mat[0] ==
    # ZEROS` would then compare an array and rely on single-element
    # truthiness -- confirm the tags arriving here are scalars.
    for mat in [ent_a, ent_b]:
        if mat[0] == ZEROS:
            mat[1] = np.zeros(b_size)
        elif mat[0] == IDENTITY:
            mat[1] = np.identity(regular_b_size)
    curr_a = np.bmat([[ent_a[1]], [ent_b[1]]])
    (sub_q, sub_r) = np.linalg.qr(curr_a, mode='complete')
    # Split R into its top and bottom row-blocks.
    aa = sub_r[0:regular_b_size]
    bb = sub_r[regular_b_size:2 * regular_b_size]
    sub_q = _split_matrix(sub_q, 2)
    if transpose:
        # Transpose each quadrant and swap the off-diagonal ones.
        return np.transpose(sub_q[0][0]), np.transpose(sub_q[1][0]), \
            np.transpose(sub_q[0][1]), np.transpose(sub_q[1][1]), aa, bb
    else:
        return sub_q[0][0], sub_q[0][1], sub_q[1][0], sub_q[1][1], aa, bb
def _little_qr(a, type_a, b, type_b, b_size, transpose=False):
    """Submit the couple-QR task; the two R row-blocks come back tagged OTHER."""
    result = _little_qr_task(a, type_a, b, type_b, b_size, transpose)
    q00, q01, q10, q11, aa, bb = result
    return q00, q01, q10, q11, _type_block(OTHER), aa, _type_block(OTHER), bb
@constraint(computing_units="${ComputingUnits}")
@task(returns=(np.array, np.array))
def _multiply_single_block_task(a, type_a, b, type_b, c, type_c, b_size,
                                transpose_a=False, transpose_b=False):
    """Accumulate op(a) @ op(b) into c, exploiting ZEROS/IDENTITY tags.

    Returns the (type, block) pair of the updated accumulator.
    """
    # A zero operand contributes nothing; keep the accumulator unchanged.
    if type_a[0][0] == ZEROS or type_b[0][0] == ZEROS:
        return type_c, c
    fun_a = [type_a, a]
    fun_b = [type_b, b]
    # Materialise the accumulator if it is still a placeholder block.
    if type_c[0][0] == ZEROS:
        c = np.zeros((b_size[0], b_size[1]))
    elif type_c[0][0] == IDENTITY:
        c = np.identity(b_size[0])
    if fun_a[0][0][0] == IDENTITY:
        if fun_b[0][0][0] == IDENTITY:
            fun_b[1] = np.identity(b_size[0])
        # I @ B == B: just add (optionally transposed) b to c.
        if transpose_b:
            aux = np.transpose(fun_b[1])
        else:
            aux = fun_b[1]
        c += aux
        return _type_block(OTHER), c
    if fun_b[0][0][0] == IDENTITY:
        # A @ I == A: just add (optionally transposed) a to c.
        if transpose_a:
            aux = np.transpose(fun_a[1])
        else:
            aux = fun_a[1]
        c += aux
        return _type_block(OTHER), c
    # General dense case.
    if transpose_a:
        fun_a[1] = np.transpose(fun_a[1])
    if transpose_b:
        fun_b[1] = np.transpose(fun_b[1])
    c += (fun_a[1].dot(fun_b[1]))
    return _type_block(OTHER), c
def _multiply_single_block(a, type_a, b, type_b, c, type_c, b_size,
                           transpose_a=False, transpose_b=False):
    """Thin wrapper submitting one block multiply-accumulate task."""
    return _multiply_single_block_task(
        a, type_a, b, type_b, c, type_c, b_size,
        transpose_a=transpose_a, transpose_b=transpose_b)
def _transpose_grid(grid):
    """Transpose a 2-d nested list of blocks (swap block rows and columns)."""
    return [[grid[j][i] for j in range(len(grid))]
            for i in range(len(grid[0]))]


def _multiply_blocked(a, type_a, b, type_b, b_size, transpose_a=False,
                      transpose_b=False):
    """Blocked multiply of two grids of tagged blocks: C = op(A) @ op(B).

    a/b are nested lists of blocks with parallel nested lists of type tags.
    Transposition happens at two levels: the grid layout is transposed here,
    and the per-block transpose flags are forwarded to the block task.
    Returns (type_c, c) as nested lists.
    """
    # The two hand-rolled transpose loops were duplicated for a and b;
    # factored out into _transpose_grid (same traversal, same result).
    if transpose_a:
        a = _transpose_grid(a)
        type_a = _transpose_grid(type_a)
    if transpose_b:
        b = _transpose_grid(b)
        type_b = _transpose_grid(type_b)
    c = []
    type_c = []
    for i in range(len(a)):
        c.append([])
        type_c.append([])
        for j in range(len(b[0])):
            # Start each output block as a tagged zero accumulator.
            c[i].append(_empty_block(b_size))
            type_c[i].append(_type_block(ZEROS))
            for k in range(len(a[0])):
                type_c[i][j], c[i][j] = _multiply_single_block(
                    a[i][k], type_a[i][k],
                    b[k][j], type_b[k][j],
                    c[i][j], type_c[i][j],
                    b_size, transpose_a=transpose_a, transpose_b=transpose_b)
    return type_c, c
def _transpose_block(a, a_type):
    """Transpose a dense block; ZEROS and IDENTITY blocks pass through
    unchanged (their transposes are themselves)."""
    tag = a_type[0][0]
    if tag == ZEROS or tag == IDENTITY:
        return a_type, a
    return _type_block(OTHER), np.transpose(a)
|
#!/usr/bin/env python3
import math
def solution(A, B, K):
    """
    Returns the number of integers within the range [A..B] that are divisible by K

    Fix: the previous math.ceil(A / K) / math.floor(B / K) version went
    through float division, which loses precision for large integers.
    Count multiples up to B, minus multiples strictly below A, using exact
    integer floor division (correct for negative A/B as well).
    """
    return B // K - (A - 1) // K
|
from setuptools import setup, find_packages
# Read the long description from README.md so the package page can show it.
with open("README.md", "r") as fh:
    long_description = fh.read()
setup(
    name="ogsolar",  # The python module name.
    version="4.6",  # The version of the module.
    author="Paul Austen",  # The name of the module author.
    author_email="pausten.os@gmail.com",  # The email address of the author.
    description="A Raspberry PI application for controlling EPEver Tracer off grid solar installations.",  # A short description of the module.
    # Fix: the README was read into long_description but "" was passed here,
    # discarding it.
    long_description=long_description,
    long_description_content_type="text/markdown",
    packages=find_packages(),
    include_package_data=True,
    license="MIT License",  # The License that the module is distributed under
    url="https://github.com/pjaos/rpi_ogsolar",  # The home page for the module
    # Fix: install_requires must be a flat list of requirement strings; the
    # previous value was a list nested inside a list, which setuptools rejects.
    install_requires=['p3lib>=1.1.28', 'tornado>=6.1', 'pymodbus>=2.5.2'],
    scripts=['scripts/ogsolar', 'scripts/tracer'],  # Command line startup scripts to be installed.
)
|
class SettingsDict(dict):
    """Dict whose keys are readable as attributes and whose item assignment
    is blocked, so instances stay hash-stable and usable as cache keys.

    Hashing is order-independent and freezes set values first.
    NOTE(review): only __setitem__ is blocked; update()/setdefault()/del can
    still mutate an instance -- confirm whether that is intentional.
    """

    def __getattr__(self, name):
        """Expose d['x'] as d.x; unknown names raise AttributeError."""
        if name not in self:
            # Include the name so the error is actually debuggable.
            raise AttributeError(name)
        return self[name]

    def __setitem__(self, key, value):
        """Item assignment is forbidden to keep the hash stable."""
        raise AttributeError('SettingsDict properties are immutable')

    def _hash_key(self):
        """Return a hashable, order-independent view of the contents."""
        keys_and_values = []
        for key, value in self.items():
            if isinstance(value, set):
                value = frozenset(value)  # sets are unhashable; freeze first
            keys_and_values.append("%s %s" % (key, hash(value)))
        return frozenset(keys_and_values)

    def __hash__(self):
        return hash(self._hash_key())

    def __eq__(self, other):
        # Fix: comparing hash(self) == hash(other) reported false equality on
        # hash collisions; compare the underlying hash keys instead.  For
        # non-SettingsDict operands defer to the other side / dict equality.
        if isinstance(other, SettingsDict):
            return self._hash_key() == other._hash_key()
        return NotImplemented
|
'''
Created on Apr 12, 2011
@author: Yuri Corilo
'''
class Constants():
    """Static lookup tables for molecular-formula / mass-spectrometry work:
    exact isotope masses, the electron mass, allowed valences per element,
    and element -> isotope-label groupings.
    """
    # Exact atomic masses in unified atomic mass units (u), keyed by element
    # symbol (most abundant isotope) or explicit isotope label.
    atomic_exact_masses = {'H': 1.007825032239,
                           'D': 2.0141017778,
                           'T': 3.0160492777,
                           'He': 4.00260325415,
                           '6Li': 6.015122795,
                           '7Li': 7.01600455,
                           'Be': 9.0121822,
                           '10B': 10.012900352,
                           '11B': 11.00930977,
                           'C': 12,
                           '13C': 13.0033548378,
                           'N': 14.00307400443,
                           '15N': 15.0001088982,
                           'O': 15.9949146195717,
                           '18O': 17.9991610,
                           'F': 18.99840322,
                           '20Ne': 19.9924401754,
                           '22Ne': 21.991385114,
                           'Na': 22.9897692809,
                           '24Mg': 23.985041700,
                           '25Mg': 24.98583692,
                           '26Mg': 25.982592929,
                           'Al': 26.98153863,
                           '28Si': 27.9769265325,
                           '29Si': 28.976494700,
                           '30Si': 29.97377017,
                           'P': 30.97376163,
                           'S': 31.972071174414,
                           '34S': 32.971458909815,
                           '35Cl': 34.96885268,
                           '37Cl': 36.96590259,
                           'Ar': 39.9623831225,
                           '39K': 38.96370668,
                           '41K': 40.96182576,
                           '40Ca': 39.96259098,
                           '44Ca': 43.9554818,
                           'Sc': 44.9559119,
                           '46Ti': 45.9526316,
                           '47Ti': 46.9517631,
                           '48Ti': 47.9479463,
                           '49Ti': 48.9478700,
                           '50Ti': 49.9447912,
                           '50V': 49.9471585,
                           '51V': 50.9439595,
                           '52Cr': 51.9405075,
                           '53Cr': 52.9406494,
                           '50Cr': 49.9460442,
                           '54Cr': 53.9388804,
                           'Mn': 54.9380451,
                           '56Fe': 55.9349375,
                           '54Fe': 53.9396105,
                           '57Fe': 56.9353940,
                           'Co': 58.9331950,
                           '58Ni': 57.9353429,
                           '60Ni': 59.9307864,
                           '61Ni': 60.9310560,
                           '62Ni': 61.9283451,
                           '63Cu': 62.9295975,
                           '65Cu': 64.9277895,
                           '64Zn': 63.9291422,
                           '66Zn': 65.9260334,
                           '67Zn': 66.9271273,
                           '68Zn': 67.9248442,
                           '69Ga': 68.9255736,
                           '71Ga': 70.9247013,
                           '70Ge': 69.9242474,
                           '72Ge': 71.9220758,
                           '73Ge': 72.9234589,
                           '74Ge': 73.9211778,
                           '76Ge': 75.9214026,
                           'As': 74.9215965,
                           '76Se': 75.9192136,
                           '77Se': 76.9199140,
                           '78Se': 77.9173091,
                           '80Se': 79.9165213,
                           '82Se': 81.9166994,
                           '79Br': 78.9183371,
                           '80Br': 80.9162906,
                           '80Kr': 79.9163790,
                           '82Kr': 81.9134836,
                           '83Kr': 82.914136,
                           '84Kr': 83.911507,
                           '86Kr': 85.91061073,
                           '85Rb': 84.91178973,
                           '87Rb': 86.909180527,
                           '86Sr': 85.9092602,
                           '87Sr': 86.9088771,
                           '88Sr': 87.9056121,
                           'Y': 88.9058483,
                           '90Zr': 89.9047044,
                           '91Zr': 90.9056458,
                           '92Zr': 91.9050408,
                           '94Zr': 93.9063152,
                           '96Zr': 95.9082734,
                           'Nb': 92.9063781,
                           '92Mo': 91.906811,
                           '94Mo': 93.9050883,
                           '95Mo': 94.9058421,
                           '96Mo': 95.9046795,
                           '97Mo': 96.9060215,
                           '98Mo': 97.9054082,
                           '100Mo': 99.907477,
                           'Tc': 97.907216,
                           '96Ru': 95.907598,
                           '98Ru': 97.905287,
                           '99Ru': 98.9059393,
                           '100Ru': 99.9042195,
                           '101Ru': 100.9055821,
                           '102Ru': 101.9043493,
                           '104Ru': 103.905433,
                           'Rh': 102.905504,
                           '102Pd': 101.905609,
                           '104Pd': 103.904036,
                           '105Pd': 104.905085,
                           '106Pd': 105.903486,
                           '108Pd': 107.903892,
                           '110Pd': 109.905153,
                           '107Ag': 106.905097,
                           '109Ag': 108.904752,
                           '106Cd': 105.906459,
                           '108Cd': 107.904184,
                           '110Cd': 109.9030021,
                           '111Cd': 110.9041781,
                           '112Cd': 111.9027578,
                           '113Cd': 112.9044017,
                           '114Cd': 113.9033585,
                           '116Cd': 115.904756,
                           '113In': 112.904058,
                           '115In': 114.903878,
                           '112Sn': 111.904818,
                           '114Sn': 113.902779,
                           '115Sn': 114.903342,
                           '116Sn': 115.901741,
                           '117Sn': 116.902952,
                           '118Sn': 117.901603,
                           '119Sn': 118.903308,
                           '120Sn': 119.9021947,
                           '122Sn': 121.9034390,
                           '124Sn': 123.9052739,
                           '121Sb': 120.9038157,
                           '123Sb': 122.9042140,
                           '120Te': 119.904020,
                           '122Te': 121.9030439,
                           '123Te': 122.9042700,
                           '124Te': 123.9028179,
                           '125Te': 124.9044307,
                           '126Te': 125.9033117,
                           '128Te': 127.9044631,
                           '130Te': 129.9062244,
                           'I': 126.904473,
                           '124Xe': 123.9058930,
                           '126Xe': 125.904274,
                           '128Xe': 127.9035313,
                           '129Xe': 128.9047794,
                           '130Xe': 129.9035080,
                           '131Xe': 130.9050824,
                           '132Xe': 131.9041535,
                           '134Xe': 133.9053945,
                           '136Xe': 135.907219,
                           'Cs': 132.905451933,
                           '130Ba': 129.9063208,
                           '132Ba': 131.9050613,
                           '134Ba': 133.9045084,
                           '135Ba': 134.9056886,
                           '136Ba': 135.9045759,
                           '137Ba': 136.9058274,
                           '138Ba': 137.9052472,
                           'La': 138.9063533,
                           '174Hf': 173.940046,
                           '176Hf': 175.9414086,
                           '177Hf': 176.9432207,
                           '178Hf': 177.9436988,
                           '179Hf': 178.9458161,
                           '180Hf': 179.9465500,
                           'Ta': 180.9479958,
                           '180W': 179.946704,
                           '182W': 181.9482042,
                           '183W': 182.9502230,
                           '184W': 183.9509312,
                           '186W': 185.9543641,
                           '185Re': 184.9529550,
                           '187Re': 186.9557531,
                           '184Os': 183.9524891,
                           '186Os': 185.9538382,
                           '187Os': 186.9557505,
                           '188Os': 187.9558382,
                           '189Os': 188.9581475,
                           '190Os': 189.9584470,
                           '192Os': 191.9614807,
                           '191Ir': 190.9605940,
                           '192Ir': 192.9629264,
                           '190Pt': 189.959932,
                           '192Pt': 191.9610380,
                           '194Pt': 193.9626803,
                           '195Pt': 194.9647911,
                           '196Pt': 195.9649515,
                           '198Pt': 197.967893,
                           'Au': 196.9665687,
                           '196Hg': 195.965833,
                           '198Hg': 197.9667690,
                           '199Hg': 198.9682799,
                           '200Hg': 199.9683260,
                           '201Hg': 200.9703023,
                           '202Hg': 201.9706430,
                           '204Hg': 203.9734939,
                           '203Tl': 202.9723442,
                           '205Tl': 204.9744275,
                           '204Pb': 203.9730436,
                           '206Pb': 205.9744653,
                           '207Pb': 206.9758969,
                           '208Pb': 207.9766521,
                           'Bi': 208.9803987,
                           'Po': 208.9824304,
                           'At': 209.987148,
                           'Rn': 222.0175777,
                           'Fr': 223.0197359,
                           'Ra': 226.0254098,
                           'Ac': 227.0277521}
    # Electron rest mass in u (attribute name is Portuguese: "electron mass").
    massa_eletron = 0.000548579909016
    # Allowed valences per element; tuples list alternative valences.
    Valence = {'C': 4,
               '13C': 4,
               'N': 3,
               'O': 2,
               'S': 2,
               'H': 1,
               'F': (0, 1),
               'Cl': (0, 1),
               'Br': (0, 1),
               'I': (0, 1),
               'At': 1,
               'Li': (0, 1),
               'Na': (0, 1),
               'K': (0, 1),
               'Rb': 1,
               'Cs': 1,
               'Fr': 1,
               'B': (4, 3, 2, 1),
               'In': (3, 2, 1),
               'Al': (3, 1, 2),
               'P': (3, 5, 4, 2, 1),
               'Ga': (3, 1, 2),
               'Mg': (2, 1),
               'Be': (2, 1),
               'Ca': (2, 1),
               'Sr': (2, 1),
               'Ba': 2,
               'Ra': 2,
               'V': (5, 4, 3, 2, 1),
               'Fe': (3, 2, 4, 5, 6),
               'Si': (4, 3, 2),
               'Sc': (3, 2, 1),
               'Ti': (4, 3, 2, 1),
               'Cr': (1, 2, 3, 4, 5, 6),
               'Mn': (1, 2, 3, 4, 5, 6, 7),
               'Co': (1, 2, 3, 4, 5),
               'Ni': (1, 2, 3, 4),
               'Cu': (2, 1, 3, 4),
               'Zn': (2, 1),
               'Ge': (4, 3, 2, 1),
               'As': (5, 3, 2, 1),
               'Se': (6, 4, 2, 1),
               'Y': (3, 2, 1),
               'Zr': (4, 3, 2, 1),
               'Nb': (5, 4, 3, 2, 1),
               'Mo': (6, 5, 4, 3, 2, 1),
               'Tc': (7, 6, 5, 4, 3, 2, 1),
               'Ru': (8, 7, 6, 5, 4, 3, 2, 1),
               'Rh': (6, 5, 4, 3, 2, 1),
               'Pd': (4, 2, 1),
               'Ag': (0, 1, 2, 3, 4),
               'Cd': (2, 1),
               'Sn': (4, 2),
               'Sb': (5, 3),
               'Te': (6, 5, 4, 2),
               'La': (3, 2),
               'Hf': (4, 3, 2),
               'Ta': (5, 4, 3, 2),
               'W': (6, 5, 4, 3, 2, 1),
               'Re': (4, 7, 6, 5, 3, 2, 1),
               'Os': (4, 8, 7, 6, 5, 3, 2, 1),
               'Ir': (4, 8, 6, 5, 3, 2, 1),
               'Pt': (4, 6, 5, 3, 2, 1),
               'Au': (3, 5, 2, 1),
               'Hg': (1, 2, 4),
               'Tl': (3, 1),
               'Pb': (4, 2),
               'Bi': (3, 1, 5),
               'Po': (2, 4, 6),
               'Ac': (3, 2)
               }
    # Element -> (full name, isotope labels), or [name] only where this table
    # tracks a single isotope.
    # NOTE(review): '192OS' and '193Ir' below do not match the keys in
    # atomic_exact_masses ('192Os' exists; Ir masses are keyed '191Ir' and
    # '192Ir'), so mass lookups via these labels would KeyError -- confirm
    # and reconcile before relying on Os/Ir isotope data.
    # NOTE(review): 'Flourine', 'Aslatine' and 'Zironium' are misspellings of
    # Fluorine, Astatine and Zirconium; left unchanged because these strings
    # are runtime data that callers may match on.
    Isotopes = {'F': ['Flourine'],
                'Na': ['Sodium'],
                'Al': ['Aluminum'],
                'P': ['Phosphorus'],
                'Sc': ['Scandium'],
                'Co': ['Cobalt'],
                'He': ['Helium'],
                'Ar': ['Argon'],
                'H': ('Hydrogen', ('D', 'T')),
                'Cl': ('Chlorine', ('35Cl', '37Cl')),
                'Li': ('Lithium', ('7Li', '6Li')),
                'Be': ['Beryllium'],
                'B': ('Boron', ('11B', '10B')),
                'C': ('Carbon', ['13C']),
                'O': ('Oxygen', ['18O']),
                'S': ('Sulfur', ['34S']),
                'N': ('Nitrogen', ['15N']),
                'V': ('Vanadium', ('51V', '50V')),
                'Ne': ('Neon', ('20Ne', '22Ne')),
                'Mg': ('Magnesium', ('24Mg', '26Mg', '25Mg')),
                'Si': ('Silicon', ('28Si', '29Si', '30Si')),
                'K': ('Potassium', ('39K', '41K')),
                'Ca': ('Calcium', ('40Ca', '44Ca')),
                'Ti': ('Titanium', ('48Ti', '46Ti', '47Ti', '49Ti', '50Ti')),
                'Cr': ('Chromium', ('52Cr', '53Cr', '50Cr', '54Cr')),
                'Fe': ('Iron', ('56Fe', '54Fe', '57Fe')),
                'Mn': ['Manganese'],
                'Ni': ('Nickel', ('58Ni', '60Ni', '62Ni', '61Ni')),
                'Cu': ('Copper', ('63Cu', '65Cu')),
                'Zn': ('Zinc', ('64Zn', '66Zn', '68Zn', '67Zn')),
                'Ga': ('Gallium', ('69Ga', '71Ga')),
                'Ge': ('Germanium', ('74Ge', '72Ge', '70Ge', '73Ge', '76Ge')),
                'As': ['Arsenic'],
                'Se': ('Selenium', ('80Se', '78Se', '76Se', '82Se', '77Se')),
                'Br': ('Bromine', ('79Br', '80Br')),
                'Kr': ('Krypton', ('84Kr', '86Kr', '82Kr', '83Kr', '80Kr')),
                'Rb': ('Rubidium', ('85Rb', '87Rb')),
                'Sr': ('Strontium', ('88Sr', '86Sr', '87Sr')),
                'Y': ['Yttrium'],
                'Zr': ('Zironium', ('90Zr', '94Zr', '92Zr', '91Zr', '96Zr')),
                'Nb': ['Niobium'],
                'Mo': ('Molybdenum', ('98Mo', '96Mo', '95Mo', '92Mo', '100Mo', '97Mo', '94Mo')),
                'Tc': ['Technetium'],
                'Ru': ('Ruthenium', ('102Ru', '104Ru', '101Ru', '99Ru', '100Ru', '96Ru', '98Ru')),
                'Rh': ['Rhodium'],
                'Pd': ('Palladium', ('106Pd', '108Pd', '105Pd', '110Pd', '104Pd', '102Pd')),
                'Ag': ('Silver', ('107Ag', '109Ag')),
                'Cd': ('Cadmium', ('114Cd', '112Cd', '111Cd', '110Cd', '113Cd', '116Cd', '106Cd', '108Cd')),
                'In': ('Indium', ('115In', '113In')),
                'Sn': ('Tin', ('120Sn', '118Sn', '116Sn', '119Sn', '117Sn', '124Sn', '122Sn', '112Sn')),
                'Sb': ('Antimony', ('121Sb', '123Sb')),
                'Te': ('Tellurium', ('130Te', '128Te', '126Te', '125Te', '124Te', '122Te')),
                'I': ['Iodine'],
                'Xe': ('Xenon', ('132Xe', '129Xe', '131Xe', '134Xe', '136Xe', '130Xe', '128Xe')),
                'Cs': ['Cesium'],
                'Ba': ('Barium', ('138Ba', '137Ba', '136Ba', '135Ba', '134Ba')),
                'La': ['Lanthanum'],
                'Hf': ('Hafnium', ('180Hf', '178Hf', '177Hf', '179Hf', '176Hf')),
                'Ta': ['Tantalum'],
                'W': ('Tungsten', ('184W', '186W', '182W', '183W')),
                'Re': ('Rhenium', ('187Re', '185Re')),
                'Os': ('Osmium', ('192OS', '190Os', '189Os', '188Os', '187Os', '186Os')),
                'Ir': ('Iridium', ('193Ir', '191Ir')),
                'Pt': ('Platinum', ('195Pt', '194Pt', '196Pt', '198Pt', '192Pt')),
                'Au': ['Gold'],
                'Hg': ('Mercury', ('202Hg', '200Hg', '199Hg', '201Hg', '198Hg', '204Hg')),
                'Tl': ('Thallium', ('205Tl', '203Tl')),
                'Pb': ('Lead', ('208Pb', '206Pb', '207Pb', '204Pb')),
                'Bi': ['Bismuth'],
                'Po': ['Polonium'],
                'At': ['Aslatine'],
                'Rn': ['Radon'],
                'Fr': ['Francium'],
                'Ra': ['Radium'],
                'Ac': ['Actinium']
                }
|
import configparser
# CONFIG
# Cluster / S3 / IAM settings are read from dwh.cfg in the working directory.
config = configparser.ConfigParser()
config.read('dwh.cfg')
# DROP TABLES
# One IF EXISTS drop per table so the ETL can be re-run from a clean slate.
staging_events_table_drop = "DROP TABLE IF EXISTS staging_events;"
staging_songs_table_drop = "DROP TABLE IF EXISTS staging_songs;"
songplay_table_drop = "DROP TABLE IF EXISTS songplays;"
user_table_drop = "DROP TABLE IF EXISTS users;"
song_table_drop = "DROP TABLE IF EXISTS songs;"
artist_table_drop = "DROP TABLE IF EXISTS artists;"
time_table_drop = "DROP TABLE IF EXISTS time;"
# CREATE TABLES
staging_events_table_create= ("""
CREATE TABLE IF NOT EXISTS staging_events (
artist_name VARCHAR,
auth VARCHAR(10),
user_first_name VARCHAR,
user_gender CHAR(1),
item_in_session INTEGER,
user_last_name VARCHAR,
song_duration NUMERIC(9,5),
user_membership_level CHAR(4),
artist_location VARCHAR,
method CHAR(3),
page VARCHAR(16),
registration BIGINT,
session_id INTEGER,
song_title VARCHAR,
status CHAR(3),
ts BIGINT,
user_agent VARCHAR,
user_id INTEGER
);
""")
staging_songs_table_create = ("""
CREATE TABLE IF NOT EXISTS staging_songs (
num_songs INTEGER,
artist_id CHAR(18),
artist_latitude NUMERIC(8,5),
artist_longitude NUMERIC(8,5),
artist_location VARCHAR,
artist_name VARCHAR,
song_id CHAR(18),
title VARCHAR,
duration NUMERIC(9,5),
year INTEGER
);
""")
songplay_table_create = ("""
CREATE TABLE IF NOT EXISTS songplays (
songplay_id INT IDENTITY(0,1) PRIMARY KEY,
start_time timestamp NOT NULL REFERENCES time(start_time),
user_id INTEGER NOT NULL REFERENCES users(user_id),
user_membership_level CHAR(4),
song_id CHAR(18) NOT NULL REFERENCES songs(song_id),
artist_id CHAR(18) NOT NULL REFERENCES artists(artist_id),
session_id INTEGER,
location VARCHAR,
user_agent VARCHAR
);
""")
user_table_create = ("""
CREATE TABLE IF NOT EXISTS users (
user_id INTEGER PRIMARY KEY,
first_name VARCHAR,
last_name VARCHAR,
gender CHAR(1),
membership_level CHAR(4)
);
""")
song_table_create = ("""
CREATE TABLE IF NOT EXISTS songs (
song_id CHAR(18) PRIMARY KEY,
title VARCHAR,
artist_id CHAR(18) NOT NULL REFERENCES artists(artist_id),
year INTEGER,
duration NUMERIC(9,5)
);
""")
artist_table_create = ("""
CREATE TABLE IF NOT EXISTS artists (
artist_id CHAR(18) PRIMARY KEY,
name VARCHAR,
location VARCHAR,
latitude NUMERIC(8,5),
longitude NUMERIC(8,5)
);
""")
time_table_create = ("""
CREATE TABLE IF NOT EXISTS time (
start_time timestamp PRIMARY KEY,
hour INTEGER,
day INTEGER,
week INTEGER,
month INTEGER,
year INTEGER,
weekday INTEGER
);
""")
# STAGING TABLES
# COPY commands that bulk-load the staging tables from S3.
# NOTE(review): `config` is a ConfigParser defined elsewhere in this module.
# The interpolated S3 URIs and IAM role ARN must come through already quoted
# in the .cfg file, otherwise these statements are syntactically invalid --
# confirm the config format.
staging_events_copy = (f"""
copy staging_events
from {config.get('S3', 'LOG_DATA')}
iam_role {config.get('IAM_ROLE','ARN')}
format as json {config.get('S3','LOG_JSONPATH')}
""")

# Song metadata has no JSONPaths file; field names map directly ('auto').
staging_songs_copy = (f"""
copy staging_songs
from {config.get('S3', 'SONG_DATA')}
iam_role {config.get('IAM_ROLE','ARN')}
format as json 'auto'
""")
# FINAL TABLES
# INSERT ... SELECT statements moving data from staging into the star schema.

# Fact load: join events to song metadata on title/artist/duration, keeping
# only actual play events (page = 'NextSong'). The epoch-millisecond `ts`
# is converted to a timestamp inline. NOTE(review): the join can emit
# duplicate rows if one (title, artist, duration) triple matches multiple
# staging rows -- confirm upstream uniqueness.
songplay_table_insert = ("""
INSERT INTO SONGPLAYS (
start_time,
user_id,
user_membership_level,
song_id,
artist_id,
session_id,
location,
user_agent
)
SELECT
TIMESTAMP 'epoch' + se.ts/1000 * interval '1 second',
user_id,
user_membership_level,
song_id,
artist_id,
session_id,
se.artist_location,
user_agent
FROM staging_events AS se
JOIN staging_songs AS ss
ON se.song_title = ss.title AND se.artist_name = ss.artist_name AND se.song_duration = ss.duration
WHERE page = 'NextSong';
""")

# Users: keep each user's most recent NextSong event so membership level
# reflects their latest state. NOTE(review): the correlated subquery refers
# to the outer select-list alias `uid`; relies on Redshift accepting that.
user_table_insert = ("""
INSERT INTO users (
SELECT DISTINCT user_id AS uid, user_first_name, user_last_name, user_gender, user_membership_level
FROM staging_events
WHERE user_id IS NOT NULL AND page = 'NextSong' AND ts = (SELECT MAX(ts) FROM staging_events WHERE user_id = uid AND page = 'NextSong')
);
""")

# Songs: straight projection of staged song metadata.
song_table_insert = ("""
INSERT INTO songs (
SELECT song_id, title, artist_id, year, duration
FROM staging_songs
WHERE song_id IS NOT NULL
);
""")

# Artists: keep the latest (max year) metadata row per artist.
artist_table_insert = ("""
INSERT INTO artists (
SELECT DISTINCT artist_id AS aid, artist_name, artist_location, artist_latitude, artist_longitude
FROM staging_songs
WHERE artist_id IS NOT NULL AND year = (SELECT MAX(year) FROM staging_songs WHERE artist_id = aid)
);
""")

# Time: explode each distinct event timestamp into calendar parts.
# NOTE(review): EXTRACT(... FROM start_time) references the select-list
# alias -- relies on Redshift's lateral column alias support.
time_table_insert = ("""
INSERT INTO time (
SELECT
DISTINCT timestamp 'epoch' + ts/1000 * interval '1 second' AS start_time,
EXTRACT(hour FROM start_time),
EXTRACT(day FROM start_time),
EXTRACT(week FROM start_time),
EXTRACT(month FROM start_time),
EXTRACT(year FROM start_time),
EXTRACT(weekday FROM start_time)
FROM staging_events
WHERE ts IS NOT NULL
);
""")
# QUERY LISTS
# Execution order matters: creation lists dimensions before songplays so
# the foreign keys resolve. NOTE(review): the *_drop names are defined
# elsewhere in this module (outside this excerpt).
create_table_queries = [staging_events_table_create, staging_songs_table_create, time_table_create, user_table_create, artist_table_create, song_table_create, songplay_table_create]
drop_table_queries = [staging_events_table_drop, staging_songs_table_drop, songplay_table_drop, user_table_drop, song_table_drop, artist_table_drop, time_table_drop]
copy_table_queries = [staging_events_copy, staging_songs_copy]
insert_table_queries = [songplay_table_insert, user_table_insert, song_table_insert, artist_table_insert, time_table_insert]
|
from datetime import timedelta
from ebedke.utils.date import days_lower, on_workdays
from ebedke.utils.text import pattern_slice
from ebedke.utils.http import get_dom
from ebedke.pluginmanager import EbedkePlugin
URL_ROOT = "http://stexhaz.hu/index.php/hu/etl/deli-ajanlat"
@on_workdays
def get_menu(today):
    """Scrape today's lunch menu from the Stex daily-offer page.

    Returns the menu lines for the current weekday, cut out of the page
    text between the weekday heading and the next heading/price marker.
    """
    page = get_dom(URL_ROOT)
    article_text = "".join(page.xpath("/html/body//article//text()"))
    # Drop the date-range words so they cannot be mistaken for day headings.
    lines = article_text.replace("hétfőig", "").replace("csütörtökétől", "").splitlines()
    todays_section = pattern_slice(
        lines,
        [days_lower[today.weekday()]],
        days_lower + ['ára', 'előfizetés', 'ajánlat'],
        inclusive=False,
    )
    return list(todays_section)
# Registration object picked up by the ebedke plugin manager.
plugin = EbedkePlugin(
    enabled=True,
    groups=["corvin"],
    name='Stex',
    id="st",
    url=URL_ROOT,
    downloader=get_menu,
    ttl=timedelta(minutes=25),  # cache the scraped menu for 25 minutes
    cards=['szep', 'erzs'],  # accepted meal-card types
    coord=(47.489313, 19.070442)  # restaurant location (lat, lon)
)
|
# Copyright 2020 MONAI Consortium
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
import unittest
import nibabel
import numpy as np
from nibabel.processing import resample_to_output
from parameterized import parameterized
from monai.transforms import AddChanneld, LoadNiftid, Orientationd, Spacingd
# Absolute paths to the two NIfTI test volumes under tests/testing_data.
FILES = tuple(
    os.path.join(os.path.dirname(__file__), "testing_data", filename)
    for filename in ("anatomical.nii", "reoriented_anat_moved.nii")
)
class TestLoadSpacingOrientation(unittest.TestCase):
    """End-to-end checks that LoadNiftid + Spacingd (+ Orientationd) agree
    with nibabel's ``resample_to_output`` reference implementation."""

    @parameterized.expand(FILES)
    def test_load_spacingd(self, filename):
        # Load volume, add channel dim, resample to anisotropic pixdim.
        data = {"image": filename}
        data_dict = LoadNiftid(keys="image")(data)
        data_dict = AddChanneld(keys="image")(data_dict)
        t = time.time()
        res_dict = Spacingd(keys="image", pixdim=(1, 0.2, 1), diagonal=True, mode="zeros")(data_dict)
        t1 = time.time()
        print(f"time monai: {t1 - t}")
        # Reference result via nibabel/scipy from the original affine.
        anat = nibabel.Nifti1Image(data_dict["image"][0], data_dict["image_meta"]["original_affine"])
        ref = resample_to_output(anat, (1, 0.2, 1), order=1)
        t2 = time.time()
        print(f"time scipy: {t2 - t1}")
        # NOTE(review): wall-clock monotonicity check only; trivially true.
        self.assertTrue(t2 >= t1)
        np.testing.assert_allclose(res_dict["image_meta"]["affine"], ref.affine)
        np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
        np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0], atol=0.05)

    @parameterized.expand(FILES)
    def test_load_spacingd_rotate(self, filename):
        # Same comparison, but after a 90-degree axis permutation of the
        # affine, so resampling must handle a rotated orientation.
        data = {"image": filename}
        data_dict = LoadNiftid(keys="image")(data)
        data_dict = AddChanneld(keys="image")(data_dict)
        affine = data_dict["image_meta"]["affine"]
        data_dict["image_meta"]["original_affine"] = data_dict["image_meta"]["affine"] = (
            np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine
        )
        t = time.time()
        res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=True, mode="zeros")(data_dict)
        t1 = time.time()
        print(f"time monai: {t1 - t}")
        anat = nibabel.Nifti1Image(data_dict["image"][0], data_dict["image_meta"]["original_affine"])
        ref = resample_to_output(anat, (1, 2, 3), order=1)
        t2 = time.time()
        print(f"time scipy: {t2 - t1}")
        # NOTE(review): wall-clock monotonicity check only; trivially true.
        self.assertTrue(t2 >= t1)
        np.testing.assert_allclose(res_dict["image_meta"]["affine"], ref.affine)
        if "anatomical" not in filename:
            np.testing.assert_allclose(res_dict["image"].shape[1:], ref.shape)
            np.testing.assert_allclose(ref.get_fdata(), res_dict["image"][0], atol=0.05)
        else:
            # different from the ref implementation (shape computed by round
            # instead of ceil)
            np.testing.assert_allclose(ref.get_fdata()[..., :-1], res_dict["image"][0], atol=0.05)

    def test_load_spacingd_non_diag(self):
        # Non-diagonal output affine: resampled affine must keep the
        # permuted orientation rather than being diagonalised.
        data = {"image": FILES[1]}
        data_dict = LoadNiftid(keys="image")(data)
        data_dict = AddChanneld(keys="image")(data_dict)
        affine = data_dict["image_meta"]["affine"]
        data_dict["image_meta"]["original_affine"] = data_dict["image_meta"]["affine"] = (
            np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine
        )
        res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="zeros")(data_dict)
        # Golden affine values precomputed for this fixture.
        np.testing.assert_allclose(
            res_dict["image_meta"]["affine"],
            np.array(
                [
                    [0.0, 0.0, 3.0, -27.599409],
                    [0.0, 2.0, 0.0, -47.977585],
                    [-1.0, 0.0, 0.0, 35.297897],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
        )

    def test_load_spacingd_rotate_non_diag(self):
        # Spacing change only, border padding, non-diagonal mode.
        data = {"image": FILES[0]}
        data_dict = LoadNiftid(keys="image")(data)
        data_dict = AddChanneld(keys="image")(data_dict)
        res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="border")(data_dict)
        np.testing.assert_allclose(
            res_dict["image_meta"]["affine"],
            np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, 2.0, 0.0, -40.0], [0.0, 0.0, 3.0, -16.0], [0.0, 0.0, 0.0, 1.0]]),
        )

    def test_load_spacingd_rotate_non_diag_ornt(self):
        # Spacing followed by re-orientation to LPI flips the axes signs.
        data = {"image": FILES[0]}
        data_dict = LoadNiftid(keys="image")(data)
        data_dict = AddChanneld(keys="image")(data_dict)
        res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="border")(data_dict)
        res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict)
        np.testing.assert_allclose(
            res_dict["image_meta"]["affine"],
            np.array([[-1.0, 0.0, 0.0, 32.0], [0.0, -2.0, 0.0, 40.0], [0.0, 0.0, -3.0, 32.0], [0.0, 0.0, 0.0, 1.0]]),
        )

    def test_load_spacingd_non_diag_ornt(self):
        # Rotated input affine + spacing + LPI orientation combined.
        data = {"image": FILES[1]}
        data_dict = LoadNiftid(keys="image")(data)
        data_dict = AddChanneld(keys="image")(data_dict)
        affine = data_dict["image_meta"]["affine"]
        data_dict["image_meta"]["original_affine"] = data_dict["image_meta"]["affine"] = (
            np.array([[0, 0, 1, 0], [0, 1, 0, 0], [-1, 0, 0, 0], [0, 0, 0, 1]]) @ affine
        )
        res_dict = Spacingd(keys="image", pixdim=(1, 2, 3), diagonal=False, mode="border")(data_dict)
        res_dict = Orientationd(keys="image", axcodes="LPI")(res_dict)
        np.testing.assert_allclose(
            res_dict["image_meta"]["affine"],
            np.array(
                [
                    [-3.0, 0.0, 0.0, 56.4005909],
                    [0.0, -2.0, 0.0, 52.02241516],
                    [0.0, 0.0, -1.0, 35.29789734],
                    [0.0, 0.0, 0.0, 1.0],
                ]
            ),
        )


if __name__ == "__main__":
    unittest.main()
|
import os
from dateutil.parser import parse
from glob import glob
import shutil
from django.core.management import BaseCommand
from django.utils import timezone
from presqt.utilities import read_file
class Command(BaseCommand):
    def handle(self, *args, **kwargs):
        """
        Delete all mediafiles that have run past their expiration date.

        Walks every per-run directory under downloads/, uploads/ and
        transfers/. A directory is removed when its process_info.json is
        missing or its recorded expiration has passed; in development mode
        every directory is removed unconditionally.
        """
        # NOTE(review): a missing ENVIRONMENT variable raises KeyError here,
        # matching the original behavior (fail loudly rather than delete).
        # Read it once instead of once per directory.
        is_development = os.environ['ENVIRONMENT'] == 'development'
        if is_development:
            print('***delete_outdated_mediafiles is running in development mode.***')

        directory_patterns = [
            '/usr/src/app/mediafiles/downloads/*/',
            '/usr/src/app/mediafiles/uploads/*/',
            '/usr/src/app/mediafiles/transfers/*/'
        ]
        # Plain loop instead of the original side-effecting list comprehension.
        directories = []
        for pattern in directory_patterns:
            directories.extend(glob(pattern))

        for directory in directories:
            try:
                data = read_file('{}process_info.json'.format(directory), True)
            except FileNotFoundError:
                # Orphaned directory: no bookkeeping file, nothing to keep.
                shutil.rmtree(directory)
                print('{} has been deleted. No process_info.json file found'.format(directory))
            else:
                expiration = parse(data['expiration'])
                if expiration <= timezone.now() or is_development:
                    shutil.rmtree(directory)
                    print('{} has been deleted.'.format(directory))
                else:
                    print('{} has been retained.'.format(directory))
|
from typing import Dict, List, cast
import logging
import copy
from overrides import overrides
from allennlp.data.instance import Instance
from allennlp.data.tokenizers.tokenizer import Tokenizer
from allennlp.data.tokenizers import Token
from allennlp.data.tokenizers.whitespace_tokenizer import WhitespaceTokenizer
from allennlp.data.dataset_readers.dataset_reader import DatasetReader
from allennlp.data.token_indexers.token_indexer import TokenIndexer
from allennlp.data.fields import Field, TextField
from allennlp.data.token_indexers import SingleIdTokenIndexer
from allennlp.data.tokenizers import PretrainedTransformerTokenizer
logger = logging.getLogger(__name__)
@DatasetReader.register("next_token_lm")
class NextTokenLMReader(DatasetReader):
    """
    Creates `Instances` suitable for use in predicting a single next token using a language
    model. The :class:`Field` s that we create are the following: an input `TextField` and a
    target token `TextField` (we only ever have a single token, but we use a `TextField` so we
    can index it the same way as our input, typically with a single
    `PretrainedTransformerIndexer`).

    NOTE: This is not fully functional! It was written to put together a demo for interpreting and
    attacking language models, not for actually training anything. It would be a really bad idea
    to use this setup for training language models, as it would be incredibly inefficient. The
    only purpose of this class is for a demo.

    # Parameters

    tokenizer : `Tokenizer`, optional (default=`WhitespaceTokenizer()`)
        We use this `Tokenizer` for the text. See :class:`Tokenizer`.
    token_indexers : `Dict[str, TokenIndexer]`, optional (default=`{"tokens": SingleIdTokenIndexer()}`)
        We use this to define the input representation for the text, and to get ids for the mask
        targets. See :class:`TokenIndexer`.
    max_tokens : `int`, optional (default = `None`)
        If you don't handle truncation at the `tokenizer` level, you can specify `max_tokens`
        here, and the only the last `max_tokens` will be used.
    """

    def __init__(
        self,
        tokenizer: Tokenizer = None,
        token_indexers: Dict[str, TokenIndexer] = None,
        max_tokens: int = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        self._tokenizer = tokenizer or WhitespaceTokenizer()
        # Targets are tokenized separately: for transformer tokenizers we use
        # a shallow copy with special-token insertion disabled, so the target
        # word is not wrapped in [CLS]/[SEP]-style markers.
        self._targets_tokenizer: Tokenizer
        if isinstance(self._tokenizer, PretrainedTransformerTokenizer):
            self._targets_tokenizer = copy.copy(self._tokenizer)
            self._targets_tokenizer._add_special_tokens = False
        else:
            self._targets_tokenizer = self._tokenizer
        self._token_indexers = token_indexers or {"tokens": SingleIdTokenIndexer()}
        self._max_tokens = max_tokens

    @overrides
    def _read(self, file_path: str):
        # Only meant to be exercised from unit tests; warn otherwise.
        import sys

        # You can call pytest with either `pytest` or `py.test`.
        if "test" not in sys.argv[0]:
            logger.error(
                "_read is only implemented for unit tests. You should not actually "
                "try to train or evaluate a language model with this code."
            )
        with open(file_path, "r") as text_file:
            for sentence in text_file:
                tokens = self._tokenizer.tokenize(sentence)
                # Dummy fixed target; this reader exists only for demos.
                target = "the"
                yield self.text_to_instance(sentence, tokens, target)

    @overrides
    def text_to_instance(
        self,  # type: ignore
        sentence: str = None,
        tokens: List[Token] = None,
        target: str = None,
    ) -> Instance:
        if tokens is None and sentence is not None:
            tokens = self._tokenizer.tokenize(sentence)
        elif sentence is None:
            raise ValueError("expected either 'sentence' or 'tokens' to not be null")

        tokens = cast(List[Token], tokens)
        if self._max_tokens is not None:
            # Keep only the last max_tokens (the most recent context).
            tokens = tokens[-self._max_tokens :]
        input_field = TextField(tokens, self._token_indexers)
        fields: Dict[str, Field] = {"tokens": input_field}
        # TODO: if we index word that was not split into wordpieces with
        # PretrainedTransformerTokenizer we will get OOV token ID...
        # Until this is handled, let's use first wordpiece id for each token since tokens should contain text_ids
        # to be indexed with PretrainedTokenIndexer. It also requires hack to avoid adding special tokens...
        if target:
            wordpiece = self._targets_tokenizer.tokenize(target)[0]
            target_token = Token(text=target, text_id=wordpiece.text_id, type_id=wordpiece.type_id)
            fields["target_ids"] = TextField([target_token], self._token_indexers)
        return Instance(fields)
|
# -*- coding: utf-8 -*-
"""HackerNewsV4.ipynb

Automatically generated by Colaboratory.

Original file is located at
    https://colab.research.google.com/drive/1dtIIRFFq81O97i6azALZbqBYgoVsUKvD
"""

from warnings import filterwarnings
# Silence pandas/nltk deprecation noise in the notebook output.
filterwarnings("ignore")

import pandas as pd
import json
import urllib
import requests
import numpy as np

# NOTE(review): `urllib.request` is used below although only `urllib` is
# imported; this works in environments where another import has already
# loaded the submodule -- confirm, or import urllib.request explicitly.

# Smoke-test the Hacker News API with a single known item.
html = urllib.request.urlopen('https://hacker-news.firebaseio.com/v0/item/2169746.json?print=pretty')
json.loads(html.read())

# Highest item id currently on HN; we fetch the latest 1000 items below.
x = urllib.request.urlopen('https://hacker-news.firebaseio.com/v0/maxitem.json?print=pretty')
max_item = int(x.read())
print(max_item)

data = []
number_of_entries = 1000
min_item = max_item - number_of_entries
count = 0
# One HTTP request per item id (slow; ~1000 sequential requests).
for i in range(min_item, max_item):
    html = urllib.request.urlopen('https://hacker-news.firebaseio.com/v0/item/' + str(i) + '.json')
    data.append(json.loads(html.read()))
    count += 1
    if count % 100 == 0:
        print(f"Loaded {count} rows")

print (data[0])

# Deleted/missing items come back as null -> None; drop them.
data = [i for i in data if i is not None]

from pandas.io.json import json_normalize  # NOTE(review): deprecated import; unused below
df = pd.DataFrame.from_dict(data)
df.head(10)
print(df.shape)
df.columns
# NOTE(review): drop() raises KeyError if any of these columns is absent
# from the fetched batch -- depends on what the API returned.
df_new = df.drop(columns = ['deleted', 'dead', 'descendants', 'score', 'kids', 'parent', 'title', 'url'])
df_new.head(10)
df_new['text'][0]

# Keep only comment items for sentiment analysis.
df_comments = df_new[df_new['type'] == 'comment']
df_comments['text']

import nltk
from nltk.tokenize import RegexpTokenizer
from nltk.stem import WordNetLemmatizer,PorterStemmer
from nltk.corpus import stopwords
nltk.download('stopwords')
nltk.download('wordnet')
import re
lemmatizer = WordNetLemmatizer()
stemmer = PorterStemmer()
def preprocess(sentence):
    """Lower-case, strip HTML tags/URLs/digits, tokenize and drop stopwords.

    Returns the surviving word tokens (length > 2, not English stopwords)
    joined by single spaces.

    NOTE(review): the original also computed Porter stems and WordNet
    lemmas but returned the unstemmed tokens, so that dead work has been
    removed. If lemmatized output was the intent, build and return the
    lemmas instead.
    """
    sentence = str(sentence)
    sentence = sentence.lower()
    sentence = sentence.replace('{html}', "")
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', sentence)
    rem_url = re.sub(r'http\S+', '', cleantext)
    rem_num = re.sub('[0-9]+', '', rem_url)
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(rem_num)
    # Build the stopword set once instead of calling stopwords.words()
    # for every token (it re-reads the corpus each call).
    stop_words = set(stopwords.words('english'))
    filtered_words = [w for w in tokens if len(w) > 2 and w not in stop_words]
    return " ".join(filtered_words)
# NOTE(review): assignments on df_comments (a filtered view of df_new) may
# trigger pandas SettingWithCopyWarning -- behavior depends on pandas version.
df_comments['clean_text']=df_comments['text'].map(lambda s:preprocess(s))
df_comments.head(10)
df_comments['by'].value_counts()
df_comments.shape
df_comments['text'] = df_comments['text'].astype(str)
df_comments['text'][:10]
df_comments['clean_text'] = df_comments['clean_text'].astype(str)
df_comments['clean_text'][:10]

# Redundant re-imports (already imported above); harmless in a notebook.
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')

# IPython magic line: installs VADER inside the Colab runtime.
!pip install vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()

def get_compund_score(text):
    # Return VADER's compound polarity in [-1, 1] for `text`.
    # (Name misspelling kept: callers throughout this script use it.)
    score = analyzer.polarity_scores(text)
    str(text)  # NOTE(review): no-op expression; result is discarded
    return score['compound']

get_compund_score(df_comments['text'][0])
get_compund_score(df_comments['clean_text'][0])

df_comments['vader_score'] = df_comments['text'].apply(get_compund_score)
df_comments['clean_vader_score'] = df_comments['clean_text'].apply(get_compund_score)
df_comments.head(10)

# Binarize: compound >= 0 counts as positive.
df_comments['sentiment'] = df_comments['vader_score'].apply(lambda c: 'positive' if c >=0 else 'negative')
df_comments['clean_sentiment'] = df_comments['clean_vader_score'].apply(lambda c: 'positive' if c >=0 else 'negative')
df_comments.head(25)
df_comments.to_csv('hn_sentiments.csv')

# Redundant re-imports below (notebook cell boundaries).
import pandas as pd
import json
import urllib
import requests

!pip install vaderSentiment
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
analyzer = SentimentIntensityAnalyzer()

def get_compund_score(text):
    # Duplicate definition (same as above); kept because later cells call it.
    score = analyzer.polarity_scores(text)
    str(text)  # NOTE(review): no-op expression; result is discarded
    return score['compound']
def preprocess(sentence):
    """Lower-case, strip HTML tags/URLs/digits, tokenize and drop stopwords.

    Duplicate of the earlier notebook definition; returns the surviving
    tokens joined by spaces. NOTE(review): the original computed unused
    Porter stems and WordNet lemmas; that dead work has been removed.
    """
    sentence = str(sentence)
    sentence = sentence.lower()
    sentence = sentence.replace('{html}', "")
    cleanr = re.compile('<.*?>')
    cleantext = re.sub(cleanr, '', sentence)
    rem_url = re.sub(r'http\S+', '', cleantext)
    rem_num = re.sub('[0-9]+', '', rem_url)
    tokenizer = RegexpTokenizer(r'\w+')
    tokens = tokenizer.tokenize(rem_num)
    # Hoist the stopword set out of the per-token test.
    stop_words = set(stopwords.words('english'))
    filtered_words = [w for w in tokens if len(w) > 2 and w not in stop_words]
    return " ".join(filtered_words)
# Redundant re-imports and corpus download (notebook cell boundary).
import nltk
from nltk.corpus import stopwords
nltk.download('stopwords')
import pandas as pd
import json
import urllib
import requests
def get_score(entries):
    """Fetch each HN item in `entries` and sum VADER compound scores of its comments.

    One blocking HTTP request per item id; can be very slow for prolific
    users. NOTE(review): assumes every fetched batch contains at least one
    'comment' row with a 'text' column -- otherwise pandas raises.
    """
    data = []
    for id in entries:  # NOTE(review): `id` shadows the builtin
        html = urllib.request.urlopen('https://hacker-news.firebaseio.com/v0/item/' + str(id) + '.json')
        data.append(json.loads(html.read()))
    print(data[0])
    # Deleted items come back as None; drop them before framing.
    data = [i for i in data if i is not None]
    df = pd.DataFrame.from_dict(data)
    df_comments = df[df['type'] == 'comment']
    df_comments['clean_text']=df_comments['text'].map(lambda s:preprocess(s))
    df_comments['clean_vader_score'] = df_comments['clean_text'].apply(get_compund_score)
    return df_comments['clean_vader_score'].sum() #we can use mean()
import pandas as pd
import json
import urllib
import requests
def get_cummulative_score(username):
    """Sum the comment-sentiment scores over everything `username` submitted.

    Fetches the user's profile from the HN API and delegates the per-item
    scoring to get_score(). (Name misspelling kept for compatibility.)
    """
    data = []
    html = urllib.request.urlopen('https://hacker-news.firebaseio.com/v0/user/' + str(username) + '.json?print=pretty')
    data.append(json.loads(html.read()))
    df2 = pd.DataFrame.from_dict(data)
    # 'submitted' holds the list of item ids this user posted.
    entries = (df2['submitted'][0])
    score = get_score(entries)
    return score
# Demo: cumulative sentiment for HN user 'jl'.
# NOTE(review): the print message says "for user" but only the score is
# shown; the username is not interpolated.
score = get_cummulative_score('jl')
print("Cummulative score for user ", score )

# Sanity checks on obviously negative / positive sentences.
test_message = "This is really bad. Never going there again. Absolute worst"
print (get_compund_score(test_message))
test_message = "This is really good. Loved the place. Employees were super kind. Can't wait to go back again."
print (get_compund_score(test_message))
import faulthandler
# Dump Python tracebacks on hard crashes (e.g. segfaults inside TF/CUDA).
faulthandler.enable()

import numpy as np
import tensorflow as tf
from tensorflow import keras as ks
import os

from models import build_model
from csbdeep.utils.tf import limit_gpu_memory
from datawrapper import make_sequence

# Cap GPU memory so other jobs can share the card.
limit_gpu_memory(fraction=0.75, allow_growth=False)
# run_opts = tf.RunOptions(report_tensor_allocations_upon_oom = True)

##### architecture ########
architecture = ["time CNN"][0]
image_size = [256, 256]
batch_size = 8
timepoints = 11  # frames per training sample
timeinterval = [
    1
]  # can be used as a data augmentation to generate training dataset with various movements
# Noise-augmentation sweeps applied when building the training sequences.
signallevel = [s for s in range(10, 100, 10)]
gaussnoise = [s for s in np.linspace(0.5, 5, 10)]
offset = 100

# possibly simulate and then load paths to simulation data
path_train = (
    os.path.dirname(os.path.realpath(__file__)) + "/Experimental_data/training/"
)
validation_ratio = 0.1
train_sequence, val_sequence = make_sequence(
    path_train,
    validation_ratio,
    batch_size,
    timepoints,
    timeinterval,
    signallevel,
    gaussnoise,
    offset,
)

# model configuration
use_bias = True
# NOTE(review): `lr=` is the legacy Keras argument name (newer TF versions
# expect `learning_rate=`) -- presumably matches the pinned TF; confirm.
optimizer = ks.optimizers.Adam(lr=1e-3)

# build model
model = build_model(image_size, timepoints, architecture, use_bias=use_bias)
# for layer in model.layers:
#     print(layer.name, layer.output_shape)

# compile model
model.compile(optimizer=optimizer, loss="mse", metrics=["mse"])

# train
# NOTE(review): fit_generator is deprecated in TF >= 2.1 in favor of fit();
# kept as-is to match the environment this script was written for.
model.fit_generator(
    generator=train_sequence,
    validation_data=val_sequence,
    epochs=50,
    max_queue_size=3,
    verbose=1,
    callbacks=[
        ks.callbacks.EarlyStopping(mode="min", patience=10, verbose=1),
        ks.callbacks.ModelCheckpoint(
            filepath="./models/best_model.h5", save_best_only=True
        ),
        ks.callbacks.TensorBoard(log_dir="./logs"),
        ks.callbacks.TerminateOnNaN(),
    ],
)
|
# coding: utf-8
'''
.. versionadded:: 0.21
'''
from __future__ import absolute_import, print_function, unicode_literals
from functools import wraps
import logging
import platform
import sys
import threading
if sys.version_info <= (3, 4):
import trollius as asyncio
from ._async_py27 import run_command
else:
import asyncio
from ._async_py35 import run_command
__all__ = ['new_file_event_loop', 'ensure_event_loop', 'with_loop', 'asyncio',
'run_command']
logger = logging.getLogger(__name__)
def new_file_event_loop():
    '''
    .. versionadded:: 0.15

    Create a fresh event loop capable of monitoring file IO events.

    Returns
    -------
    asyncio.BaseEventLoop
        On Windows this is a :class:`ProactorEventLoop`, since the default
        Windows loop does not support file or stream events; elsewhere it
        is the platform's standard new event loop.
    '''
    if platform.system() == 'Windows':
        return asyncio.ProactorEventLoop()
    return asyncio.new_event_loop()
def ensure_event_loop():
    '''
    .. versionadded:: 0.15

    Return the event loop bound to the current thread, creating and
    registering a new file-capable loop if none exists yet.

    Returns
    -------
    asyncio.BaseEventLoop
    '''
    try:
        return asyncio.get_event_loop()
    except RuntimeError as exc:
        # Any RuntimeError other than "no current event loop" is unexpected.
        if 'There is no current event loop' not in str(exc):
            raise
    loop = new_file_event_loop()
    asyncio.set_event_loop(loop)
    return loop
def with_loop(func):
    '''
    .. versionadded:: 0.15

    Decorator to run function within an asyncio event loop.

    .. notes::
        Uses :class:`asyncio.ProactorEventLoop` on Windows to support file I/O
        events, e.g., serial device events.

        If an event loop is already bound to the thread, but is either a)
        currently running, or b) *not a :class:`asyncio.ProactorEventLoop`
        instance*, execute function in a new thread running a new
        :class:`asyncio.ProactorEventLoop` instance.
    '''
    @wraps(func)
    def wrapped(*args, **kwargs):
        loop = ensure_event_loop()
        thread_required = False
        if loop.is_running():
            # Cannot nest run_until_complete inside a running loop.
            logger.debug('Event loop is already running.')
            thread_required = True
        elif all([platform.system() == 'Windows',
                  not isinstance(loop, asyncio.ProactorEventLoop)]):
            logger.debug('`ProactorEventLoop` required, not `%s`'
                         'loop in background thread.', type(loop))
            thread_required = True

        if thread_required:
            logger.debug('Execute new loop in background thread.')
            finished = threading.Event()

            def _run(generator):
                # A fresh thread has no loop; create one (Proactor on Windows)
                # and stash result/error on the Event for the caller.
                loop = ensure_event_loop()
                try:
                    result = loop.run_until_complete(asyncio
                                                     .ensure_future(generator))
                except Exception as e:
                    finished.result = None
                    finished.error = e
                else:
                    finished.result = result
                    finished.error = None
                finished.set()

            thread = threading.Thread(target=_run,
                                      args=(func(*args, **kwargs), ))
            thread.daemon = True
            thread.start()
            finished.wait()
            if finished.error is not None:
                raise finished.error
            return finished.result

        logger.debug('Execute in exiting event loop in main thread')
        # BUG FIX: this path previously called ``func(**kwargs)``, silently
        # dropping all positional arguments on the main-thread code path.
        return loop.run_until_complete(func(*args, **kwargs))
    return wrapped
|
import numpy as np
from pychemia.utils.periodic import atomic_number, covalent_radius, cpk_colors
class StructurePovray:
    """Render a pychemia Structure as a POV-Ray scene.

    ``create_pov`` builds the scene source with one sphere per atom in a
    3x3x3 block of periodic images; ``write_povray`` writes it to disk and,
    for crystals, also writes the unit-cell wireframe to ``cell.pov``.
    """

    def __init__(self, structure):
        self.structure = structure
        # Camera/light length scale; replaced by the largest lattice
        # length for crystals (see create_pov).
        self.distance = 10

    def create_pov(self):
        # Scene header: standard POV-Ray includes and a black background.
        ret = """
#version 3.7;
#include "colors.inc"    // The include files contain
#include "stones.inc"    // pre-defined scene elements
#include "glass.inc"

background{rgb 0}

"""
        if self.structure.is_crystal:
            self.distance = max(self.structure.lattice.lengths)
        else:
            self.distance = 10

        ret += "#declare r=%7.3f;\n #declare s=%7.3f;" % (self.distance, self.distance)

        # Camera placed on a diagonal, looking at the cell center.
        ret += "camera {\n"
        ret += "\tlocation <%7.3f, %7.3f, %7.3f>\n" % (1.3 * self.distance, 1.3 * self.distance, -1.3 * self.distance)
        ret += "\tlook_at <%7.3f, %7.3f, %7.3f>\n" % tuple(0.5 * sum(self.structure.cell[:]))
        ret += "}\n\n"

        if self.structure.nsites > 0:
            d = self.distance
            ret += "light_source { <%7.3f, %7.3f, %7.3f> color White}\n" % (2 * d, 2 * d, 2 * d)

        # One sphere per atom over a 3x3x3 block of periodic images; image
        # copies fade out via the exp(...) transmit/phong factors.
        for imagx in np.arange(-1, 2):
            for imagy in np.arange(-1, 2):
                for imagz in np.arange(-1, 2):
                    for site in self.structure:
                        for symbol in site.symbols:
                            cell = self.structure.cell
                            x = site.position[0] - imagx * cell[0, 0] - imagy * cell[1, 0] - imagz * cell[2, 0]
                            y = site.position[1] - imagx * cell[0, 1] - imagy * cell[1, 1] - imagz * cell[2, 1]
                            z = site.position[2] - imagx * cell[0, 2] - imagy * cell[1, 2] - imagz * cell[2, 2]
                            # Skip atoms that would sit on top of the camera.
                            if (x - self.distance) ** 2 + (y - self.distance) ** 2 + (z + self.distance) ** 2 < 2:
                                continue
                            cr = 0.5 * covalent_radius(symbol)
                            rgb = cpk_colors[atomic_number(symbol)]
                            color = 'rgb < %7.3f, %7.3f, %7.3f>' % (rgb[0], rgb[1], rgb[2])
                            ret += "sphere {\n"
                            ret += "\t<%7.3f, %7.3f, %7.3f>, %7.3f\n\ttexture {\n" % (x, y, z, cr)
                            ret += "\t\tpigment { color %s filter 0.4 transmit %7.3f}\n" % \
                                   (color, 1 - 0.9 * np.exp(-0.1 * (abs(imagx) + abs(imagy) + abs(imagz))))
                            ret += "\t\tnormal { bumps 0.8 scale 0.1 }\n\t\tfinish { phong %7.3f }\n\t}\n}\n\n" % \
                                   np.exp(-0.1 * (abs(imagx) + abs(imagy) + abs(imagz)))

        # NOTE(review): when nsites == 0, x/y/z were never bound by the loop
        # above, so this branch would raise NameError -- confirm intent.
        if self.structure.nsites <= 0:
            ret += "light_source { <%7.3f, %7.3f, %7.3f> color White}\n" % (x, y, z)

        # Include the unit-cell wireframe produced by write_cell().
        ret += """union{
#include "cell.pov"
  scale 1
  rotate <0, 0, 0>
  pigment{rgb <0.3,0.3,0.9>} finish{phong 0.9 ambient 0.42 reflection 0.1}
}
"""
        return ret

    def write_povray(self, filename):
        # Write the scene; crystals also get the companion cell.pov file.
        wf = open(filename, 'w')
        wf.write(self.create_pov())
        wf.close()
        if self.structure.is_crystal:
            self.write_cell('cell.pov')

    def write_cell(self, filename):
        """Write the 12 unit-cell edges as thin cylinders to `filename`."""
        wf = open(filename, 'w')
        ret = ''
        for i in range(3):
            for j in range(3):
                ret += "cylinder { "
                if i == j:
                    # Edge from the origin along lattice vector j.
                    ret += " <%7.3f, %7.3f, %7.3f>, " % (0.0, 0.0, 0.0)
                    ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(self.structure.cell[j])
                else:
                    # Edge from vector i to vector i + j.
                    ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(self.structure.cell[i])
                    ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(self.structure.cell[i] + self.structure.cell[j])
                ret += " %7.3f }\n" % (self.distance / 100.0)
            # Edges terminating at the far corner (a+b+c).
            ret += "cylinder { "
            ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(sum(self.structure.cell[:]))
            ret += " <%7.3f, %7.3f, %7.3f>, " % tuple(sum(self.structure.cell[:]) - self.structure.cell[i])
            ret += " %7.3f }\n" % (self.distance / 100.0)
        wf.write(ret)
        wf.close()
# ret += "\topen // Remove end caps\n"
# #ret += "\ttexture { %s }\n" % ('T_Stone25 scale 4')
# ret += "\ttexture { %s }\n" % ('pigment { Col_Glass_Old }')
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2021, Ontario Institute for Cancer Research (OICR).
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Authors:
Junjun Zhang <junjun.zhang@oicr.on.ca>
"""
import os
import sys
from click import echo
from wfpm.project import Project
def workon_cmd(
    project: Project = None,
    pkg: str = None,
    stop: bool = False,
    update: bool = False
):
    """Implement `wfpm workon`: show, switch, or stop package work.

    With no arguments prints the package status; with `pkg` switches to
    (or continues on) that package's branch; `-s` stops work; `-u`
    refreshes release info from the remote (main branch only).
    Exits via sys.exit(1) on invalid combinations.
    """
    # '-u' only makes sense from 'main': it refreshes release bookkeeping.
    if update and project.git.current_branch != 'main':
        echo(f"Can only use '-u' when on the 'main' branch, currently on '{project.git.current_branch}'")
        sys.exit(1)
    elif update and project.git.fetch_and_housekeeping():
        # refresh the project object
        project = Project(project_root=project.root, debug=project.debug)

    # Guard: sitting on a branch whose package was already released.
    if project.git.current_branch and project.git.current_branch in project.pkgs_released:
        echo(f"You are on a package branch that has been released '{project.git.current_branch}'.")
        echo(f"Please switch to the 'main' branch, run 'git branch -D {project.git.current_branch}' to delete the "
             "local branch. Make sure to delete it on GitHub as well.")
        sys.exit(1)

    if stop and pkg:
        echo("When '-s' is used, no pkg argument can be supplied.")
        sys.exit(1)

    # No pkg and no stop: just report status and exit successfully.
    if pkg is None and not stop:
        display_pkg_info(project)
        sys.exit()

    if stop:
        stop_workon(project)
        sys.exit()

    # `pkg` may be given with or without its '@version' suffix.
    if project.pkg_workon and (pkg == project.pkg_workon or pkg == project.pkg_workon.split('@')[0]):
        echo(f"Continue working on '{project.pkg_workon}', no change.")
    elif pkg in project.pkgs_in_dev:
        project.set_workon(pkg)
        echo(f"Now work on '{pkg}'")
    elif pkg in project.git.rel_candidates:
        if len(project.git.rel_candidates[pkg]) == 1:
            # Unambiguous: exactly one version in development.
            workon_pkg = '@'.join([pkg, project.git.rel_candidates[pkg][0]])
            project.set_workon(workon_pkg)
            echo(f"Now work on '{workon_pkg}'")
        else:
            # Ambiguous: ask the user to qualify with a version.
            echo(f"Multiple versions of the package are in development: {', '.join(project.git.rel_candidates[pkg])}")
            workon_pkg_1 = '@'.join([pkg, project.git.rel_candidates[pkg][0]])
            workon_pkg_2 = '@'.join([pkg, project.git.rel_candidates[pkg][1]])
            echo("Please specify which version to work on, eg, ", nl=False)
            echo(f"'wfpm workon {workon_pkg_1}' or 'wfpm workon {workon_pkg_2}'")
    else:
        echo(f"Not a package in development: '{pkg}'")
def display_pkg_info(project=None):
    """Print released packages, in-development packages and the current
    work-on target of *project* to the terminal."""
    def _echo_listing(label, pkg_map):
        # Shared rendering for the released and in-development listings.
        echo(label, nl=False)
        if not pkg_map:
            echo(" <none>")
            return
        echo("")
        for name in sorted(pkg_map):
            echo(f" {name}: ", nl=False)
            echo(', '.join(pkg_map[name]))

    _echo_listing("Packages released:", project.git.releases)
    _echo_listing("Packages in development:", project.git.rel_candidates)
    echo(f"Package being worked on: {project.pkg_workon if project.pkg_workon else '<none>'}")
def stop_workon(project=None):
    """Switch back to 'main', ending work on the current package.

    Refuses (via sys.exit(1)) unless invoked from the project root with a
    clean package branch; prints a notice when nothing is being worked on.
    """
    if not project.pkg_workon:
        echo("Not working on any package.")
        return
    if os.getcwd() != project.root:
        echo("Must run this command under project root dir.")
        sys.exit(1)
    if not project.git.branch_clean():
        echo(f"Package branch '{project.pkg_workon}' not clean, please complete on-going work and commit changes.")
        sys.exit(1)
    project.git.cmd_checkout_branch('main')
    echo(f"Stopped work on {project.pkg_workon}")
|
"""This modules allows for convenient error functions imports"""
from .http_error import http_error_handler
from .validation_error import http422_error_handler
from .creation_error import creation_error_handler
__all__ = ["http_error_handler", "http422_error_handler", "creation_error_handler"]
|
"""Sensitivity analysis funcs for sensitivity analysis clinic at CSDMS 2019.
Written by Nathan Lyons, May 2019
"""
from csv import DictWriter
from os import listdir
from os.path import isfile, join
import numpy as np
from pandas import concat, read_csv
def get_problem_dict(design_path=None):
    """Build a SALib-style problem dictionary from the factor-bounds CSV.

    Parameters
    ----------
    design_path : str, optional
        Path to a CSV with columns ``names``, ``bounds_min``, ``bounds_max``.
        Defaults to the original hard-coded
        ``../experiment_design/factor_bounds.csv`` (backward compatible).

    Returns
    -------
    dict
        Keys required by SALib: 'names' (list of str), 'num_vars' (int)
        and 'bounds' (ndarray of shape (num_vars, 2)).
    """
    if design_path is None:
        # Get the path of the file with the bounds of factors.
        file_name = 'factor_bounds.csv'
        design_path = join('..', 'experiment_design', file_name)

    # Create a problem dictionary with the keys required by SALib.
    df = read_csv(design_path)
    problem = {'names': df.names.values.tolist()}
    problem['num_vars'] = len(problem['names'])
    bounds_min = df.bounds_min.values
    bounds_max = df.bounds_max.values
    problem['bounds'] = np.column_stack((bounds_min, bounds_max))
    return problem
def get_path_dict():
    """Return the relative paths used by the experiment scripts.

    Keys: 'factor_levels_file' (trial factor levels text file) and
    'trials' (directory holding per-trial model output).
    """
    design_dir = join('..', 'experiment_design')
    return {
        'factor_levels_file': join(design_dir, 'trial_factor_levels.txt'),
        'trials': join('..', 'model_output', 'trials'),
    }
def get_compilated_response(trials_path):
    """Concatenate every trial's response.csv found under *trials_path*.

    Each subdirectory of *trials_path* is expected to be named like
    ``<prefix>.<trial_id>``; the integer trial id is attached to that
    trial's rows in a ``trial_id`` column. Entries without a
    ``response.csv`` (and macOS ``.DS_Store`` files) are skipped.

    Returns a single concatenated DataFrame with a fresh index.
    """
    df_list = []
    for trial_path in listdir(trials_path):
        if trial_path == '.DS_Store':
            continue
        trial_id = int(trial_path.split('.')[1])
        response_path = join(trials_path, trial_path, 'response.csv')
        if not isfile(response_path):
            continue
        df = read_csv(response_path)
        df['trial_id'] = trial_id
        df_list.append(df)
    compilated_response = concat(df_list, ignore_index=True, sort=True)
    # Bug fix: the original called compilated_response.to_csv() with no
    # target, which only built and discarded a CSV string (a no-op).
    return compilated_response
def write_data(response, full_path):
    """Write a single-row mapping of model data to a CSV file.

    *response* is a dict-like of column -> value; its keys become the
    header row and its values the single data row at *full_path*.
    """
    # newline='' is required by the csv module; without it Windows gets a
    # blank line between every row.
    with open(full_path, 'w', newline='') as f:
        w = DictWriter(f, response.keys())
        w.writeheader()
        w.writerow(response)
|
from git import Repo
def clone(url, destiny):
    """Clone the git repository at *url* into the *destiny* directory.

    Thin wrapper around GitPython's ``Repo.clone_from``; returns the
    resulting ``Repo`` object.
    """
    return Repo.clone_from(url, destiny)
import argparse
import json
import os
import sys
from io import StringIO
import pytest
import yaml
from bootstrap.lib.options import Options
from bootstrap.lib.options import OptionsDict
from bootstrap.lib.utils import merge_dictionaries
def reset_options_instance():
    """Wipe the Options singleton and CLI arguments between tests."""
    Options._Options__instance = None
    sys.argv = sys.argv[:1]  # keep only the program name
def test_empty_path():
    """Options() with no -o/--path_opts must exit with an argparse error.

    Expected behavior::

        $ python tests/test_options.py
        usage: tests_options.py -o PATH_OPTS
        test_options.py: error: the following arguments are required: -o/--path_opts
    """
    reset_options_instance()
    try:
        Options()
        assert False  # unreachable: argparse exits first
    except SystemExit as e:  # NOTE(review): `e` unused; pytest.raises would be tidier
        assert True
def test_o():
    """Load the default options file via the long flag.

    NOTE(review): despite its name this test exercises ``--path_opts``
    while ``test_path_opts`` exercises ``-o`` — the two names look
    swapped; confirm and rename if desired.

    Expected behavior::

        $ python tests/test_options.py --path_opts tests/default.yaml
        {"path_opts": "tests/default.yaml", "message": "default"}
    """
    reset_options_instance()
    sys.argv += ['--path_opts', 'tests/default.yaml']
    assert (Options().options == OptionsDict({'path_opts': 'tests/default.yaml', 'message': 'default'}))
def test_path_opts():
    """Load the default options file via the short flag.

    Expected behavior::

        $ python tests/test_options.py -o tests/default.yaml
        {"path_opts": "tests/default.yaml", "message": "default"}
    """
    reset_options_instance()
    sys.argv += ['-o', 'tests/default.yaml']
    assert (Options().options == OptionsDict({'path_opts': 'tests/default.yaml', 'message': 'default'}))
def test_path_opts_h():
    """-h together with -o must print usage and exit.

    Expected behavior::

        $ python tests/test_options.py -o tests/default.yaml -h
        usage: tests/test_options.py [-h] -o PATH_OPTS [--message [MESSAGE]]
        optional arguments:
          -h, --help            show this help message and exit
          -o PATH_OPTS, --path_opts PATH_OPTS
          --message [MESSAGE]   Default: default
    """
    reset_options_instance()
    sys.argv += ['-o', 'tests/default.yaml', '-h']
    try:
        Options()
        assert False  # unreachable: -h exits
    except SystemExit as e:
        assert True
def test_include():
    """A yaml file that __include__s another merges both into the options.

    Expected behavior::

        $ python tests/test_options.py -o tests/sgd.yaml
        {"path_opts": "tests/sgd.yaml", "message": "sgd", "sgd": true,
         "nested": {"message": "lol"}}
    """
    reset_options_instance()
    sys.argv += ['-o', 'tests/sgd.yaml']
    assert Options().options == OptionsDict({
        "path_opts": "tests/sgd.yaml",
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "lol"
        }
    })
def test_include_list():
    """__include__ may also be a list of files, all merged in."""
    reset_options_instance()
    sys.argv += ['-o', 'tests/sgd_list_include.yaml']
    assert Options().options == OptionsDict({
        "path_opts": "tests/sgd_list_include.yaml",
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "lol"
        },
        "database": "db",
    })
def test_include_absolute_path():
    """__include__ works with an absolute path; the temp yaml is cleaned up."""
    reset_options_instance()
    path_file = os.path.join(os.getcwd(), 'tests', 'sgd_abs_include.yaml')
    include_file = os.path.join(os.getcwd(), 'tests', 'default.yaml')
    options = {
        '__include__': include_file,
        'sgd': True,
        'nested': {'message': 'lol'},
    }
    # Write the options file under test on the fly.
    with open(path_file, 'w') as f:
        yaml.dump(options, f, default_flow_style=False)
    sys.argv += ['-o', 'tests/sgd_abs_include.yaml']
    gt_options = {
        "path_opts": 'tests/sgd_abs_include.yaml',
        "message": "default",
        "sgd": True,
        "nested": {
            "message": "lol"
        }
    }
    assert Options().options.asdict() == gt_options
    os.remove(path_file)
def test_overwrite():
    """A --nested.key CLI flag overrides the value loaded from yaml.

    Expected behavior::

        $ python tests/test_options.py -o tests/sgd.yaml --nested.message lolilol
        {"path_opts": "tests/sgd.yaml", "message": "sgd", "sgd": true,
         "nested": {"message": "lolilol"}}
    """
    reset_options_instance()
    sys.argv += ['-o', 'tests/sgd.yaml', '--nested.message', 'lolilol']
    assert (Options().options == OptionsDict({
        "path_opts": "tests/sgd.yaml",
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "lolilol"
        }
    }))
def test_getters():
    """Nested values are reachable by chained [], dotted key, and attribute."""
    reset_options_instance()
    sys.argv += ['-o', 'tests/sgd.yaml']
    opt = Options()
    assert opt['nested']['message'] == 'lol'
    assert opt['nested.message'] == 'lol'
    assert opt.nested.message == 'lol'
# TODO: test_setters
def test_save():
    """Options().save round-trips: the saved yaml reloads to the same options.

    Note the saved file deliberately excludes 'path_opts'.
    """
    reset_options_instance()
    sys.argv += ['-o', 'tests/sgd.yaml', '--nested.message', 'save']
    path_yaml = 'tests/saved.yaml'
    Options().save(path_yaml)
    with open(path_yaml, 'r') as yaml_file:
        options_yaml = yaml.safe_load(yaml_file)
    assert (OptionsDict(options_yaml) == OptionsDict({
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "save"
        }
    }))
    # Reload the saved file through the normal CLI path.
    reset_options_instance()
    sys.argv += ['-o', 'tests/saved.yaml']
    assert (Options().options == OptionsDict({
        "path_opts": "tests/saved.yaml",
        "message": "sgd",
        "sgd": True,
        "nested": {
            "message": "save"
        }
    }))
def test_load_yaml_opts():
    """Options.load_yaml_opts loads a file without touching the singleton."""
    reset_options_instance()
    opt = Options.load_yaml_opts('tests/default.yaml')
    assert (opt == OptionsDict({'message': 'default'}))
    assert Options._Options__instance is None
def test_merge_dictionaries():
    """merge_dictionaries merges dict2 into dict1 in place, keeping
    dict1-only keys and overwriting shared ones."""
    dict1 = {
        'exp': {
            'dir': 'lol1',
            'resume': None
        }
    }
    dict2 = {
        'exp': {
            'dir': 'lol2'
        }
    }
    dict1 = OptionsDict(dict1)
    dict2 = OptionsDict(dict2)
    merge_dictionaries(dict1, dict2)
    assert (dict1 == OptionsDict({'exp': OptionsDict({'dir': 'lol2', 'resume': None})}))
def test_as_dict():
    """OptionsDict.asdict() returns a plain :class:`dict` copy."""
    dict1 = {
        'exp': {
            'dir': 'lol1',
            'resume': None
        }
    }
    assert (dict1 == OptionsDict(dict1).asdict())
def test_initialize_options_source_dict_1():
    """A dict source with run_parser=False becomes both options and source."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False)
    assert Options().options == OptionsDict(source)
    assert Options().source == source
def test_initialize_options_source_dict_2():
    """With run_parser=True, CLI flags override values from the dict source."""
    reset_options_instance()
    sys.argv += ['-o', 'tests/default.yaml', '--model.network', 'mynet']
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=True)
    assert Options()['model']['network'] == 'mynet'
def test_initialize_options_source_dict_3():
    """Once initialized, the singleton ignores a second source."""
    reset_options_instance()
    source1 = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source1, run_parser=False)
    assert Options().options == OptionsDict(source1)
    assert Options().source == source1
    source2 = {
        'Micael': 'is the best',
        'Remi': 'is awesome',
    }
    Options(source2, run_parser=False)
    assert Options().options == OptionsDict(source1)
    assert Options().source == source1
def test_initialize_options_source_dict_4():
    """run_parser=True without -o on the command line must exit."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    with pytest.raises(SystemExit):
        Options(source, run_parser=True)
def test_initialize_options_source_optionsdict():
    """An OptionsDict source is kept as-is; source is its plain-dict copy."""
    reset_options_instance()
    source = OptionsDict({
        'dataset': 124,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    })
    Options(source, run_parser=False)
    assert Options().options == source
    assert Options().source == source.asdict()
def test_initialize_options_incorrect_source():
    """A non-mapping source raises TypeError."""
    reset_options_instance()
    source = 123
    with pytest.raises(TypeError):
        Options(source, run_parser=False)
def test_initialize_arguments_callback():
    """arguments_callback may rewrite parsed CLI arguments before the merge."""
    reset_options_instance()
    sys.argv += ['-o', 'tests/default.yaml']
    source = {
        'dataset': 'mydataset',
        'model': 'mymodel',
    }
    def arguments_callback_a(instance, arguments, options_dict):
        # Append 'a' to every argument value to prove the hook ran.
        arguments.dataset = arguments.dataset + 'a'
        arguments.model = arguments.model + 'a'
        return arguments
    Options(source, arguments_callback=arguments_callback_a)
    source_a = {
        'path_opts': 'tests/default.yaml',
        'dataset': 'mydataseta',
        'model': 'mymodela',
    }
    assert Options().options == OptionsDict(source_a)
    assert Options().source == source
def test_initialize_lock():
    """lock=True locks the options tree at construction time."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False, lock=True)
    assert Options().options.islocked()
def test_initialize_not_locked():
    """lock=False leaves the options tree mutable."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False, lock=False)
    assert not Options().options.islocked()
def test_setitem_1():
    """Top-level __setitem__ replaces a value."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()['abc'] = 'new value'
    assert Options()['abc'] == 'new value'
def test_setitem_2():
    """__setitem__ accepts dotted keys for nested values."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False)
    assert Options().options == source
    Options()['model.criterion'] = 'new value'
    assert Options()['model.criterion'] == 'new value'
def test_setitem_key_int():
    """Non-string (int) keys are supported by __setitem__/__getitem__."""
    reset_options_instance()
    source = {1: 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()[1] = 'new value'
    assert Options()[1] == 'new value'
def test_setitem_key_float():
    """Float keys are supported as well."""
    reset_options_instance()
    source = {1.2: 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()[1.2] = 'new value'
    assert Options()[1.2] == 'new value'
def test_setitem_key_bytes():
    """Bytes keys are supported as well."""
    reset_options_instance()
    source = {bytes(1): 123}
    Options(source, run_parser=False)
    assert Options().options == source
    Options()[bytes(2)] = 'new value'
    assert Options()[bytes(2)] == 'new value'
def test_getattr():
    """Options values are reachable as attributes."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    assert Options().abc == 123
def test_get_exist_value():
    """get() returns the stored value when the key exists."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    value = Options().get('abc', 'default value')
    assert value == 123
def test_get_default_value():
    """get() falls back to the supplied default for a missing key."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    value = Options().get('cba', 'default value')
    assert value == 'default value'
def test_has_key_true():
    """has_key() is True for a present key."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    assert Options().has_key('abc')
def test_has_key_false():
    """has_key() is False for an absent key."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().options == source
    assert not Options().has_key('cba')
def test_keys():
    """keys() exposes the top-level option names."""
    reset_options_instance()
    source = {
        'model': 'mymodel',
        'dataset': 'mydataset'
    }
    Options(source, run_parser=False)
    assert Options().options == source
    assert sorted(Options().keys()) == sorted(['model', 'dataset'])
def test_values():
    """values() exposes the top-level option values."""
    reset_options_instance()
    source = {
        'model': 'mymodel',
        'dataset': 'mydataset'
    }
    Options(source, run_parser=False)
    assert Options().options == source
    assert sorted(Options().values()) == sorted(['mymodel', 'mydataset'])
def test_items():
    """items() iterates (key, value) pairs."""
    reset_options_instance()
    source = {'model': 'mymodel'}
    Options(source, run_parser=False)
    assert Options().options == source
    for key, value in Options().items():
        assert key == 'model'
        assert value == 'mymodel'
def test_lock():
    """lock()/unlock() propagate recursively to nested OptionsDicts."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False)
    assert Options().options == source
    Options().unlock()
    assert not Options().options.islocked()
    assert not Options().options['model'].islocked()
    Options().lock()
    assert Options().options.islocked()
    assert Options().options['model'].islocked()
def test_unlock():
    """unlock() unlocks recursively and prints a warning per unlocked node."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False)
    assert Options().options == source
    Options().lock()
    assert Options().options.islocked()
    assert Options().options['model'].islocked()
    # Capture stdout to count the warning lines emitted by unlock().
    old_stdout = sys.stdout
    result = StringIO()
    sys.stdout = result
    Options().unlock()
    sys.stdout = old_stdout
    assert not Options().options.islocked()
    assert not Options().options['model'].islocked()
    result_string = result.getvalue()
    # Should print more than 3 times
    assert len(result_string.splitlines()) > 3
def test_lock_setitem():
    """Writing to a locked Options raises PermissionError."""
    reset_options_instance()
    source = {
        'dataset': 123,
        'model': {
            'criterion': 'I am a criterion',
            'network': 'I am a network',
        },
    }
    Options(source, run_parser=False)
    assert Options().options == source
    Options().lock()
    with pytest.raises(PermissionError):
        Options()['dataset'] = 421
def test_str_to_bool_yes():
    """str_to_bool accepts 'yes' in any casing."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().str_to_bool('yes')
    assert Options().str_to_bool('Yes')
    assert Options().str_to_bool('YES')
def test_str_to_bool_true():
    """str_to_bool accepts 'true' in any casing."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert Options().str_to_bool('true')
    assert Options().str_to_bool('True')
    assert Options().str_to_bool('TRUE')
def test_str_to_bool_no():
    """str_to_bool rejects 'no' in any casing."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert not Options().str_to_bool('no')
    assert not Options().str_to_bool('No')
    assert not Options().str_to_bool('NO')
def test_str_to_bool_false():
    """str_to_bool rejects 'false' in any casing."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    assert not Options().str_to_bool('false')
    assert not Options().str_to_bool('False')
    assert not Options().str_to_bool('FALSE')
def test_str_to_bool_incorrect():
    """str_to_bool raises ArgumentTypeError on unrecognized input."""
    reset_options_instance()
    source = {'abc': 123}
    Options(source, run_parser=False)
    with pytest.raises(argparse.ArgumentTypeError):
        Options().str_to_bool('incorrect')
def test_str():
    """__str__ produces JSON that parses back to the source mapping."""
    reset_options_instance()
    source = {'abc': 123, 'key1': 'value1'}
    Options(source, run_parser=False)
    assert Options().options == source
    str_representation = Options().__str__()
    opt_dict = json.loads(str_representation)
    assert isinstance(str_representation, str)
    assert opt_dict == source
def test_add_options():
    """CLI overrides are cast to the source value's type (int, float, list)."""
    reset_options_instance()
    sys.argv += [
        '-o', 'tests/default.yaml',
        '--dataset', '421',
        '--value', '2',
        '--model.metric', 'm1', 'm2',
    ]
    source = {
        'dataset': 123,
        'value': 1.5,
        'model': {
            'criterion': ['mse', 'l1'],
            'network': 'I am a network',
            'metric': [],
        },
        'useless': None,
    }
    Options(source, run_parser=True)
    assert Options()['dataset'] == 421
    assert Options()['value'] == 2
    assert isinstance(Options()['value'], float)  # '2' cast to float like 1.5
    assert Options()['model']['metric'] == ['m1', 'm2']
|
import argparse
import configparser
import logging
import sys
import time
from pathlib import Path

import pylast
# Root logger (configured in parse_args) and script start time, used by
# main() for the final runtime report.
logger = logging.getLogger()
temps_debut = time.time()
def lastfmconnect():
    """Build an authenticated pylast ``LastFMNetwork`` from config.ini.

    Expects a ``[lastfm]`` section with api_key, api_secret, username
    and password entries.
    """
    config = configparser.ConfigParser()
    config.read("config.ini")
    lastfm_cfg = config["lastfm"]
    return pylast.LastFMNetwork(
        api_key=lastfm_cfg["api_key"],
        api_secret=lastfm_cfg["api_secret"],
        username=lastfm_cfg["username"],
        # pylast expects the md5 hash of the password, not the clear text.
        password_hash=pylast.md5(lastfm_cfg["password"]),
    )
def main():
    """Export each requested user's loved tracks to a timestamped CSV.

    Users come from the -u/--username flag (comma-separated); one file
    per user is written under Exports/.
    """
    args = parse_args()
    network = lastfmconnect()
    if args.username:
        users = [x.strip() for x in args.username.split(",")]
    else:
        logger.error("Use the -u/--username flag to set an username.")
        # Bug fix: the bare site builtin exit() terminated with status 0;
        # a missing required flag is an error, so exit non-zero.
        sys.exit(1)
    Path("Exports").mkdir(parents=True, exist_ok=True)
    for username in users:
        logger.info("Extracting favorite tracks for %s.", username)
        user = network.get_user(username)
        loved_tracks = [x.track for x in user.get_loved_tracks(limit=None)]
        logger.info("%s tracks extracted for %s.", len(loved_tracks), user)
        with open(f"Exports/{int(time.time())}_{user}_favorite_tracks.csv", "w") as f:
            for track in loved_tracks:
                f.write(f"{track.artist} - {track.title}\n")
    # Lazy %-style args instead of eager string formatting.
    logger.info("Runtime : %.2f seconds", time.time() - temps_debut)
def parse_args():
    """Parse command-line arguments and configure logging from --debug."""
    arg_parser = argparse.ArgumentParser(
        description=(
            "Extract all favorite tracks from one or several lastfm users."
        )
    )
    arg_parser.add_argument(
        "--debug",
        action="store_const",
        dest="loglevel",
        const=logging.DEBUG,
        default=logging.INFO,
        help="Display debugging information.",
    )
    arg_parser.add_argument(
        "--username",
        "-u",
        type=str,
        help="Names of the users (separated by comma).",
    )
    parsed = arg_parser.parse_args()
    # Configure logging here so the chosen level applies process-wide.
    logging.basicConfig(level=parsed.loglevel)
    return parsed
if __name__ == "__main__":
main()
|
print("consume") |
# -*- coding: UTF-8 -*-
# Solve the quadratic equation a*x^2 + b*x + c = 0 with the quadratic
# formula. Prompts (in Portuguese) for the three coefficients and prints
# both roots; when the discriminant is negative (sqrt raises ValueError)
# or a == 0 (division by zero), the exception message is printed instead.
#import math
from math import sqrt
# a = input ("Valor A")
# b = input ("Valor B")
a = float (input("Valor A:"))
b = float (input("Valor B:"))
c = float (input("Valor C:"))
b2 = b**2
delta = b2 - (4*a*c)  # discriminant
try:
    raizquadrada = sqrt(delta)  # square root of the discriminant
    x1 = (-b-raizquadrada)/(2*a)
    x2 = (-b + raizquadrada)/(2*a)
    print ("x1="+ str(x1))
    print ("x2="+ str(x2))
except Exception as e:
    print(e)
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
try:
    from Cython.Distutils import build_ext
except ImportError:
    # Bug fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit;
    # only an ImportError should trigger the fallback.
    # NOTE(review): this imports the distutils build_ext *module*, not a
    # command class like the Cython import above — confirm downstream
    # usage accepts either.
    from distutils.command import build_ext
try:
    import numpy as np
except ImportError:
    # Re-raise with a clearer message; NumPy headers are required below.
    raise ImportError('NumPy must be installed.')
def get_extensions():
    """Return the Cython extension modules to build ('*' lets Cython name
    the module after the .pyx file)."""
    return [Extension('*', sources=['src/cultionet/networks/_build_network.pyx'])]
def setup_package():
    """Run setup() with cythonized extensions and the NumPy include dir."""
    metadata = dict(
        ext_modules=cythonize(get_extensions()),
        include_dirs=[np.get_include()]
    )
    setup(**metadata)
if __name__ == '__main__':
    setup_package()
|
import streamlit as st
import pandas as pd
import numpy as np
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split
from sklearn.metrics import plot_confusion_matrix, plot_roc_curve, plot_precision_recall_curve
from sklearn.metrics import precision_score, recall_score
def main():
    """Streamlit app: train a binary classifier on the mushroom data set.

    The sidebar lets the user pick SVM, logistic regression, or random
    forest, tune a few hyperparameters, and view accuracy / precision /
    recall plus optional evaluation plots.

    NOTE(review): plot_confusion_matrix / plot_roc_curve /
    plot_precision_recall_curve were removed in scikit-learn 1.2, so this
    app presumably targets an older sklearn — confirm the pinned version.
    """
    st.title("Binary classification web app")
    st.markdown("Is this mashroom edible or poisonous ?")
    st.sidebar.title("Binary classification web app")
    st.sidebar.markdown("Is this mashroom edible or poisonous ?")
    @st.cache(persist=True)
    def load_data():
        # Label-encode every column of the mushroom CSV (all categorical).
        data = pd.read_csv('mushrooms.csv')
        label = LabelEncoder()
        for col in data.columns:
            data[col] = label.fit_transform(data[col])
        return data
    @st.cache(persist=True)
    def split(df):
        # 70/30 train/test split; 'type' is the target column.
        y = df.type
        x = df.drop(columns=['type'])
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size = 0.3, random_state = 0)
        return x_train, x_test, y_train, y_test
    def plot_metrics(metric_list):
        # Render each selected evaluation plot for the trained model
        # (uses `model`, `x_test`, `y_test` from the enclosing scope,
        # assigned before this is called).
        if 'Confusion Matrix' in metric_list:
            st.subheader("Confusion Metrix")
            plot_confusion_matrix(model, x_test, y_test, display_labels=class_names)
            st.pyplot()
        if 'ROC Curve' in metric_list:
            st.subheader("ROC Curve")
            plot_roc_curve(model, x_test, y_test)
            st.pyplot()
        if 'Precision-Recall Curve' in metric_list:
            st.subheader('Precision-Recall Curve')
            plot_precision_recall_curve(model, x_test, y_test)
            st.pyplot()
    def show_scores(model, x_test, y_test):
        # Shared accuracy/precision/recall report for every classifier.
        accuracy = model.score(x_test, y_test)
        y_pred = model.predict(x_test)
        st.write("Accuracy: ", accuracy.round(2))
        st.write("Precision: ", precision_score(y_test, y_pred, labels=class_names).round(2))
        st.write("Recall: ", recall_score(y_test, y_pred, labels=class_names).round(2))
    df = load_data()
    x_train, x_test, y_train, y_test = split(df)
    class_names = ['edible', 'poisonous']
    if st.sidebar.checkbox("Show raw data", False):
        st.subheader("Mushroom data set")
        st.write(df)
    st.sidebar.subheader("Choose Classifier")
    classifier = st.sidebar.selectbox("Classifier", ("Support Vector Machine (SVM)", "Logistic Regression", "Random Forest Classifier"))
    if classifier == 'Support Vector Machine (SVM)':
        st.sidebar.subheader("Model Hyperparameters")
        c = st.sidebar.number_input("C (Regularization Parameter)", 0.01, 10.0, step = 0.01, key='C')
        kernel = st.sidebar.radio("Kernel", ("rbf", "linear"), key='kernel')
        gamma = st.sidebar.radio("Gamma Kernel Coefficient", ("scale", "auto"), key='gamma')
        metrics = st.sidebar.multiselect("What metrics to plot: ", ("Confusion Matrix", "ROC Curve", "Precision-Recall Curve"))
        if st.sidebar.button("Classify", key='clasify'):
            st.subheader("Support Vector Machine (SVM) Results")
            model = SVC(C=c, kernel=kernel, gamma=gamma)
            model.fit(x_train, y_train)
            show_scores(model, x_test, y_test)
            plot_metrics(metrics)
    if classifier == "Logistic Regression":
        st.sidebar.subheader("Model Hyperparameters")
        c = st.sidebar.number_input("C (Regularization Parameter)", 0.01, 10.0, step = 0.01, key='C')
        max_iter = st.sidebar.slider("Maximum number of iteration", 100, 500, key='max_iteration')
        metrics = st.sidebar.multiselect("What metrics to plot: ", ("Confusion Matrix", "ROC Curve", "Precision-Recall Curve"))
        if st.sidebar.button("Classify", key='classify'):
            st.subheader("Logistic Regression Results")
            model = LogisticRegression(C=c, max_iter=max_iter)
            model.fit(x_train, y_train)
            show_scores(model, x_test, y_test)
            plot_metrics(metrics)
    if classifier == "Random Forest Classifier":
        st.sidebar.subheader("Model Hyperparameters")
        n_estimators = st.sidebar.number_input("The number of Trees in the forest", 100, 5000, step=10, key='n_estimators' )
        max_depth = st.sidebar.number_input("Maximum depth of the tree", 1, 20, step=1, key='max_depth')
        bootstrap = st.sidebar.radio("Bootstrap samples when building trees", ("True", "False"))
        metrics = st.sidebar.multiselect("What metrics to plot: ", ("Confusion Matrix", "ROC Curve", "Precision-Recall Curve"))
        if st.sidebar.button("Classify", key='classify'):
            st.subheader("Random Forest Results")
            # Bug fix: st.sidebar.radio returns the *string* "True"/"False".
            # Passing it straight to RandomForestClassifier meant bootstrap
            # was always truthy regardless of the user's choice; convert to
            # a real bool.
            model = RandomForestClassifier(n_estimators=n_estimators, max_depth=max_depth, bootstrap=(bootstrap == "True"), n_jobs=-1)
            model.fit(x_train, y_train)
            show_scores(model, x_test, y_test)
            plot_metrics(metrics)
if __name__ == '__main__':
    main()
|
import numpy as np
import argparse
from depth_evaluation_utils import compute_error_3d
# CLI is parsed at import time so `args` is available as a module-level
# global to the functions below (means this file cannot be imported as a
# library without the flags on the command line).
parser = argparse.ArgumentParser()
parser.add_argument("--pred_file", type=str, help="Path to the prediction file")
parser.add_argument("--gt_file", type=str, help="Path to the Ground truth file")
parser.add_argument('--min_limit', type=float, default=1e-3, help="Threshold for minimum depth")
parser.add_argument('--max_limit', type=float, default=80, help="Threshold for maximum depth")
args = parser.parse_args()
def compute_error_angle(gt, pred, plus=1e-5):
    """Angular-error statistics between ground-truth and predicted normals.

    Parameters
    ----------
    gt, pred : ``[N, 3]`` arrays of (not necessarily unit) normal vectors.
    plus : small epsilon added to the norm product to avoid division by
        zero for degenerate (near-zero) vectors.

    Returns
    -------
    tuple
        ``(rmse, mean, median, a1, a2, a3)`` where ``rmse`` is over the
        angles in radians (sqrt of the *sum*, not the mean, matching the
        original implementation), ``mean``/``median`` are in degrees, and
        ``a1``/``a2``/``a3`` are the fractions of angles below
        11.5 / 22.5 / 30 degrees.
    """
    num = gt.shape[0]
    dot_product = np.sum(np.multiply(gt, pred), axis=1)
    norm_gt = np.linalg.norm(gt, axis=1)
    norm_pred = np.linalg.norm(pred, axis=1)
    # Bug fix: the epsilon parameter `plus` was declared but ignored in
    # favor of a hard-coded 1e-5 (identical behavior for the default).
    mcos = dot_product / (np.multiply(norm_gt, norm_pred) + plus)
    # Clip guards against |cos| marginally exceeding 1 from rounding.
    radients = np.arccos(np.clip(mcos, -1, 1))
    angle = np.degrees(radients)
    a1 = len(angle[angle < 11.5]) / num
    a2 = len(angle[angle < 22.5]) / num
    a3 = len(angle[angle < 30]) / num
    a_mean = np.mean(angle)
    a_median = np.median(angle)
    rmse = np.sqrt(np.sum(radients ** 2))
    return rmse, a_mean, a_median, a1, a2, a3
def main():
    """Evaluate predicted surface normals against ground truth.

    Loads the .npy arrays named by --gt_file/--pred_file, masks out
    pixels whose squared-norm falls outside [--min_limit, --max_limit],
    applies the Garg/Eigen-style center crop, and prints angular-error
    statistics averaged over the test set.

    NOTE(review): assumes both arrays are shaped (num_test, H, W, 3) —
    confirm against the files produced upstream.
    """
    gt_norm = np.load(args.gt_file)
    pred_norm = np.load(args.pred_file)
    print("Normal prediction and groundtruth loaded...")
    print(gt_norm.shape)
    num_test = gt_norm.shape[0]
    # Per-image accumulators. Only the a_* angular stats are filled in
    # below; the depth-style metrics (rms, abs_rel, ...) are leftovers
    # from the commented-out compute_error_3d path.
    rms     = np.zeros(num_test, np.float32)
    log_rms = np.zeros(num_test, np.float32)
    abs_rel = np.zeros(num_test, np.float32)
    sq_rel  = np.zeros(num_test, np.float32)
    d1_all  = np.zeros(num_test, np.float32)
    a1      = np.zeros(num_test, np.float32)
    a2      = np.zeros(num_test, np.float32)
    a3      = np.zeros(num_test, np.float32)
    a_mean  = np.zeros(num_test, np.float32)
    a_median= np.zeros(num_test, np.float32)
    a_rmse= np.zeros(num_test, np.float32)
    a_a1 = np.zeros(num_test, np.float32)
    a_a2 = np.zeros(num_test, np.float32)
    a_a3 = np.zeros(num_test, np.float32)
    for i in range(num_test):
        gt_normi = gt_norm[i]
        pred_normi = np.copy(pred_norm[i])
        # Per-pixel squared norm of the predicted normal. (The `i` in the
        # comprehension is scoped to it and does not clobber the loop index.)
        mask_norm = sum([pred_normi[:,:,i]**2 for i in range(3)])
        # Keep pixels whose squared norm is within the configured limits.
        mask = np.logical_and(mask_norm > args.min_limit,
                              mask_norm < args.max_limit)
        # crop used by Garg ECCV16 to reprocude Eigen NIPS14 results
        # if used on gt_size 370x1224 produces a crop of [-218, -3, 44, 1180]
        gt_height, gt_width = gt_normi.shape[:2]
        crop = np.array([0.40810811 * gt_height,  0.99189189 * gt_height,
                         0.03594771 * gt_width,   0.96405229 * gt_width]).astype(np.int32)
        crop_mask = np.zeros(mask.shape)
        crop_mask[crop[0]:crop[1],crop[2]:crop[3]] = 1
        mask = np.logical_and(mask, crop_mask)
        # mask = np.ones([gt_height, gt_width]).astype(bool)
        # Scale matching
        # scalor = np.median(gt_normi[mask])/np.median(pred_normi[mask])
        # pred_normi[mask] *= scalor
        # Clamp out-of-range predictions to the limit values.
        pred_normi[mask_norm < args.min_limit,:] = [args.min_limit]*3
        pred_normi[mask_norm > args.max_limit,:] = [args.max_limit]*3
        # abs_rel[i], sq_rel[i], rms[i], log_rms[i], a1[i], a2[i], a3[i] = \
        #     compute_error_3d(gt_normi[mask], pred_normi[mask])
        a_rmse[i],a_mean[i],a_median[i], a_a1[i], a_a2[i], a_a3[i] = \
            compute_error_angle(gt_normi[mask], pred_normi[mask])
    print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('rms', 'mean', 'median', 'a1', 'a2', 'a3'))
    print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(a_rmse.mean(), a_mean.mean(), a_median.mean(), a_a1.mean(), a_a2.mean(), a_a3.mean()))
    # print("{:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}, {:>10}".format('abs_rel', 'sq_rel', 'rms', 'log_rms', 'd1_all', 'a1', 'a2', 'a3'))
    # print("{:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}, {:10.4f}".format(abs_rel.mean(), sq_rel.mean(), rms.mean(), log_rms.mean(), d1_all.mean(), a1.mean(), a2.mean(), a3.mean()))
main() |
import logging
import os
import sys
import itertools
import json
import torch
from misc import util
from .executor import ExecutorTrainer
class IliadTrainer(ExecutorTrainer):
    """Trainer implementing an ILIAD-style student/teacher rollout loop.

    Each iteration the student predicts navigation paths from language
    instructions, the teacher describes and scores those paths, and the
    student learns from the (instruction, description, path) tuples.
    """
    def _remove_loops_in_paths(self, paths):
        """Return copies of *paths* with revisit loops cut out.

        When a viewpoint repeats, everything from its first visit up to
        (and including) the repeat is popped, so the returned path never
        visits the same viewpoint twice.
        """
        new_paths = []
        for path in paths:
            new_path = []
            visited_viewpoints = set()
            for u in path:
                if u in visited_viewpoints:
                    # Pop back to (and including) the earlier visit of u.
                    while new_path:
                        v = new_path.pop()
                        visited_viewpoints.remove(v)
                        if v == u:
                            break
                visited_viewpoints.add(u)
                new_path.append(u)
            new_paths.append(new_path)
        return new_paths
    def do_rollout(self, batch, student, teacher, is_eval, should_log=False):
        """Run one exploration rollout on *batch* and feed it to the student.

        Returns a dict of summed statistics (reward, path/description
        lengths, counts of valid and ground-truth descriptions).
        NOTE(review): `is_eval` is accepted but never used here — confirm
        whether evaluation-mode behavior was intended.
        """
        batch_size = len(batch)
        init_poses = []
        goal_viewpoints = []
        instructions = []
        batch_size = len(batch)  # NOTE(review): duplicate of the assignment above
        for item in batch:
            # Pose = (scan, start viewpoint, heading, elevation=0).
            pose = (item['scan'], item['path'][0], item['heading'], 0)
            init_poses.append(pose)
            goal_viewpoints.append(item['path'][-1])
            instructions.append(item['instruction'])
        if should_log:
            logging.info('')
            logging.info('  instr_id %s scan %s heading %f' %
                (batch[0]['instr_id'], batch[0]['scan'], batch[0]['heading']))
            logging.info('  gold path: %s' % str(batch[0]['path']))
        teacher.receive_simulation_data(batch)
        student.reset()
        # Sample exploration paths, then strip any loops before describing.
        pred_paths, _ = student.predict(
            init_poses, instructions, sample=True, model_name='exploration')
        pred_paths = self._remove_loops_in_paths(pred_paths)
        descriptions, num_gt_d_hat = teacher.describe(init_poses, pred_paths)
        # ['<PAD>'] marks a description the teacher could not produce.
        is_valid = [d != ['<PAD>'] for d in descriptions]
        if should_log:
            logging.info('  pred path: %s' % str(pred_paths[0]))
            logging.info('  instruction: %s' % ' '.join(instructions[0]))
            logging.info('  description: %s' % ' '.join(descriptions[0]))
            metric = teacher.eval(batch[0]['scan'], batch[0]['path'], pred_paths[0])
            logging.info('  metric: %s' % str(metric))
        student.receive(init_poses, instructions, descriptions, pred_paths, is_valid)
        stats = {
            'reward'      : self.compute_reward(teacher, batch, pred_paths),
            'e_hat_len'   : sum([len(e) for e in pred_paths]),
            'd_star_len'  : sum([len(d) for d in instructions]),
            'd_hat_len'   : sum([len(d) for d in descriptions if d != ['<PAD>']]),
            'num_d_hat'   : sum([d != ['<PAD>'] for d in descriptions]),
            'num_gt_d_hat': num_gt_d_hat
        }
        return stats
    def compute_reward(self, teacher, batch, pred_paths):
        """Sum the teacher's 'score' metric over all predicted paths."""
        total_reward = 0
        for i, pred_path in enumerate(pred_paths):
            item = batch[i]
            scan = item['scan']
            gold_path = item['path']
            metric = teacher.eval(scan, pred_path, gold_path)
            total_reward += metric['score']
        return total_reward
    def save_train_info(self, name, train_info):
        """Serialize *train_info* to <experiment_dir>/<name>.info."""
        file_path = '%s/%s' % (self.config.experiment_dir, name + '.info')
        torch.save(train_info, file_path)
        logging.info('Saved train info to %s' % file_path)
    def load_train_info(self, file_path):
        """Load the .info file paired with a checkpoint path (.ckpt)."""
        file_path = file_path.replace('ckpt', 'info')
        train_info = torch.load(file_path)
        return train_info
    def train(self, datasets, student, teacher):
        """Main training loop: roll out, learn, decay the unsupervised
        weight, periodically log, evaluate, and checkpoint."""
        max_iters = self.config.trainer.max_iters
        log_every = self.config.trainer.log_every
        log_rate = self.config.trainer.log_rate
        metric_name = self.config.trainer.main_metric_name
        unsup_weight_config = self.config.trainer.unsup_weight
        train_info = {
            'i_iter'         : 0,
            'num_examples'   : 0,
            'best_eval_score': teacher.init_metric_value(metric_name),
            'unsup_weight'   : unsup_weight_config.init,
            'stats': {
                'loss'         : 0,
                'reward'       : 0,
                'e_hat_len'    : 0,
                'd_star_len'   : 0,
                'd_hat_len'    : 0,
                'num_d_hat'    : 0,
                'num_gt_d_hat' : 0,
            }
        }
        data_iter = datasets['train'].iterate_batches()
        if self.config.resume and hasattr(self.config.student.model, 'load_from'):
            # Resume: restore counters and continue the data iterator from
            # the saved position.
            train_info = self.load_train_info(
                self.config.student.model.load_from)
            data_iter = datasets['train'].iterate_batches(
                data_idx=train_info['data_idx'],
                data_indices=train_info['data_indices'])
            train_info['data_indices'] = train_info['data_indices'][:10]  # NOTE(review): truncates the resume indices to 10 — looks like leftover debug code; confirm
            logging.info('Loaded train info %s' % str(train_info))
        for batch in data_iter:
            train_info['i_iter'] += 1
            train_info['num_examples'] += len(batch)
            should_log = (train_info['i_iter'] % log_every == 0)
            should_save = (train_info['i_iter'] % (log_every * log_rate) == 0)
            stats = self.do_rollout(batch, student, teacher, False,
                should_log=should_log)
            for k in stats:
                train_info['stats'][k] += stats[k]
            loss = student.learn(train_info['unsup_weight'])
            train_info['stats']['loss'] += loss
            # Decay unsup weight
            if train_info['i_iter'] % unsup_weight_config.decay_every == 0:
                train_info['unsup_weight'] = max(
                    train_info['unsup_weight'] * unsup_weight_config.rate,
                    unsup_weight_config.min)
                logging.info('')
                logging.info('Train iter %d: decay unsup weight = %.5f' %
                    (train_info['i_iter'], train_info['unsup_weight']))
            if should_log:
                # Aggregate running statistics into a single log line.
                log_str = 'Train iter %d (%d%%): ' % (
                    train_info['i_iter'],
                    train_info['i_iter'] / max_iters * 100)
                stat_strs = []
                stat_strs.append('lambda = %.5f' % train_info['unsup_weight'])
                stat_strs.append('num_examples = %d' % train_info['num_examples'])
                for stat_name, stat_value in train_info['stats'].items():
                    if stat_name == 'loss':
                        stat = stat_value / train_info['i_iter']
                    elif stat_name == 'd_hat_len':
                        stat = stat_value / train_info['stats']['num_d_hat']
                    elif stat_name in ['reward', 'num_d_hat', 'num_gt_d_hat']:
                        stat = stat_value
                    else:
                        stat = stat_value / train_info['num_examples']
                    stat_strs.append('%s = %.3f' % (stat_name, stat))
                log_str += ', '.join(stat_strs)
                logging.info('')
                logging.info(log_str)
            # Save best model
            if should_save:
                eval_info = self.evaluate(datasets['val'], student, teacher)
                eval_preds = eval_info['pred']
                eval_score = eval_info['metric'][metric_name]
                if teacher.is_better(metric_name, eval_score, train_info['best_eval_score']):
                    logging.info('New best score: %.1f' % eval_score)
                    train_info['best_eval_score'] = eval_score
                    student.save('best_val')
                    self.save_preds('best_val', eval_preds)
                # Update data indices
                train_info['data_idx'] = datasets['train'].idx
                train_info['data_indices'] = datasets['train'].indices
                # Save last model
                student.save('last')
                self.save_train_info('last', train_info)
                self.save_preds('last', eval_preds)
            if train_info['i_iter'] >= max_iters:
                break
|
#프로그램을 구성하는 독립적인 단위를
#각각 정의하고 관리하는 방법
#자주 사용하는 일반적인 기능은 모듈로 한번 만들어 두면
#필요할때 마다 도입해서 활용 할 수 있다.
#모듈 : 관련성 있는 데이터들, 함수,클래스
#모듈을 사용하려면 import 명령으로 인터프리터에게
#사용여부를 알려야 한다
#import random
# 인터프리에게 사용여부를 알림 (import)
#import random
#import random as r #별칭으로줄여쓰기
#from random import randint #모듈명 줄여쓰기
#from math import pi
#from math import sqrt
#from math import pi,sqrt #추천
#from math import * # 비추
#import Lab03 #우리가 작성한 함수가 있는 파일
#import math
#import lkljlh1001
#모듈을 호출할때는 모듈명(파일이름).함수명
#print (r.radint(1,10))
#print(randint(1,10))
#Lab03.isLeapYear()
#print(math.pi)
#print(pi)
#print(math.sqrt(9))
#print(sqrt(9))
#모듈 호출시 이름을 별칭을로 바꿔 정의
# import 모듈이름 as 별칭
#함수 호출시 모듈명까지 기술하는 것은 왠지 불편
#from 모듈명 import 함수명
#사용자가 만든 모듈을 다른 파일에서 참조하려면
# 두 파일이 모두 같은 위치에 있어야함
#즉, 프로젝드내에서 서로 참조하려면
#이 파일들은 같은 위치에 저장되어 있어야함
#한편, python IDE나 다른 프로젝트에서 모듈을
#참조하려면 pythonPath 가 정의한 위치에
#모듈을 저장해둔다
#파이썬설치위치 나 (파이썬 설치위치 /Lib)
#lkljlh1001.isLeapYear()
#파이썬 패키지
#다수의 개발자가 만든 모듈의 이름이 서로 같을 경우
# 파이썬에서는 패키지라는 개념을 이용해서 해결
# .연산자를 이용해서 모듈을 계층적(디렉토리)으로 관리
#파이썬에서 디렉토리가 패키지로 인식되려면
#__init__.py라는 파일이 반드시 있어야함
# Import sayHello from the user-defined package lkljlh1002 and run it.
# (For the directory to be importable as a package it must contain __init__.py.)
from lkljlh1002.hello import sayHello
sayHello()
|
from django.urls import reverse_lazy
from django.views import generic
from django.contrib.auth.forms import UserCreationForm
from .models import User
class MyUserCreationForm(UserCreationForm):
    """Sign-up form bound to this project's custom User model."""
    class Meta(UserCreationForm.Meta):
        model = User          # custom user model instead of auth.User
        fields = ['username']
class UserCreate(generic.edit.CreateView):
    """Registration view: renders and processes MyUserCreationForm."""
    form_class = MyUserCreationForm
    template_name = 'registration/register.html'
    # After successful sign-up, redirect to the "registration done" page.
    success_url = reverse_lazy('accounts:register-done')
class UserCreateDone(generic.TemplateView):
    """Static confirmation page shown after a successful registration."""
    template_name = 'registration/register_done.html'
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ZhimaCreditEpSceneRatingInitializeModel(object):
    """Request model for the zhima.credit.ep.scene.rating.initialize API.

    Plain value object: every field defaults to None, is exposed through a
    read/write property, and is (de)serialized by to_alipay_dict /
    from_alipay_dict.
    """

    # Serialized field names, in the order they appear in the wire dict.
    _FIELDS = (
        'apply_amount',
        'biz_ext_param',
        'credit_category',
        'ep_cert_no',
        'ep_name',
        'evaluate_type',
        'm_category',
        'member_type',
        'out_order_no',
        'product_code',
        'user_id',
    )

    def __init__(self):
        # Backing slots (_<field>) all start out unset.
        for field in self._FIELDS:
            setattr(self, '_' + field, None)

    def _accessor(field):
        """Build a get/set property pair over the backing attribute _<field>."""
        hidden = '_' + field
        return property(
            lambda self: getattr(self, hidden),
            lambda self, value: setattr(self, hidden, value),
        )

    apply_amount = _accessor('apply_amount')
    biz_ext_param = _accessor('biz_ext_param')
    credit_category = _accessor('credit_category')
    ep_cert_no = _accessor('ep_cert_no')
    ep_name = _accessor('ep_name')
    evaluate_type = _accessor('evaluate_type')
    m_category = _accessor('m_category')
    member_type = _accessor('member_type')
    out_order_no = _accessor('out_order_no')
    product_code = _accessor('product_code')
    user_id = _accessor('user_id')

    del _accessor  # class-construction helper only; not part of the API

    def to_alipay_dict(self):
        """Serialize the set fields into a plain dict.

        Nested objects providing to_alipay_dict() are serialized recursively.
        Falsy values (None, 0, '', ...) are omitted, matching the behavior of
        the generated Alipay SDK models.
        """
        params = dict()
        for field in self._FIELDS:
            value = getattr(self, field)
            if not value:
                continue
            if hasattr(value, 'to_alipay_dict'):
                value = value.to_alipay_dict()
            params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a model from a response dict; returns None for empty input."""
        if not d:
            return None
        o = ZhimaCreditEpSceneRatingInitializeModel()
        for field in ZhimaCreditEpSceneRatingInitializeModel._FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o
|
import unittest
import xr
class TestSuccessResult(unittest.TestCase):
    """Checks the xr.Success result type."""

    EXPECTED_DOC = "Function successfully completed."

    def setUp(self):
        self.exc = xr.Success()

    def test_docstring(self):
        self.assertEqual(self.EXPECTED_DOC, self.exc.__doc__)

    def test_construct(self):
        self.assertEqual(self.EXPECTED_DOC, str(self.exc))
        self.assertEqual("Bar", str(xr.Success("Bar")))
        # Success is never an exception, whatever the qualified-success flag.
        for flag in (False, True):
            xr.exceptions.raise_on_qualified_success = flag
            self.assertFalse(self.exc.is_exception())

    def test_methods(self):
        self.assertEqual(xr.Result.SUCCESS, self.exc.get_result_enum())
        for flag in (False, True):
            xr.exceptions.raise_on_qualified_success = flag
            self.assertFalse(self.exc.is_exception())
class TestTimeoutExpired(unittest.TestCase):
    """Checks the xr.TimeoutExpired result type."""

    EXPECTED_DOC = ("The specified timeout time occurred before the "
                    "operation could complete.")

    def setUp(self):
        self.exc = xr.TimeoutExpired()

    def test_docstring(self):
        self.assertEqual(self.EXPECTED_DOC, self.exc.__doc__)

    def test_construct(self):
        self.assertEqual(self.EXPECTED_DOC, str(self.exc))
        self.assertEqual("Bar", str(xr.TimeoutExpired("Bar")))

    def test_methods(self):
        self.assertEqual(xr.Result.TIMEOUT_EXPIRED, self.exc.get_result_enum())
        # TimeoutExpired is a qualified success: it only counts as an
        # exception when raise_on_qualified_success is enabled.
        xr.exceptions.raise_on_qualified_success = False
        self.assertFalse(self.exc.is_exception())
        xr.exceptions.raise_on_qualified_success = True
        self.assertTrue(self.exc.is_exception())
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
|
from __future__ import with_statement
import codecs
import os
from django.conf import settings
from django.test import TestCase, RequestFactory
from django.utils.encoding import smart_str
from django_gears.finders import AppFinder
from django_gears.views import serve
# Directories used by the tests: fixtures and app assets both live
# alongside this test module.
TESTS_DIR = os.path.dirname(__file__)
FIXTURES_DIR = os.path.join(TESTS_DIR, 'fixtures')
# Reuse TESTS_DIR instead of recomputing os.path.dirname(__file__).
APP_ASSETS = os.path.join(TESTS_DIR, 'assets')
def read(file):
    """Return the entire contents of *file*, decoded as UTF-8."""
    with codecs.open(file, encoding='utf-8') as stream:
        return stream.read()
class AppFinderTests(TestCase):
    """Tests for AppFinder asset discovery and serving via django-gears."""

    def setUp(self):
        self.factory = RequestFactory()
        # Force DEBUG on so the serve() view is allowed to run.
        self.old_DEBUG = settings.DEBUG
        settings.DEBUG = True

    def tearDown(self):
        settings.DEBUG = self.old_DEBUG

    def get_response(self, path, data=None):
        """Issue a GET for a static asset and run it through serve()."""
        request = self.factory.get('/static/' + path, data or {})
        return serve(request, path)

    def get_app_asset(self, path):
        """Read an asset from the app assets directory as UTF-8 text."""
        full_path = os.path.join(APP_ASSETS, path)
        return smart_str(read(full_path), 'utf-8')

    def test_finder(self):
        expected = ('js/test_app_finder.js',
                    os.path.join(APP_ASSETS, 'js', 'test_app_finder.js'))
        self.assertItemsEqual(AppFinder().list('js'), (expected,))

    def test_serve(self):
        response = self.get_response('js/test_app_finder.js')
        self.assertEqual(response.content,
                         self.get_app_asset('js/test_app_finder.js'))
|
import cv2
import numpy as np
def findSpaces(line, thres_space):
    """Return the x-coordinates of inter-word spaces in a binarized text line.

    Scans the vertical projection of `line` (a 2-D image, ink as non-zero
    pixels) for runs of blank columns; runs wider than `thres_space` are
    treated as word gaps and the mean column index of each gap is returned.

    :param line: 2-D image of one text line
    :param thres_space: minimum blank-run width (columns) counted as a space
    :return: list of x-coordinates, one per detected space
    """
    # Average each column down to a single row; a fully blank column
    # projects to 0.
    verProj = cv2.reduce(line, 0, cv2.REDUCE_AVG)
    # Boolean histogram: True marks blank (candidate space) columns.
    th = 0  # ink threshold: column averages <= th count as blank
    hist = verProj <= th
    # Get mean coordinate of each group of blank columns.
    xcoords = []
    x = 0          # running sum of column indices in the current blank run
    count = 0      # width of the current blank run
    isSpace = False
    for i in range(0, line.shape[1]):
        if not isSpace:
            if hist[0][i]:
                # A blank run starts: begin accumulating column indices.
                isSpace = True
                count = 1
                x = i
        else:
            if not hist[0][i]:
                # Run ended. Smoothing can break thin letters into narrow
                # blank runs, so only runs wider than the threshold count
                # as real spaces.
                isSpace = False
                if count > thres_space:
                    xcoords.append(x // count)
            else:
                x = x + i
                count = count + 1
    # Flush the data left over at the end of the scan. Guarding on count
    # avoids the original's ZeroDivisionError on lines with no blank column.
    # NOTE(review): as in the original, this also re-emits the last closed
    # run when the line does not end in a space -- confirm intended.
    if count:
        xcoords.append(x // count)
    return xcoords
def SpacesMedian(line):
    """Collect the running widths of every blank-column run in a text line.

    Unlike findSpaces(), no threshold is applied: widths of all blank runs
    (including narrow inter-character gaps) are recorded so the caller can
    derive a statistic (e.g. a space threshold) from them.

    :param line: 2-D image of one text line
    :return: list of blank-run width counts
    """
    # Average each column down to a single row; a fully blank column
    # projects to 0.
    verProj = cv2.reduce(line, 0, cv2.REDUCE_AVG)
    th = 0  # ink threshold: column averages <= th count as blank
    hist = verProj <= th
    count = 0
    isSpace = False
    median_count = []
    for i in range(0, line.shape[1]):
        if not isSpace:
            if hist[0][i]:
                # A blank run starts.
                isSpace = True
                count = 1
        else:
            if not hist[0][i]:
                # Run closed: record its final width.
                isSpace = False
                median_count.append(count)
            else:
                # Run continues: the growing width is recorded as well
                # (behavior preserved from the original implementation).
                count = count + 1
                median_count.append(count)
    # NOTE: the original also built an unused `xcoords` list whose final
    # unconditional `x // count` raised ZeroDivisionError on lines without
    # any blank column; that dead code is removed here.
    return median_count
def get_spaces_threshold(ycoords, img_for_det) :
    """Derive the minimum blank-run width that separates words on a page.

    :param ycoords: y-coordinates of the detected text-line boundaries
    :param img_for_det: 2-D page image the lines are cut from
    :return: half the mean width of the surviving (word-gap) blank runs
    """
    ## Find Median for setting threshold
    medianList = []
    for i in range ( 0, len(ycoords)-1 ):
        # Slice one text line (rows between consecutive boundaries).
        line = img_for_det[range(ycoords[i],ycoords[i+1])]
        medianList.append(SpacesMedian(line))
    #medianList contains count of each blank columns found in all lines
    #including spaces found between each characters too
    #find the row among medianList[] with maximum length
    max_len = len(medianList[0])
    max_in = 0 #for index number
    for i in range (0, len(medianList)):
        if max_len < len(medianList[i]):
            max_len = len(medianList[i])
            max_in = i
    #sort the row having the maximum no. of elements (decending order)
    mList = sorted(medianList[max_in],reverse=True)
    #delete elements produced from the page's margin
    # NOTE(review): assumes the three widest runs come from page margins --
    # confirm for pages without wide margins.
    mList = np.delete(mList, [0,1,2])
    #print('mList',mList)
    # Drop runs less than half as wide as the widest remaining run;
    # iterating backwards so deletions do not shift unvisited indices.
    firstItem = mList[0]
    for i in range (len(mList)-1, 0, -1):
        if mList[i] < firstItem/2:
            mList = np.delete(mList,i)
    # Threshold = half the mean width of the surviving word-gap runs.
    mean = np.mean(mList)
    threshold_space = mean/2
    return threshold_space
|
import math
import os
from pathlib import Path
from utils.cacti_config import cacti_config
################################################################################
# MEMORY CLASS
#
# This class stores the infromation about a specific memory that is being
# generated. This class takes in a process object, the infromation in one of
# the items in the "sram" list section of the json configuration file, and
# finally runs cacti to generate the rest of the data.
################################################################################
class Memory:
    """One SRAM macro being generated: runs cacti and parses its results."""

    def __init__( self, process, sram_data , output_dir = None, cacti_dir = None):
        """Collect memory parameters, run cacti, and parse the output CSV.

        :param process: process/technology description object
        :param sram_data: one entry of the json config's "sram" list
        :param output_dir: optional results directory (default: ./results/<name>)
        :param cacti_dir: optional cacti build dir (default: $CACTI_BUILD_DIR)
        """
        self.process = process
        self.name = str(sram_data['name'])
        self.width_in_bits = int(sram_data['width'])
        self.depth = int(sram_data['depth'] )
        self.num_banks = int(sram_data['banks'] )
        self.rw_ports = 1
        self.width_in_bytes = math.ceil(self.width_in_bits / 8.0)
        self.total_size = self.width_in_bytes * self.depth
        if output_dir: # Output dir was set by command line option
            p = str(Path(output_dir).expanduser().resolve(strict=False))
            self.results_dir = os.sep.join([p, self.name])
        else:
            self.results_dir = os.sep.join([os.getcwd(), 'results', self.name])
        if not os.path.exists( self.results_dir ):
            os.makedirs( self.results_dir )
        if cacti_dir:
            self.cacti_dir = cacti_dir
        else:
            # Fall back to the environment; raises KeyError if unset.
            self.cacti_dir = os.environ['CACTI_BUILD_DIR']
        self.__run_cacti()
        # Parse the second line of cacti's CSV output.
        # NOTE(review): the column indices below encode a specific cacti
        # output format -- confirm them against the cacti version in use.
        with open( os.sep.join([self.results_dir, 'cacti.cfg.out']), 'r' ) as fid:
            lines = [line for line in fid]
        cacti_data = lines[1].split(',')
        self.standby_leakage_per_bank_mW = float(cacti_data[11])
        self.access_time_ns = float(cacti_data[5])
        self.cycle_time_ns = float(cacti_data[6])
        self.dynamic_read_power_mW = float(cacti_data[10])
        self.aspect_ratio = float(cacti_data[31])
        self.area_um2 = float(cacti_data[12])*1e6
        self.fo4_ps = float(cacti_data[30])
        self.cap_input_pf = float(cacti_data[32])
        # Derive width/height from area and aspect ratio, then snap each
        # dimension up to the process placement grid (snap*_nm).
        self.width_um = math.sqrt( self.area_um2 * self.aspect_ratio )
        self.height_um = math.sqrt( self.area_um2 / self.aspect_ratio )
        self.width_um = (math.ceil((self.width_um*1000.0)/self.process.snapWidth_nm)*self.process.snapWidth_nm)/1000.0
        self.height_um = (math.ceil((self.height_um*1000.0)/self.process.snapHeight_nm)*self.process.snapHeight_nm)/1000.0
        self.area_um2 = self.width_um * self.height_um
        self.pin_dynamic_power_mW = (0.5 * self.cap_input_pf * (float(self.process.voltage)**2))*1e9 ;# P = 0.5*CV^2
        self.t_setup_ns = self.access_time_ns ;# access time is clk to Q, assume that data to "reg" is about the same.
        self.t_hold_ns = 0

    # __run_cacti: shell out to cacti to generate a csv file with more data
    # regarding this memory based on the input parameters from the json
    # configuration file.
    def __run_cacti( self ):
        # Fill the cacti config template with this memory's parameters.
        fid = open(os.sep.join([self.results_dir,'cacti.cfg']), 'w')
        fid.write( '\n'.join(cacti_config).format( self.total_size
                  , self.width_in_bytes, self.rw_ports, 0, 0
                  , self.process.tech_um, self.width_in_bits, self.num_banks ))
        fid.close()
        # cacti must run from its own directory; restore the cwd afterwards.
        odir = os.getcwd()
        os.chdir(self.cacti_dir )
        cmd = os.sep.join(['.','cacti -infile ']) + os.sep.join([self.results_dir,'cacti.cfg'])
        os.system( cmd)
        os.chdir(odir)
|
import rasterio
def test_band():
    """rasterio.band() must wrap a dataset band with consistent metadata."""
    with rasterio.open('tests/data/RGB.byte.tif') as src:
        band = rasterio.band(src, 1)
        assert band.ds == src
        assert band.bidx == 1
        assert band.dtype in src.dtypes
        assert band.shape == src.shape
|
from django.shortcuts import render, redirect
from django.views import View
from lisibilite.Lisibilite import Lisibilite
class HomePageView(View):
    """Renders the landing page."""

    def get(self, request):
        """Handle the HTTP GET request for the home page.

        :param request: The django HTTP request object
        :return: The render object
        """
        return render(request, 'index.html')
class UserInputPageView(View):
    """Accepts user text and stores the computed metrics in the session."""

    def get(self, request):
        """Handle the HTTP GET request for the text input page.

        :param request: The django HTTP request object
        :return: The render object
        """
        return render(request, 'userinput.html')

    def setOutputValuesToSession(self, request, outputModel):
        """Copy the data from the output model into session variables.

        :param request: The django HTTP request object
        :param outputModel: The lisibilite output model
        :return: None
        """
        session = request.session
        coreMetrics = outputModel.coreMetrics
        readabilityMetrics = outputModel.readabilityMetrics
        # Text category and description
        session['textCategory'] = outputModel.getCategory()
        session['textPurpose'] = outputModel.getDescription()
        # Core metrics
        session['totalWords'] = coreMetrics.getTotalWords()
        session['totalSentences'] = coreMetrics.getTotalSentences()
        session['totalHardWords'] = coreMetrics.getTotalComplexWords()
        session['totalEasyWords'] = coreMetrics.getTotalEasyWords()
        session['totalSyllables'] = coreMetrics.getTotalSyllables()
        session['totalCharacters'] = coreMetrics.getTotalCharacters()
        # Readability metrics: each score exposes a rounded value and a label.
        score_getters = (('fres', readabilityMetrics.getFRES),
                         ('fkgl', readabilityMetrics.getFKGL),
                         ('gfi', readabilityMetrics.getGFI),
                         ('ari', readabilityMetrics.getARI),
                         ('smog', readabilityMetrics.getSMOG),
                         ('cli', readabilityMetrics.getCLI),
                         ('lws', readabilityMetrics.getLWS),
                         ('fry', readabilityMetrics.getFRY))
        for prefix, getter in score_getters:
            session[prefix + 'Value'] = getter().roundedValue
            session[prefix + 'Label'] = getter().label

    def post(self, request):
        """Handle the HTTP POST request for the text input page.

        :param request: The django HTTP request object
        :return: The redirect object
        """
        contentString = request.POST.get('textContent', "")
        if contentString:
            outputModel = Lisibilite(contents=contentString).outputModel
            if outputModel is not None:
                self.setOutputValuesToSession(request, outputModel)
        else:
            outputModel = None
        return redirect('displaymetrics')
class DisplayScoresPageView(View):
    """Displays the readability metrics previously stored in the session."""

    def generateMetricsDict(self, request):
        """Collect every session entry into a plain dictionary.

        :param request: The django HTTP request object
        :return: The dictionary object containing the metrics
        """
        return {key: request.session.get(key) for key in request.session.keys()}

    def get(self, request):
        """Handle the HTTP GET request for the metrics display page.

        :param request: The django HTTP request object
        :return: The render object
        """
        return render(request, 'displaymetrics.html',
                      self.generateMetricsDict(request))
|
import os
import subprocess
def test_db_import():
    """The packaged mongod binary must exist on disk."""
    import fiftyone.db

    mongod = os.path.join(fiftyone.db.FIFTYONE_DB_BIN_DIR, "mongod")
    assert os.path.isfile(mongod)
def test_db_exec():
    """The packaged mongod binary must run and report its version."""
    import fiftyone.db

    mongod = os.path.join(fiftyone.db.FIFTYONE_DB_BIN_DIR, "mongod")
    subprocess.check_call([mongod, "--version"])
|
import requests
import json
import geojson
import csv
# Prefer pyOpenSSL for urllib3's SSL layer when available (helps SNI on
# older Pythons); silently fall back to the stdlib ssl otherwise.
try:
    import urllib3.contrib.pyopenssl
    urllib3.contrib.pyopenssl.inject_into_urllib3()
except ImportError:
    pass
# Local routing-service endpoint used for all isochrone queries.
url = "http://localhost:8080/routing/api/v1/isochrone"
class Point(object):
    """A longitude/latitude coordinate pair."""

    def __init__(self, lon, lat):
        self.lon = lon
        self.lat = lat

    def __repr__(self):
        # Six-decimal fixed point, matching the original "%f" formatting.
        return "({:f},{:f})".format(self.lon, self.lat)
# Load surveyor locations from the tab-separated file; each record keeps
# its gid plus a Point built from the lon/lat columns.
points = []
with open('app/data/enqueteurs.tsv') as fin:
    reader = csv.DictReader(fin, delimiter='\t')
    for record in reader:
        points.append({ "gid": record['gid'], "geom": Point(float(record['lon']), float(record['lat'])) })
def isochrone(pt, distance):
    """Request a concave isochrone polygon around *pt*.

    :param pt: Point with lon/lat attributes
    :param distance: travel cost passed to the routing service
    :return: decoded GeoJSON data on HTTP 200, otherwise the raw response
    """
    query = {
        "lon": pt.lon,
        "lat": pt.lat,
        "cost": distance,
        "concave": "true",
    }
    response = requests.get(url, query)
    if response.status_code != 200:
        return response
    return json.loads(response.text)
# Compute a 20-minute isochrone for each point and write it to its own
# GeoJSON file named after the record's gid.
# NOTE(review): on HTTP errors isochrone() returns the Response object,
# which json.dumps() cannot serialize -- confirm error handling upstream.
for i, pt in enumerate(points):
    geom = isochrone(pt["geom"], 20)
    with open('iso_20m_%s.geojson' % pt["gid"], 'w') as f:
        f.write(json.dumps(geom))
import os
import click
import dotenv
# Shared click settings: accept -h in addition to --help.
CONTEXT_SETTINGS = dict(help_option_names=['-h', '--help'])
# Root command group all sub-commands attach to.
cli = click.Group(name='vuejs-doc-pdf', help="Build pdf of vue.js guides", context_settings=CONTEXT_SETTINGS)
def project_relative_path(*args):
    """Join *args* onto this file's directory and return the result."""
    project_root = os.path.dirname(os.path.realpath(__file__))
    return os.path.join(project_root, *args)
# Expose the project root to child processes and load .env settings.
base = project_relative_path()
os.environ['PROJECT_PATH'] = base
dotenv.load_dotenv(project_relative_path('.env'))
# A checkout of vuejs.org is mandatory: fail fast when it is missing.
vue_js_path = os.environ.get('VUEJS_ORG_PATH')
if not vue_js_path or not os.path.exists(vue_js_path):
    raise ValueError('VUEJS_ORG_PATH not found')
# Rendered HTML guides, their markdown sources, and the PDF output dir.
html_path = os.path.join(vue_js_path, 'public', 'v2', 'guide')
md_path = os.path.join(vue_js_path, 'src', 'v2', 'guide')
dist_path = project_relative_path('build', 'dist')
# Bash placeholders for help generation
@cli.command(
    name='init',
    help='Initialize the project'
)
def cmd_init():
    # Placeholder: the actual work is done elsewhere (bash); this stub only
    # makes the command appear in --help.
    pass
@cli.command(
    name='install_wkhtmltopdf',
    help='Install wkhtmltopdf with qt patch'
)
def cmd_install_wkhtmltopdf():
    # Placeholder: the actual work is done elsewhere (bash); this stub only
    # makes the command appear in --help.
    pass
|
import pulsar as psr
def load_ref_system():
    """ Returns triphenylene as found in the IQMol fragment library.

        Cartesian geometry: element symbol followed by x, y, z coordinates
        (presumably Angstroms, as provided by the fragment library).

        All credit to https://github.com/nutjunkie/IQmol
    """
    return psr.make_system("""
        C          1.23839        0.71468       -0.00000
        C          1.23839       -0.71468       -0.00000
        C         -0.00026        1.42982       -0.00000
        C          2.49296        1.39093        0.00000
        C         -0.00026       -1.42982       -0.00000
        C         -1.23813        0.71514       -0.00001
        C          2.49296       -1.39092        0.00000
        C         -0.04190        2.85443       -0.00000
        C          3.69209        0.69026        0.00000
        C          3.69209       -0.69025        0.00000
        C         -1.23812       -0.71513       -0.00001
        C         -0.04190       -2.85443       -0.00000
        C         -2.45105        1.46351       -0.00001
        C         -1.24826        3.54257       -0.00001
        C         -2.45105       -1.46350       -0.00001
        C         -2.44382        2.85232       -0.00001
        C         -1.24826       -3.54257       -0.00001
        C         -2.44382       -2.85231       -0.00001
        H          4.63206        1.22780        0.00000
        H          4.63206       -1.22779        0.00000
        H         -1.25273        4.62538       -0.00001
        H         -3.37933        3.39759       -0.00001
        H         -1.25273       -4.62538       -0.00001
        H         -3.37933       -3.39758       -0.00001
        H          2.58415        2.45972        0.00000
        H          2.58415       -2.45972        0.00000
        H          0.83811        3.46781       -0.00000
        H          0.83811       -3.46780       -0.00000
        H         -3.42226        1.00808       -0.00001
        H         -3.42226       -1.00808       -0.00001
        """)
|
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 10:24:51 2019
@author: if715029
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import datasets
import sklearn.metrics as skm
import scipy.spatial.distance as sc
#%%
digits = datasets.load_digits()
#%% Show the first `ndig` digit images in a square-ish grid.
ndig = 100
# plt.subplot requires integer row/column counts; np.floor/np.ceil return
# floats, which modern matplotlib rejects.
n_rows = int(np.floor(np.sqrt(ndig)))
n_cols = int(np.ceil(np.sqrt(ndig)))
for k in range(ndig):
    plt.subplot(n_rows, n_cols, k + 1)
    plt.axis('off')
    plt.imshow(digits.images[k], cmap=plt.cm.gray_r)
#%% Select a slice of the data and binarize it.
# Simple matching and Jaccard are defined only for binary data; the
# predefined set holds intensities 0..15, so threshold them to 0/1.
# NOTE(review): digits['data'][0:30] is a numpy view, so the in-place
# thresholding below also mutates digits.data -- confirm this is intended.
data = digits['data'][0:30]
umbral = 7  # binarization threshold ("umbral" = Spanish for threshold)
data[data <= umbral] = 0
data[data > umbral] = 1
data = pd.DataFrame(data)
#%% Similarity indices between two sample digits (rows 0 and 10).
cf_m = skm.confusion_matrix(data.iloc[0, :], data.iloc[10, :])  # confusion matrix
sim_simple = skm.accuracy_score(data.iloc[0, :], data.iloc[10, :])
sim_simple_manual = (cf_m[0, 0] + cf_m[1, 1]) / np.sum(cf_m)
#sim_jac = skm.jaccard_similarity_score(data.iloc[0,:],data.iloc[10,:])  # removed from modern sklearn
sim_jac_manual = cf_m[1, 1] / (np.sum(cf_m) - cf_m[0, 0])
#%% Distances for the same pair.
d1 = sc.hamming(data.iloc[0, :], data.iloc[10, :])  # simple-matching distance (1 - matching)
d2 = sc.jaccard(data.iloc[0, :], data.iloc[10, :])  # Jaccard distance
#%% Pairwise distances over all selected rows.
D1 = sc.pdist(data, 'hamming')
D1 = sc.squareform(D1)
D2 = sc.pdist(data, 'jaccard')
D2 = sc.squareform(D2)
|
import matplotlib.pyplot as plt
import numpy as np
"""
def imgmul(y):
z3=np.zeros(2,dtype=int)
z3[0,0]=y[0,0]*y[0,0]+y[1,1]*y[1,1]
z3[0,1]=y[0,0]*y[0,1]+z1[0,1]*z2[0,0]
return z3
def E(x):
return x*x
def DE_Dx(x):
return 2*x
x=10
Er=[E(x)]
num_iter=1000
lamda=0.001
for i in range(num_iter):
x=x-lamda*DE_Dx(x)
e=E(x)
Er.append(e)
plt.plot(Er)
"""
#input z=(x+iy) ->(1,2) gt(t)=(1,2)
def Er_im(z, t):
    """Square of the complex error (z - t).

    z and t are (1, 2) arrays holding [real, imag]; the result is a (1, 2)
    array with the real and imaginary parts of ((z - t) as a complex
    number) squared.
    """
    dr = z[0][0] - t[0][0]
    di = z[0][1] - t[0][1]
    out = np.zeros((1, 2))
    out[0][0] = dr * dr - di * di
    out[0][1] = 2 * dr * di
    return out
def Er_ours(z, t):
    """Mean absolute (elementwise) error between z and t."""
    return np.mean(np.abs(z - t))
def DE_DZ(z, t):
    """Gradient of the squared-distance error with respect to z.

    Returns a (1, 2) array equal to 2 * (z - t), componentwise.
    """
    grad = np.zeros((1, 2))
    grad[0][0] = 2 * (z[0][0] - t[0][0])
    grad[0][1] = 2 * (z[0][1] - t[0][1])
    return grad
# Gradient descent on the squared distance between z and target t, with
# the complex value z represented as a (1, 2) [real, imag] array.
z=np.array([[100,-200]])
t=np.array([[3,2]])
Er=[Er_im(z,t)]         # complex (squared) error per iteration
Er_mse=[Er_ours(z,t)]   # mean absolute error per iteration
num_iter=10000
lamda=0.001             # learning rate
for i in range(num_iter):
    z=z-lamda*DE_DZ(z,t)
    e=Er_im(z,t)
    Er.append(e)
    Er_mse.append(Er_ours(z,t))
Er=np.array(Er)
Er_mse=np.array(Er_mse)
# Plot imaginary and real parts of the complex error, then the MAE curve.
plt.figure("real -img")
plt.plot(Er[:,0,1])
plt.plot(Er[:,0,0])
plt.figure("ours error")
plt.plot(Er_mse)
plt.show()
|
# Copyright 2017-2021 Lawrence Livermore National Security, LLC and other
# Hatchet Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: MIT
import pstats
import sys
import pandas as pd
import hatchet.graphframe
from hatchet.node import Node
from hatchet.graph import Graph
from hatchet.frame import Frame
def print_incomptable_msg(stats_file):
    """
    Print a friendly error for a pstats file written by the other major
    Python version (pstats dumps are not portable between Python 2 and 3).
    """
    errmsg = """\n Error: Incompatible pstats file ({})\n Please run your code in Python {} to read in this file. \n"""
    major = sys.version_info[0]
    if major == 2:
        print(errmsg.format(stats_file, 3))
    if major == 3:
        print(errmsg.format(stats_file, 2.7))
class StatData:
    """ Faux Enum for python: indices into a pstats per-function stats tuple """
    NUMCALLS = 0      # call count
    NATIVECALLS = 1   # non-recursive call count
    EXCTIME = 2       # exclusive (self) time
    INCTIME = 3       # inclusive (cumulative) time
    SRCNODE = 4       # dict of caller entries (parents)
class NameData:
    """ Faux Enum for python: indices into a pstats function-identifier tuple """
    FILE = 0      # source file path
    LINE = 1      # line number of the function definition
    FNCNAME = 2   # function name
class CProfileReader:
    """Reads a cProfile/pstats dump and builds a hatchet GraphFrame."""

    def __init__(self, filename):
        # Path of the pstats file to parse.
        self.pstats_file = filename
        # unique function name -> hatchet graph Node
        self.name_to_hnode = {}
        # unique function name -> row of per-function metadata
        self.name_to_dict = {}

    def _create_node_and_row(self, fn_data, fn_name, stats_dict):
        """
        Description: Takes a profiled function as specified in a pstats file
        and creates a node for it and adds a new line of metadata to our
        dataframe if it does not exist.
        """
        # Disambiguate same-named functions by appending the source file's
        # basename and the definition line number.
        u_fn_name = "{}:{}:{}".format(
            fn_name,
            fn_data[NameData.FILE].split("/")[-1],
            fn_data[NameData.LINE],
        )
        fn_hnode = self.name_to_hnode.get(u_fn_name)
        if not fn_hnode:
            # create a node if it doesn't exist yet
            fn_hnode = Node(Frame({"type": "function", "name": fn_name}), None)
            self.name_to_hnode[u_fn_name] = fn_hnode
            # lookup stat data for source here
            fn_stats = stats_dict[fn_data]
            self._add_node_metadata(u_fn_name, fn_data, fn_stats, fn_hnode)
        return fn_hnode

    def _get_src(self, stat):
        """Gets the source/parent of our current destination node"""
        return stat[StatData.SRCNODE]

    def _add_node_metadata(self, stat_name, stat_module_data, stats, hnode):
        """Puts all the metadata associated with a node in a dictionary to insert into pandas."""
        node_dict = {
            "file": stat_module_data[NameData.FILE],
            "line": stat_module_data[NameData.LINE],
            "name": stat_module_data[NameData.FNCNAME],
            "numcalls": stats[StatData.NUMCALLS],
            "nativecalls": stats[StatData.NATIVECALLS],
            "time (inc)": stats[StatData.INCTIME],
            "time": stats[StatData.EXCTIME],
            "node": hnode,
        }
        self.name_to_dict[stat_name] = node_dict

    def create_graph(self):
        """Performs the creation of our node graph"""
        try:
            stats_dict = pstats.Stats(self.pstats_file).__dict__["stats"]
        except ValueError:
            # pstats files are not portable across Python 2/3.
            print_incomptable_msg(self.pstats_file)
            raise
        list_roots = []
        # We iterate through each function/node in our stats dict
        for dst_module_data, dst_stats in stats_dict.items():
            dst_name = dst_module_data[NameData.FNCNAME]
            dst_hnode = self._create_node_and_row(dst_module_data, dst_name, stats_dict)
            # get all parents of our current destination node
            # create source nodes and link with destination node
            srcs = self._get_src(dst_stats)
            if srcs == {}:
                # No callers recorded: this function is a root of the graph.
                list_roots.append(dst_hnode)
            else:
                for src_module_data in srcs.keys():
                    src_name = src_module_data[NameData.FNCNAME]
                    if src_name is not None:
                        src_hnode = self._create_node_and_row(
                            src_module_data, src_name, stats_dict
                        )
                        dst_hnode.add_parent(src_hnode)
                        src_hnode.add_child(dst_hnode)
        return list_roots

    def read(self):
        """Build the graph, wrap the metadata rows in a node-indexed
        DataFrame, and return the resulting GraphFrame."""
        roots = self.create_graph()
        graph = Graph(roots)
        graph.enumerate_traverse()
        dataframe = pd.DataFrame.from_dict(data=list(self.name_to_dict.values()))
        index = ["node"]
        dataframe.set_index(index, inplace=True)
        dataframe.sort_index(inplace=True)
        return hatchet.graphframe.GraphFrame(graph, dataframe, ["time"], ["time (inc)"])
|
from selenium import webdriver
from time import sleep
from PIL import Image
from requests import get
from io import BytesIO
from itertools import count
from sys import argv
from bs4 import BeautifulSoup
# Validate the command-line argument: a scribd document URL is required.
if len(argv) == 1:
    print('No arguments, aborting...')
    exit()
elif not argv[1].startswith('https://www.scribd.com/document/'):
    print('The argument that you passed is invalid!')
    print('It must begin with "https://www.scribd.com/document/"')
    exit()
else:
    URL = argv[1]
# Drive a headless Firefox to render the document viewer.
# NOTE(review): `firefox_options=` is the legacy selenium 3 keyword --
# confirm the installed selenium version still accepts it.
options = webdriver.FirefoxOptions()
options.add_argument('--headless')
driver = webdriver.Firefox(firefox_options=options)
driver.get(URL)
pages = []
# Scroll through the viewer collecting each page's image URL until no
# further page container element is found.
for i in count(1):
    try:
        page = driver.find_element_by_id('outer_page_'+str(i))
    except Exception as e:
        print(e)
        break
    soup = BeautifulSoup(page.get_attribute('innerHTML'), 'html.parser')
    page = soup.select('.absimg')[0]['src']
    pages.append(page)
    print(page)
    sleep(0.3)  # give the viewer time to lazy-load the next page
    driver.execute_script("scroller = window.document.querySelector('.document_scroller');scroller.scrollBy(0, 2000);")
if len(pages) == 0:
    print('Could not find any image, aborting.')
    exit()
# Download every page image and bundle them into a single PDF named after
# the trailing URL segment.
imgs = []
for i, page in enumerate(pages):
    response = get(page)
    img = Image.open(BytesIO(response.content))
    imgs.append(img)
imgs[0].save((URL.split('/')[-1] + '.pdf'), save_all=True, append_images=imgs[1:])
from shserver.shserver import ShortingServer
if __name__ == "__main__":
    # Listen on all interfaces, port 8000.
    server = ShortingServer("0.0.0.0", 8000)
    server.run()
|
# percol configuration
# https://github.com/mooz/percol
# percol.view.PROMPT = ur"<bold><yellow>X / _ / X</yellow></bold> %q"
# percol.view.PROMPT = ur"<cyan>Input:</cyan> %q"
# Change prompt in response to the status of case sensitivity.
# Installing a property on the view class makes the prompt re-evaluate on
# every redraw: [a] marks case-insensitive matching, [A] case-sensitive.
percol.view.__class__.PROMPT = property(
    lambda self:
    ur"<bold><yellow>X / _ / X</yellow> [a]</bold> %q" if percol.model.finder.case_insensitive
    else ur"<bold><green>X / _ / X</green> [A]</bold> %q"
)
# Display finder name in RPROMPT (%F expands via prompt_replacees)
percol.view.prompt_replacees["F"] = lambda self, **args: self.model.finder.get_name()
percol.view.RPROMPT = ur"(%F) [%i/%I]"
# Customizing colors of the candidate lines
percol.view.CANDIDATES_LINE_BASIC = ("on_default", "default")
percol.view.CANDIDATES_LINE_SELECTED = ("bold", "on_blue", "white")
percol.view.CANDIDATES_LINE_MARKED = ("bold", "on_cyan", "black")
percol.view.CANDIDATES_LINE_QUERY = ("yellow", "bold")
# Emacs-like key bindings for editing, cursor movement and selection
percol.import_keymap({
    "C-h" : lambda percol: percol.command.delete_backward_char(),
    "C-d" : lambda percol: percol.command.delete_forward_char(),
    "C-k" : lambda percol: percol.command.kill_end_of_line(),
    "C-y" : lambda percol: percol.command.yank(),
    "C-a" : lambda percol: percol.command.beginning_of_line(),
    "C-e" : lambda percol: percol.command.end_of_line(),
    "C-b" : lambda percol: percol.command.backward_char(),
    "C-f" : lambda percol: percol.command.forward_char(),
    "C-n" : lambda percol: percol.command.select_next(),
    "C-p" : lambda percol: percol.command.select_previous(),
    "C-v" : lambda percol: percol.command.select_next_page(),
    "M-v" : lambda percol: percol.command.select_previous_page(),
    "M-<" : lambda percol: percol.command.select_top(),
    "M->" : lambda percol: percol.command.select_bottom(),
    "C-x" : lambda percol: percol.command.toggle_mark_all(),
    "C-m" : lambda percol: percol.finish(),
    "C-j" : lambda percol: percol.finish(),
    "C-g" : lambda percol: percol.cancel(),
})
# Matching method can be switched dynamically (at run time) by executing
# percol.command.specify_finder(FinderClass) or percol.command.toggle_finder(FinderClass).
# In addition, percol.command.specify_case_sensitive(case_sensitive) and
# percol.command.toggle_case_sensitive() change the matching status of case
# sensitivity.
from percol.finder import FinderMultiQueryRegex
percol.import_keymap({
    "M-c" : lambda percol: percol.command.toggle_case_sensitive(),
    "M-r" : lambda percol: percol.command.toggle_finder(FinderMultiQueryRegex)
})
|
from .base import ObjectList, Object
__all__ = ['UserList', 'User']
class UserList(ObjectList):
    """Collection of user records; items materialize as :class:`User` objects."""
    # PEP 8 (E731): use a def instead of assigning a lambda to a name, so the
    # method carries a proper name in tracebacks.  Behavior is unchanged.
    def get_instance_cls(self):
        """Return the class used to wrap each list item."""
        return User
class User(Object):
    """A single user record."""
    @property
    def name(self):
        """Expose the record's `fullname` under the conventional `name` attribute."""
        return self.fullname
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""gpx2geojson: GPX to GeoJSON / KML converter.
GUI application for merging kashmir3d-generated multiple
GPX files into a single file, decimating track points,
and converting into a GeoJSON or KML file,
both of which are specified in
https://maps.gsi.go.jp/development/sakuzu_siyou.html.
Originally written in Perl, and rewritten in Python3.
"""
import os
from tkinter import *
from tkinter import filedialog, messagebox
from tkinter.ttk import *
from config import Config
from gpx2geojson_cli import convert
# Application version shown in the window's title label.
VERSION = '2.0'
class App(Frame):
    """Main GUI frame: choose GPX files, set conversion options, run the converter."""
    def __init__(self, master=None):
        """Load saved configuration into Tk variables and build the widget tree."""
        super().__init__(master)
        self.master = master
        self.configure(padding=5)
        self.pack()
        # Persisted user settings from the previous session.
        self.cf = Config()
        self.cf.load()
        # Tk variables mirroring the configuration values (bound to widgets below).
        self.line_style = StringVar(value=self.cf.line_style)
        self.line_size = StringVar(value=self.cf.line_size)
        self.opacity = StringVar(value=self.cf.opacity)
        self.xt_state = StringVar(value=self.cf.xt_state)
        self.xt_error = StringVar(value=self.cf.xt_error)
        self.outext = StringVar(value=self.cf.outext)
        self.outfile = StringVar(value='')
        self.n_point = StringVar(value='')
        self.gpxfiles = None  # Listbox of input GPX paths (set in create_widgets)
        self.xt = None  # Spinbox for the decimation tolerance (set in create_widgets)
        self.create_widgets()
    def save_config(self):
        """save configuration parameters."""
        self.cf.line_style = self.line_style.get()
        self.cf.line_size = self.line_size.get()
        self.cf.opacity = self.opacity.get()
        self.cf.xt_state = self.xt_state.get()
        self.cf.xt_error = self.xt_error.get()
        self.cf.outext = self.outext.get()
        self.cf.save()
    def create_widgets(self):
        """create gui widgets (grid layout; labels keep their Japanese UI text)."""
        # Static labels: title, GPX files, output format, output file,
        # conversion settings, opacity, line style, line width,
        # tolerance [km], result info, track point count.
        l = Label(self, text='GPX→GeoJSONコンバータ Ver.'+VERSION)
        l.grid(row=0, column=0, columnspan=5)
        l = Label(self, text='GPXファイル')
        l.grid(row=1, column=0, sticky='e')
        l = Label(self, text='出力形式')
        l.grid(row=4, column=0, sticky='e')
        l = Label(self, text='出力ファイル')
        l.grid(row=5, column=0, sticky='e')
        l = Label(self, text='変換設定', anchor='center')
        l.grid(row=6, column=1, sticky='ew')
        l = Label(self, text='線の透過率')
        l.grid(row=7, column=0, sticky='e')
        l = Label(self, text='線種')
        l.grid(row=8, column=0, sticky='e')
        l = Label(self, text='線幅')
        l.grid(row=9, column=0, sticky='e')
        l = Label(self, text='許容誤差[km]')
        l.grid(row=7, column=2, sticky='e')
        l = Label(self, text='変換結果情報', anchor='center')
        l.grid(row=8, column=3, sticky='ew')
        l = Label(self, text='軌跡点数')
        l.grid(row=9, column=2, sticky='e')
        # GPX file listbox with vertical scrollbar
        f = Frame(self)
        f.grid(row=1, column=1, rowspan=3, columnspan=3, sticky='nsew')
        l = Listbox(f, width=75, height=3, selectmode='single')
        b = Scrollbar(f, orient='vertical')
        l['yscrollcommand'] = b.set
        b['command'] = l.yview
        l.pack(side='left', fill='y')
        b.pack(side='right', fill='y')
        self.gpxfiles = l
        # Add / remove / clear buttons for the file list
        b = Button(self, text='←追加')
        b['command'] = self.append_to_list
        b.grid(row=1, column=4, sticky='ew')
        b = Button(self, text='除外')
        b['command'] = self.remove_from_list
        b.grid(row=2, column=4, sticky='ew')
        b = Button(self, text='クリア')
        b['command'] = self.clear_list
        b.grid(row=3, column=4, sticky='ew')
        # Output format radio buttons
        f = Frame(self, borderwidth=2, padding=0, relief='sunken')
        f.grid(row=4, column=1, sticky='nsew')
        formats = [['GPX', '.gpx'], ['KML', '.kml'], ['GeoJSON', '.geojson']]
        for key, value in formats:
            b = Radiobutton(f, text=key, value=value)
            b['variable'] = self.outext
            b.pack(side='left')
        # Output file entry + chooser button
        e = Entry(self)
        e['textvariable'] = self.outfile
        e.grid(row=5, column=1, columnspan=3, sticky='nsew')
        b = Button(self, text='選択')
        b['command'] = self.select_savefile
        b.grid(row=5, column=4, sticky='ew')
        # Line opacity spinbox (0.0 - 1.0)
        b = Spinbox(self, format='%3.1f', from_=0.0, to=1.0, increment=0.1)
        b['textvariable'] =self.opacity
        b.grid(row=7, column=1, sticky='nsew')
        # Line style radio buttons
        f = Frame(self, borderwidth=2, padding=0, relief='sunken')
        # NOTE: for win10, if padding is not specified, inner widgets overlap the border.
        f.grid(row=8, column=1, sticky='nsew')
        styles = [['GPX', '0'], ['実線', '1'], ['破線', '11'], ['点線', '13']]
        for key, value in styles:
            b = Radiobutton(f, text=key, value=value)
            b['variable'] = self.line_style
            b.pack(side='left')
        # Line width radio buttons
        f = Frame(self, borderwidth=2, padding=0, relief='sunken')
        f.grid(row=9, column=1, sticky='nsew')
        sizes = [['GPX', '0'], [' 1pt', '1'], [' 3pt', '3'], [' 5pt', '5']]
        for key, value in sizes:
            b = Radiobutton(f, text=key, value=value)
            b['variable'] = self.line_size
            b.pack(side='left')
        # Decimation tolerance spinbox (enabled only when decimation is on)
        b = Spinbox(self, format='%5.3f', from_=0.001, to=9.999, increment=0.001)
        b['textvariable'] = self.xt_error
        b.grid(row=7, column=3, sticky='nsew')
        self.xt = b
        b = Checkbutton(self, text='軌跡を間引く', onvalue='1', offvalue='0')
        b['variable'] = self.xt_state
        b['command'] = self.set_xt
        b.grid(row=6, column=3, sticky='w')
        self.set_xt()
        # Read-only display of the resulting track point count
        e = Entry(self, state='readonly', foreground='blue')
        e['textvariable'] = self.n_point
        e.grid(row=9, column=3, sticky='ew')
        # Convert button
        b = Button(self, text='変換')
        b['command'] = self.conv
        b.grid(row=10, column=1)
        # Quit button
        b = Button(self, text='終了')
        b['command'] = self.quit
        b.grid(row=10, column=4)
    def append_to_list(self):
        """Open a file dialog and append the chosen GPX paths to the listbox."""
        ret = filedialog.askopenfilenames(
            filetypes=[('GPXファイル', '*.gpx'), ('', '*')],
            initialdir=self.cf.indir
        )
        for path in ret:
            self.gpxfiles.insert('end', path)
            # Remember the last-used directory for the next dialog.
            self.cf.indir = os.path.dirname(path)
    def remove_from_list(self):
        """Delete the currently selected entry from the GPX file list."""
        # NOTE(review): curselection() returns a tuple of indices; this relies
        # on Tk accepting it as an index — verify behavior with no selection.
        i = self.gpxfiles.curselection()
        self.gpxfiles.delete(i)
    def clear_list(self):
        """Remove every entry from the GPX file list."""
        self.gpxfiles.delete(0, 'end')
    def select_savefile(self):
        """Open a save dialog matching the selected output format; store the path."""
        ext = self.outext.get()
        nam = { '.geojson': 'GeoJSON', '.kml': 'KML', '.gpx': 'GPX' }
        ret = filedialog.asksaveasfilename(
            filetypes=[(nam[ext] + 'ファイル', '*' + ext), ('', '*')],
            initialdir=self.cf.indir,
            initialfile='routemap'+ext
        )
        if ret:
            self.outfile.set(ret)
    def set_xt(self):
        """Enable the tolerance spinbox only while decimation is switched on."""
        self.xt.configure(state='normal' if self.xt_state.get() != '0' else 'disabled')
    def conv(self):
        """Validate inputs, run the conversion, and report the result."""
        args = list(self.gpxfiles.get(0, 'end'))
        if len(args) == 0:
            messagebox.showwarning(title='警告', message='GPXファイルが未設定')
            return
        outfile = self.outfile.get()
        if not outfile:
            messagebox.showwarning(title='警告', message='出力ファイルが未設定')
            return
        # convert() returns the resulting track point count, negative on failure.
        n_point = convert(args, outfile,
            xt_state=self.xt_state.get(),
            xt_error=self.xt_error.get(),
            outext=self.outext.get(),
            line_size=self.line_size.get(),
            line_style=self.line_style.get(),
            opacity=self.opacity.get()
        )
        if n_point < 0:
            messagebox.showerror(title='エラー', message='変換に失敗しました')
            return
        self.n_point.set(str(n_point))
        self.master.update()
        # NOTE(review): stores `outdir` on Config while dialogs read `indir` —
        # confirm Config persists/uses `outdir`.
        self.cf.outdir = os.path.dirname(outfile)
        messagebox.showinfo(title='成功', message='変換結果を'+outfile+'に出力しました')
    def quit(self):
        """Persist settings and close the main window (overrides Frame.quit)."""
        self.save_config()
        self.master.destroy()
def main():
    """Create the Tk root window and run the application's event loop."""
    root = Tk()
    root.title('GPX2GeoJSON')
    root.resizable(False, False)
    App(master=root).mainloop()
# Launch the GUI only when executed as a script.
if __name__ == '__main__':
    main()
# __END__
|
import statsmodels.api as sm
import statsmodels.tsa as tsa
import scipy.stats as stats
import numpy as np
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from statsmodels.stats.outliers_influence import variance_inflation_factor
from statsmodels.tools.tools import add_constant
import statsmodels.formula.api as smf
import statsmodels.api as sm
import statsmodels.tsa as tsa
import scipy.stats as stats
from sklearn.preprocessing import StandardScaler
from sklearn import preprocessing
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.model_selection import train_test_split, KFold, LeaveOneOut, StratifiedKFold, cross_validate, cross_val_score, cross_val_predict
from sklearn.linear_model import ElasticNetCV, ElasticNet, RidgeCV, LassoCV, Ridge, Lasso, LinearRegression
from sklearn.model_selection import RandomizedSearchCV, StratifiedKFold, RepeatedKFold
class stepwise_model(object):
    """Stepwise media-mix model builder.

    For each media variable, an S-curve (saturation), adstock (decay) and lag
    transformation are tuned one after another by grid search; every candidate
    transformation is scored with a LassoCV fit, and the final model over the
    selected transformations is fit with RidgeCV.
    """
    def __init__(self,
                 alpha_lower = 0.88,
                 alpha_upper = 0.98,
                 decay_lower = 0.0,
                 decay_upper = 1,
                 lag_lower = 0,
                 lag_upper = 15,
                 beta = 500000,
                 n_fold = 5):
        """Store grid-search bounds and cross-validation settings.

        - alpha_lower/alpha_upper: S-curve shape search range
        - decay_lower/decay_upper: adstock factor search range
        - lag_lower/lag_upper: lag (in periods) search range
        - beta: S-curve scale constant (used internally as beta * 1e-10)
        - n_fold: number of folds for the final repeated K-fold CV
        """
        self.alpha_lower = alpha_lower
        self.alpha_upper = alpha_upper
        self.decay_lower = decay_lower
        self.decay_upper = decay_upper
        self.lag_lower = lag_lower
        self.lag_upper = lag_upper
        self.beta = beta
        self.n_fold = n_fold
    # create lag function
    def lag_function(self, data, column_name, lag_num):
        '''
        Shift a column down by `lag_num` periods (zero-filled), in place.
        data: dataframe (modified in place; the lagged column is added to it)
        column_name: column to perform lag transformation
        lag_num: depends on the data; if the data is at weekly level, lag_num is the number of week lag
        Returns the name of the new column.
        '''
        column_name1 = column_name + '_lag_' + str(lag_num)
        data[column_name1] = data[column_name].shift(lag_num).fillna(0)
        return column_name1
    # create decay function
    def transform_decay(self, data, column_name, adstock_factor):
        """Apply a geometric adstock (recursive filter) in place; return the new column name."""
        column_name_adstock = column_name + '_decay_' + str(adstock_factor).replace('.', '')
        data[column_name_adstock] = tsa.filters.filtertools.recursive_filter(data[column_name], adstock_factor)
        return column_name_adstock
    # create S-Curve transform function
    def transform_s_curve(self, data, column_name, alpha, beta):
        """Apply an S-curve (saturation) transform in place; return the new column name."""
        # Index the media input to a 0-100 scale relative to its maximum.
        media_input_index = data[column_name] / np.max(data[column_name]) * 100
        # BUG FIX: np.float was removed in NumPy >= 1.24; use the builtin float.
        beta1 = float(beta * (10 ** -10))
        column_name1 = str(column_name) + '_alpha_' + str(alpha).replace('.', '')
        data[column_name1] = beta1 ** (alpha ** media_input_index)
        return column_name1
    def stepwise_model(self, data, target_col, feature_col, media_var):
        """
        INPUTS:
        - data: Design Matrix as a pd.DataFrame
        - target_col: response variable
        - feature_col: a list of all dependent variables
        - media_var: a list of media variables (these variables will be transformed automatically using the three functions above)
        OUTPUTS:
        1. model_result: coef of each variables
        2. cv_result: cross validation result
        3. X_t: transformed media variable with optimal parameter
        4. model_module: sklearn RidgeCV object
        5. optimal_param: pd.DataFrame contains the optimal alpha, decay, and lag for each media variable.
        """
        ### S-Curve Parameter
        alpha_lower_bound = self.alpha_lower
        alpha_upper_bound = self.alpha_upper
        alpha_step = 0.01
        beta = self.beta
        ### decay/adstock Parameter
        decay_lower_bound = self.decay_lower
        decay_upper_bound = self.decay_upper
        decay_step = 0.1
        ### lag Parameter
        lag_lower_bound = self.lag_lower
        lag_upper_bound = self.lag_upper
        lag_step = 1
        X_t, y = data[feature_col], data[target_col]
        print(f"Features:{feature_col}")
        print("\n")
        print("#" * 80)
        print("finding the best parameters.....")
        # S curve optimization: for each media variable, try every alpha in the
        # grid and keep the candidate with the lowest in-sample MSE.
        curve_optimized = pd.DataFrame(columns=['mse', 'score', 'coef', 'candidates'])
        curve_log = pd.DataFrame()
        for curve_var in media_var:
            curve_list = []
            for alpha_numb in np.around(np.arange(alpha_lower_bound, alpha_upper_bound, alpha_step), decimals=4):
                # BUG FIX: transform_s_curve is a method; the original called it
                # as a bare function, which raises NameError at runtime.
                curve_list.append(self.transform_s_curve(data, curve_var, alpha_numb, beta))
            scores_with_candidates_s_curve = []
            for candidate in curve_list:
                candidate_removed = list([i for i in media_var if i not in curve_var])
                fit_list = candidate_removed + [var for var in feature_col if var not in media_var]
                fit_list.append(candidate)
                X_t = data[fit_list]
                # BUG FIX: `target` was undefined; the parameter is `target_col`.
                y = data[target_col]
                # NOTE(review): `normalize=` was removed from sklearn >= 1.2; on
                # modern sklearn this needs a StandardScaler pipeline instead.
                lasso = LassoCV(cv=5, fit_intercept=True, normalize=True).fit(X_t, y)
                df_coef = pd.DataFrame(list(zip(data[fit_list].columns, lasso.coef_)), columns=['variables', 'coef'])
                coef = np.round(df_coef[df_coef['variables'] == candidate].iloc[0]['coef'], decimals=6)
                y_pred = lasso.predict(X_t)
                mse = mean_squared_error(y_pred, y)
                score = lasso.score(X_t, y)
                scores_with_candidates_s_curve.append((mse, score, coef, candidate))
            curve_result = pd.DataFrame(scores_with_candidates_s_curve, columns=['mse', 'score', 'coef', 'candidates'])
            final_pick_curve = curve_result[curve_result.mse == curve_result.mse.min()]
            # BUG FIX: DataFrame.append was removed in pandas 2.0; concat is the
            # equivalent replacement.
            curve_log = pd.concat([curve_log, curve_result])
            curve_optimized = pd.concat(
                [curve_optimized, final_pick_curve[final_pick_curve['mse'] == final_pick_curve['mse'].min()].iloc[[0]]])
            print(list(curve_optimized['candidates']))
        curve_selected = list(curve_optimized['candidates'])
        # parsing out the optimal alpha by variable from the column name
        curve_optimized['alpha'] = curve_optimized['candidates'].map(
            lambda x: x.split('alpha_')[-1] if len(x.split('alpha_')[-1]) > 2 else x.split('alpha_')[-1] + '0').astype(int) / 100
        curve_optimized['var'] = curve_optimized['candidates'].map(lambda x: x.split('_alpha')[0])
        # decay optimization: tune the adstock factor on top of the selected S-curve.
        decay_optimized = pd.DataFrame(columns=['score', 'coef', 'candidates'])
        decay_log = pd.DataFrame()
        for curve_opt_var in curve_selected:
            decay_list = []
            for decay_numb in np.around(np.arange(decay_lower_bound, decay_upper_bound, decay_step), decimals=4):
                # BUG FIX: method call, same as above.
                decay_list.append(self.transform_decay(data, curve_opt_var, decay_numb))
            scores_with_candidates_decay = []
            for candidate in decay_list:
                candidate_removed = list([i for i in media_var if i not in curve_opt_var])
                fit_list = candidate_removed + [var for var in feature_col if var not in media_var]
                fit_list.append(candidate)
                X_t = data[fit_list]
                y = data[target_col]
                lasso = LassoCV(cv=5, fit_intercept=True, normalize=True).fit(X_t, y)
                df_coef = pd.DataFrame(list(zip(data[fit_list].columns, lasso.coef_)), columns=['variables', 'coef'])
                coef = np.round(df_coef[df_coef['variables'] == candidate].iloc[0]['coef'], decimals=6)
                y_pred = lasso.predict(X_t)
                mse = mean_squared_error(y_pred, y)
                score = lasso.score(X_t, y)
                scores_with_candidates_decay.append((mse, score, coef, candidate))
            decay_result = pd.DataFrame(scores_with_candidates_decay, columns=['mse', 'score', 'coef', 'candidates'])
            # Keep only candidates whose media coefficient is non-negative.
            # NOTE(review): assumes at least one candidate qualifies; otherwise
            # iloc raises IndexError.
            final_pick_decay = decay_result[(decay_result.coef >= 0)]
            decay_log = pd.concat([decay_log, decay_result])
            decay_optimized = pd.concat(
                [decay_optimized, final_pick_decay[final_pick_decay['mse'] == final_pick_decay['mse'].min()].iloc[[0]]])
            print(list(decay_optimized['candidates']))
        decay_selected = list(decay_optimized['candidates'])
        # lag optimization: tune the lag on top of the selected S-curve + decay.
        lag_optimized = pd.DataFrame(columns=['score', 'coef', 'candidates'])
        lag_log = pd.DataFrame()
        for decay_opt_var in decay_selected:
            lag_list = []
            for lag in np.arange(lag_lower_bound, lag_upper_bound, lag_step):
                # BUG FIX: method call, same as above.
                lag_list.append(self.lag_function(data, decay_opt_var, lag))
            scores_with_candidates_lag = []
            for candidate in lag_list:
                candidate_removed = list([i for i in media_var if i not in decay_opt_var])
                fit_list = candidate_removed + [var for var in feature_col if var not in media_var]
                fit_list.append(candidate)
                X_t = data[fit_list]
                y = data[target_col]
                lasso = LassoCV(cv=5, fit_intercept=True, normalize=True).fit(X_t, y)
                df_coef = pd.DataFrame(list(zip(data[fit_list].columns, lasso.coef_)), columns=['variables', 'coef'])
                coef = np.round(df_coef[df_coef['variables'] == candidate].iloc[0]['coef'], decimals=6)
                y_pred = lasso.predict(X_t)
                mse = mean_squared_error(y_pred, y)
                score = lasso.score(X_t, y)
                scores_with_candidates_lag.append((mse, score, coef, candidate))
            lag_result = pd.DataFrame(scores_with_candidates_lag, columns=['mse', 'score', 'coef', 'candidates'])
            final_pick_lag = lag_result[(lag_result.coef >= 0)]
            lag_log = pd.concat([lag_log, lag_result])
            lag_optimized = pd.concat(
                [lag_optimized, final_pick_lag[final_pick_lag['mse'] == final_pick_lag['mse'].min()].iloc[[0]]])
            print(list(lag_optimized['candidates']))
        lag_selected = list(lag_optimized['candidates'])
        final_features = lag_selected + [var for var in feature_col if var not in media_var]
        ###### Coefficient
        X_t = data[final_features]
        # Repeated KFold: n_fold splits, repeated twice with different randomization.
        kf = RepeatedKFold(n_splits=self.n_fold, n_repeats=2, random_state=666)
        y = data[target_col]
        ridge_model = RidgeCV(cv=kf, fit_intercept=True, normalize=True, scoring='neg_mean_squared_error').fit(X=X_t, y=y)
        y_train_pred = ridge_model.predict(X_t)
        y_train = y
        # model coefficient
        result = pd.DataFrame(list(zip(data[final_features].columns, ridge_model.coef_.ravel())), columns=['variables', 'coef'])
        intercept = []
        intercept.append(('intercept', ridge_model.intercept_))
        df_intercept = pd.DataFrame(intercept, columns=['variables', 'coef'])
        model_result = pd.concat([result, df_intercept]).reset_index(drop=True)
        print("\n")
        print("#" * 80)
        print("Model Coefficient")
        print(model_result)
        ###### Cross-validation
        # repeated k-fold validation
        scorings = ['r2', 'neg_mean_absolute_error']
        scores = cross_validate(ridge_model,
                                X_t,
                                y.fillna(0).values.ravel(),
                                cv=kf,
                                scoring=scorings,
                                return_train_score=True)
        cv_results = pd.DataFrame(scores).reset_index()
        cv_results['test_neg_mean_absolute_error'] = abs(cv_results['test_neg_mean_absolute_error'])
        cv_results['train_neg_mean_absolute_error'] = abs(cv_results['train_neg_mean_absolute_error'])
        cv_results['test_mape'] = cv_results['test_neg_mean_absolute_error'] / np.average(y)
        cv_results['train_mape'] = cv_results['train_neg_mean_absolute_error'] / np.average(y)
        average = {'metrics': list(cv_results.columns), 'avg': list(cv_results.mean())}
        df_cv_results = pd.DataFrame(average)
        print("\n")
        print("#" * 80)
        print("Cross validation")
        print(df_cv_results)
        print("\n")
        print("#" * 80)
        print("Variable VIF")
        # NOTE(review): get_vif is not defined in this module — it must be
        # provided elsewhere or this line raises NameError.
        print(get_vif(X_t, data))
        # get optimal alpha, decay and lag from the transformed column names
        # NOTE(review): assumes media column names contain 'SPEND' — confirm.
        transformed_mkt_var = [val for val in final_features if 'SPEND' in val]
        alpha_list = [float(val.split("alpha_")[1].split("_decay")[0]) / 100 for val in transformed_mkt_var]
        decay_list = [float(val.split("_decay_")[1].split("_lag")[0]) / 10 for val in transformed_mkt_var]
        lag_list = [int(val.split("lag_")[1].split("_lag")[0]) for val in transformed_mkt_var]
        optimal_param = pd.DataFrame({'var_name': media_var,
                                      'alpha': alpha_list,
                                      'decay': decay_list,
                                      'lag': lag_list})
        return model_result, df_cv_results, X_t, ridge_model, optimal_param
|
# -*- coding: utf-8 -*-
import torch
def minibatch_stddev_layer(x, group_size=4):
    """Appends a feature map containing the standard deviation of the minibatch.

    Args:
        x: Input feature maps of shape ``[N, C, H, W]``; ``N`` must be
            divisible by (or smaller than) ``group_size``.
        group_size: Number of samples per statistics group.

    Returns:
        Tensor of shape ``[N, C + 1, H, W]``: ``x`` with one extra channel
        holding each group's average stddev, replicated over pixels.

    Note:
        Implemented as described in `this paper <https://arxiv.org/pdf/1710.10196.pdf>`_.
        `Reference <https://github.com/tkarras/progressive_growing_of_gans/blob/master/networks.py#L127-L139>`_.
    """  # noqa: E501
    # Minibatch must be divisible by (or smaller than) group_size.
    group_size = min(group_size, x.shape[0])
    s = x.shape  # [NCHW] Input shape.
    # [GMCHW] Split minibatch into M groups of size G.
    y = x.reshape(group_size, -1, s[1], s[2], s[3])
    y = y.float()  # [GMCHW] Cast to FP32.
    # BUG FIX: the original used in-place `y -= ...`. When x is already
    # contiguous float32, `reshape` returns a view and `.float()` is a no-op,
    # so the subtraction clobbered the caller's input tensor (and the returned
    # feature channels came out mean-subtracted). Subtract out-of-place.
    y = y - y.mean(dim=0, keepdim=True)  # [GMCHW] Subtract mean over group.
    y = y.pow(2).mean(dim=0)  # [MCHW] Calc variance over group.
    y = (y + 1e-8).sqrt()  # [MCHW] Calc stddev over group.
    # [M111] Take average over fmaps and pixels.
    y = y.mean(dim=[1, 2, 3], keepdim=True)
    y = y.to(x.dtype)  # [M111] Cast back to original data type.
    y = y.repeat(group_size, 1, s[2], s[3])  # [N1HW] Replicate over group and pixels.
    return torch.cat([x, y], dim=1)  # [NCHW] Append as new fmap.
|
import sys
sys.path.append('')
from src.parser.queryExecutor import Executor
import json
class Interface():
    """Facade over the query Executor for creating, querying and removing Figure nodes."""
    def __init__(self):
        self._executor = Executor()
    def add_index(self, query):
        """Run an index-creation query via the executor."""
        return self._executor.execute_indexing(query)
    def add_figures(self, data):
        """Create one Figure node per {'x': .., 'y': ..} dict (coords rounded to 2 dp)."""
        queries = []
        for d in data:
            x = round(d['x'], 2)
            y = round(d['y'], 2)
            query = "CREATE (node:Figure {x: %f, y: %f}) RETURN node" % (x, y)
            queries.append(query)
        return self._executor.execute_creation(queries)
    def add_figure(self, query):
        """Create a single node from a caller-supplied query."""
        return self._executor.execute_creation([query])
    def add_relationship(self, query):
        """Create a relationship from a caller-supplied query."""
        return self._executor.add_relationship(query)
    def _check_existence(self, x, y):
        """Return True when a Figure node with the given coordinates exists."""
        # BUG FIX: the closing parenthesis of the MATCH node pattern was
        # misplaced ("{...} RETURN node)"), producing invalid Cypher; it must
        # close the pattern before the RETURN clause.
        query = "MATCH (node:Figure {x: %f, y: %f}) RETURN node" % (x, y)
        nodes = self._executor.execute_getting(query, checkExistence=True)
        return nodes is not None
    def get_figures(self, query):
        """Fetch nodes matching a caller-supplied query."""
        return self._executor.execute_getting(query)
    def remove_node(self, query):
        """Delete nodes matching a caller-supplied query."""
        return self._executor.execute_removing(query)
"""Define an object to deal with trash/recycling data."""
import asyncio
from collections import OrderedDict
from datetime import datetime
from enum import Enum
from typing import Awaitable, Callable, Dict
from urllib.parse import quote_plus
from aiocache import cached
import pytz as tz
from ics import Calendar
from geocoder import google
from geocoder.google_reverse import GoogleReverse
from .errors import PydenError
# Recollect iCal feed for a (place, service) pair.
CALENDAR_URL = (
    "https://recollect.a.ssl.fastly.net/api/places/{0}/services/" "{1}/events.en-US.ics"
)
# Coordinates + address → Recollect place lookup endpoint.
PLACE_LOOKUP_URL = (
    "https://recollect.net/api/lookup/{0},{1}.json?"
    "service={2}&address={3}&locale={4}&postal_code={5}&"
    "street_number={6}&street_name={7}&subpremise=&locality={8}&"
    "territory={9}&country={10}"
)
# Cache schedule responses for four weeks (in seconds).
DEFAULT_CACHE_SECONDS = 60 * 60 * 24 * 7 * 4 * 1
DEFAULT_LOCALE = "en-US"
# Recollect service ID used for the lookups below.
DEFAULT_SERVICE_ID = 248
DEFAULT_TIMEZONE = tz.timezone("America/Denver")
def raise_on_invalid_place(func: Callable) -> Callable:
    """Guard an async method so it fails fast when no place ID has been set."""
    async def wrapper(self, *args: list, **kwargs: dict) -> Awaitable:
        """Check for a Recollect place ID before delegating to the wrapped coroutine."""
        if self.place_id:
            return await func(self, *args, **kwargs)
        raise PydenError("No Recollect place ID given")
    return wrapper
class Trash:
    """Client for trash/recycling pickup schedules served by the Recollect API."""
    class PickupTypes(Enum):
        """Pickup categories that can appear on a calendar event."""
        compost = "Compost"
        extra_trash = "Extra Trash"
        recycling = "Recycling"
        trash = "Trash"
    def __init__(
        self, request: Callable[..., Awaitable], loop: asyncio.AbstractEventLoop
    ) -> None:
        """Initialize."""
        self._loop = loop  # event loop used to off-load blocking geocoding calls
        self._request = request  # caller-supplied async HTTP request function
        self.place_id = None  # Recollect place ID; set by init_from_coords()
    @staticmethod
    def _get_geo_data(
        latitude: float, longitude: float, google_api_key: str
    ) -> GoogleReverse:
        """Return geo data from a set of coordinates."""
        # Blocking call into the geocoder library; callers run it via
        # run_in_executor to keep the event loop responsive.
        return google([latitude, longitude], key=google_api_key, method="reverse")
    async def init_from_coords(
        self, latitude: float, longitude: float, google_api_key: str
    ) -> None:
        """Initialize the client from a set of coordinates."""
        # Reverse-geocode the coordinates into a street address...
        geo = await self._loop.run_in_executor(
            None, self._get_geo_data, latitude, longitude, google_api_key
        )
        # ...then resolve that address to a Recollect place ID.
        lookup = await self._request(
            "get",
            PLACE_LOOKUP_URL.format(
                latitude,
                longitude,
                DEFAULT_SERVICE_ID,
                quote_plus(
                    "{0} {1}, {2}, {3}, {4}".format(
                        geo.housenumber,
                        geo.street_long,
                        geo.city,
                        geo.state_long,
                        geo.country_long,
                    )
                ),
                DEFAULT_LOCALE,
                geo.postal,
                geo.housenumber,
                quote_plus(geo.street_long),
                quote_plus(geo.city),
                quote_plus(geo.state_long),
                quote_plus(geo.country_long),
            ),
        )
        try:
            self.place_id = lookup["place"]["id"]
        except (KeyError, TypeError):
            raise PydenError("Unable to find Recollect place ID")
    @raise_on_invalid_place
    async def next_pickup(self, pickup_type: Enum) -> datetime:  # type: ignore
        """Figure out the next pickup date for a particular type."""
        # The schedule is sorted ascending, so the first match is the next pickup.
        # NOTE(review): implicitly returns None when nothing matches — callers
        # should handle that case.
        schedule = await self.upcoming_schedule()
        for date, pickups in schedule.items():
            if pickups[pickup_type]:
                return date
    @cached(ttl=DEFAULT_CACHE_SECONDS)
    @raise_on_invalid_place
    async def upcoming_schedule(self) -> Dict[datetime, Dict[Enum, bool]]:
        """Get the upcoming trash/recycling schedule for the location."""
        events = OrderedDict()  # type: dict
        resp = await self._request(
            "get", CALENDAR_URL.format(self.place_id, DEFAULT_SERVICE_ID), kind="text"
        )
        calendar = Calendar(resp)
        now = DEFAULT_TIMEZONE.localize(datetime.now())
        for event in calendar.events:
            # NOTE(review): replace(tzinfo=<pytz zone>) attaches the zone's raw
            # (LMT) offset rather than the correct local offset — confirm
            # whether localize()/astimezone() was intended here.
            pickup_date = event.begin.datetime.replace(tzinfo=DEFAULT_TIMEZONE)
            if now <= pickup_date:
                title = event.name.lower()
                # Only keep future events whose title mentions "trash";
                # flag the extra categories by keyword.
                if "trash" in title:
                    events[pickup_date] = {
                        self.PickupTypes.compost: "compost" in title,
                        self.PickupTypes.extra_trash: "extra trash" in title,
                        self.PickupTypes.recycling: "recycl" in title,
                        self.PickupTypes.trash: "trash" in title,
                    }
        return OrderedDict(sorted(events.items(), reverse=False))
|
import argparse
import json
import logging
import sys
import time
from datetime import datetime
import paho.mqtt.client as client
import paho.mqtt.publish as publish
from Sensors import Sensors
from getmac import get_mac_address
from pytz import timezone
# Author: Gary A. Stafford
# Date: 10/11/2020
# Usage: python3 sensor_data_to_mosquitto.py \
# --host "192.168.1.12" --port 1883 \
# --topic "sensor/output" --frequency 10
# Module-level singletons: the sensor interface and a DEBUG logger to stdout.
sensors = Sensors()
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG)
def main():
    """Entry point: parse the CLI options and start the publishing loop."""
    options = parse_args()
    publish_message_to_db(options)
def get_readings():
    """Sample every sensor once and package the values as a message dict."""
    sensors.led_state(0)
    # Retrieve sensor readings
    dht = sensors.get_sensor_data_dht()
    gas = sensors.get_sensor_data_gas()
    light = sensors.get_sensor_data_light()
    motion = sensors.get_sensor_data_motion()
    readings = {
        "temperature": dht["temperature"],
        "humidity": dht["humidity"],
        "lpg": gas["lpg"],
        "co": gas["co"],
        "smoke": gas["smoke"],
        "light": light["light"],
        "motion": motion["motion"],
    }
    return {
        "device_id": get_mac_address(),
        "time": datetime.now(timezone("UTC")),
        "data": readings,
    }
def date_converter(o):
    """json.dumps fallback: render datetimes as strings; anything else becomes None."""
    return str(o) if isinstance(o, datetime) else None
def publish_message_to_db(args):
    """Poll the sensors forever, publishing one compact JSON message per cycle over MQTT."""
    while True:
        payload = json.dumps(get_readings(), default=date_converter,
                             sort_keys=True, indent=None, separators=(',', ':'))
        logger.debug(payload)
        try:
            publish.single(args.topic, payload=payload, qos=0, retain=False,
                           hostname=args.host, port=args.port, client_id="",
                           keepalive=60, will=None, auth=None, tls=None,
                           protocol=client.MQTTv311, transport="tcp")
        except Exception as error:
            logger.error("Exception: {}".format(error))
        finally:
            # Wait out the configured interval whether or not the publish succeeded.
            time.sleep(args.frequency)
# Read in command-line parameters
def parse_args():
    """Build the argument parser and return the parsed options."""
    parser = argparse.ArgumentParser(description='Script arguments')
    for flag, options in (
        ('--host', dict(help='Mosquitto host', default='localhost')),
        ('--port', dict(help='Mosquitto port', type=int, default=1883)),
        ('--topic', dict(help='Mosquitto topic', default='paho/test')),
        ('--frequency', dict(help='Message frequency in seconds', type=int, default=5)),
    ):
        parser.add_argument(flag, **options)
    return parser.parse_args()
if __name__ == "__main__":
main()
|
import time
import pytest
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
@pytest.fixture
def driver(request):
    """Per-test Chrome WebDriver; quits automatically at test teardown."""
    # Chrome:
    wd = webdriver.Chrome()
    print(wd.capabilities)
    request.addfinalizer(wd.quit)
    return wd
def test_countries(driver):
    """Admin 'Countries' page: country names are sorted alphabetically and,
    for every country that has zones, its zones are sorted as well."""
    driver.get("http://localhost/litecart/admin/")
    # BUG FIX: find_element_by_* helpers were removed in Selenium 4;
    # use the By-locator API (By is already imported in this module).
    driver.find_element(By.NAME, "username").send_keys("admin")
    driver.find_element(By.NAME, "password").send_keys("admin")
    driver.find_element(By.NAME, "login").click()
    driver.get("http://localhost/litecart/admin/?app=countries&doc=countries")
    wait = WebDriverWait(driver, 15)
    rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "[name=countries_form] tr.row")))
    number_of_rows = len(rows)
    print(number_of_rows)
    country_names = [0 for i in range(number_of_rows)]
    for x in range(0, number_of_rows):
        row = rows[x]
        columns = row.find_elements(By.CSS_SELECTOR, "td")
        country = columns[4]
        zones = columns[5]
        # check sorting for county names:
        country_names[x] = country.text
        print(country_names[x])
        if (x > 0):
            assert country_names[x - 1] < country_names[x]
        # (b)
        number_of_zones = int(zones.text)
        print(number_of_zones)
        if (number_of_zones > 0):
            country.find_element(By.CSS_SELECTOR, "a").click()
            zones_rows = wait.until(
                EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#table-zones tr:not(.header)")))
            # The last row is for adding new zone, it is not zone itself!
            number_of_zones_rows = len(zones_rows) - 1
            zone_names = [0 for i in range(number_of_zones_rows)]
            for y in range(0, number_of_zones_rows):
                zones_row = zones_rows[y]
                zone_cell = zones_row.find_elements(By.CSS_SELECTOR, "td")[2]
                zone_names[y] = zone_cell.text
                # zone_names[y] = zone_cell.find_element(By.CSS_SELECTOR , "input").get_attribute("value")
                print(zone_names[y])
                if (y > 0):
                    assert zone_names[y - 1] < zone_names[y]
            # Come back and re-fetch the rows to avoid stale element references.
            driver.get("http://localhost/litecart/admin/?app=countries&doc=countries")
            rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "[name=countries_form] tr.row")))
def test_geo_zones(driver):
    """Admin 'Geo Zones' page: zones inside every geo zone are sorted by name."""
    driver.get("http://localhost/litecart/admin/")
    # BUG FIX: find_element_by_* helpers were removed in Selenium 4;
    # use the By-locator API (By is already imported in this module).
    driver.find_element(By.NAME, "username").send_keys("admin")
    driver.find_element(By.NAME, "password").send_keys("admin")
    driver.find_element(By.NAME, "login").click()
    # 2) Check zones sorting for each country at the page http://localhost/litecart/admin/?app=geo_zones&doc=geo_zones
    driver.get("http://localhost/litecart/admin/?app=geo_zones&doc=geo_zones")
    wait = WebDriverWait(driver, 15)
    rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "[name=geo_zones_form] tr.row")))
    number_of_rows = len(rows)
    print(number_of_rows)
    country_names = [0 for i in range(number_of_rows)]
    for x in range(0, number_of_rows):
        row = rows[x]
        columns = row.find_elements(By.CSS_SELECTOR, "td")
        country = columns[2]
        print(country.text)
        country.find_element(By.CSS_SELECTOR, "a").click()
        zones_rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "#table-zones tr:not(.header)")))
        # The last row is the "add new zone" form row, not a zone.
        number_of_zones_rows = len(zones_rows) - 1
        print(number_of_zones_rows)
        zone_names = [0 for i in range(number_of_zones_rows)]
        for y in range(0, number_of_zones_rows):
            zones_row = zones_rows[y]
            zone_cell = zones_row.find_elements(By.CSS_SELECTOR, "td")[2]
            zone_names[y] = zone_cell.find_element(By.CSS_SELECTOR, "option[selected=selected]").text
            print(zone_names[y])
            if (y > 0):
                assert zone_names[y - 1] < zone_names[y]
        # Come back and re-fetch the rows to avoid stale element references.
        driver.get("http://localhost/litecart/admin/?app=geo_zones&doc=geo_zones")
        rows = wait.until(EC.presence_of_all_elements_located((By.CSS_SELECTOR, "[name=geo_zones_form] tr.row")))
from typing import Dict
from aenum import Enum
from aerisweather.requests.ParameterType import ParameterType
from aerisweather.requests.RequestAction import RequestAction
from aerisweather.requests.RequestFilter import RequestFilter
from aerisweather.requests.RequestLocation import RequestLocation
from aerisweather.requests.RequestQuery import RequestQuery
from aerisweather.requests.RequestSort import RequestSort
class EndpointType(Enum):
    """ Defines the available endpoints for Aeris API requests.
    When requesting data from an unimplemented endpoint, use the CUSTOM type and set the name of the endpoint
    using the "custom" property.
    Examples:
        # ObservationSummary
        endpoint = Endpoint(endpoint_type=EndpointType.OBSERVATIONS_SUMMARY)
        # Custom Endpoint
        EndpointType.custom = "stormreports"
        endpt = Endpoint(EndpointType.CUSTOM, location=RequestLocation(postal_code="54660"))
    """
    # Member values are the URL path fragments used by the Aeris API.
    ALERTS = "advisories"
    CONDITIONS = "conditions"
    CONVECTIVE_OUTLOOK = "convective/outlook"
    FORECASTS = "forecasts"
    OBSERVATIONS = "observations"
    OBSERVATIONS_SUMMARY = "observations/summary"
    PLACES = "places"
    CUSTOM = "custom"
    # Backing field for the `custom` property below (descriptors and
    # double-underscore names are not treated as enum members).
    # NOTE(review): the docstring example assigns EndpointType.custom = "...",
    # which replaces the property on the class rather than invoking the setter
    # below — confirm that is the intended usage.
    __custom_endpoint_type_name = ""
    @property
    def custom(self):
        """ Returns the string name of the custom/generic endpoint used when CUSTOM is the endpoint type """
        return self.__custom_endpoint_type_name
    @custom.setter
    def custom(self, endpoint_type: str):
        """ Sets the string name of the custom/generic endpoint used when CUSTOM is the endpoint type """
        self.__custom_endpoint_type_name = endpoint_type
class Endpoint:
    """ Defines an object used to hold and transfer information regarding a specific Aeris API endpoint """
    def __init__(self,
                 endpoint_type: EndpointType = None,
                 location: RequestLocation = None,
                 action: RequestAction = None,
                 filter_: [RequestFilter] = None,
                 sort: RequestSort = None,
                 params: Dict[ParameterType, str] = None,
                 query: Dict[RequestQuery, str] = None):
        """ Build an endpoint description.

        Every argument is optional; configuration may also happen after
        construction.  Implemented endpoint types live in the EndpointType
        enum; anything not implemented can be requested via EndpointType.CUSTOM.

        Params:
        - endpoint_type: Optional - EndpointType - which Aeris API endpoint will be called
        - location: Optional - RequestLocation - the location for which the request is processed
        - action: Optional - RequestAction - the API request action option
        - filter_: Optional - [RequestFilter] - a list of API request filters
        - sort: Optional - RequestSort - the API request sort option
        - params: Optional - Dict[ParameterType, str] - a list of API request parameters
        - query: Optional - Dict[RequestQuery, str] - a list of API request queries
        """
        # Plain value object: store everything verbatim.
        self.query = query
        self.params = params
        self.sort = sort
        self.filter_ = filter_
        self.action = action
        self.location = location
        self.endpoint_type = endpoint_type
|
# Python
import os
import sys
import traceback
# Pycompss
from pycompss.api.task import task
from pycompss.api.parameter import FILE_IN, FILE_OUT
# Adapters commons pycompss
from biobb_adapters.pycompss.biobb_commons import task_config
# Wrapped Biobb
from biobb_analysis.gromacs.gmx_rgyr import GMXRgyr # Importing class instead of module to avoid name collision
task_time_out = int(os.environ.get('TASK_TIME_OUT', 0))
@task(input_structure_path=FILE_IN, input_traj_path=FILE_IN, output_xvg_path=FILE_OUT, input_index_path=FILE_IN,
      on_failure="IGNORE", time_out=task_time_out)
def _gmxrgyr(input_structure_path, input_traj_path, output_xvg_path, input_index_path, properties, **kwargs):
    """PyCOMPSs task that wraps GMXRgyr.launch(); file staging is handled by the decorator."""
    # Remove PMI-related environment variables inherited from the runtime
    task_config.pop_pmi(os.environ)
    try:
        GMXRgyr(input_structure_path=input_structure_path, input_traj_path=input_traj_path, output_xvg_path=output_xvg_path, input_index_path=input_index_path, properties=properties, **kwargs).launch()
    except Exception as e:
        # Print the full traceback before re-raising so the worker log shows the cause
        traceback.print_exc()
        raise e
    finally:
        # Flush so output is not lost when the worker process is torn down
        sys.stdout.flush()
        sys.stderr.flush()
def gmx_rgyr(input_structure_path, input_traj_path, output_xvg_path, input_index_path=None, properties=None, **kwargs):
    """Submit the GMXRgyr task unless its output already exists.

    Skips submission (with a warning) when output_xvg_path is None or the file
    is already present, so re-running a workflow does not recompute results.
    """
    # Bug fix: the original condition ended with a vestigial "and True".
    if output_xvg_path is None or os.path.exists(output_xvg_path):
        print("WARN: Task GMXRgyr already executed.")
    else:
        _gmxrgyr(input_structure_path, input_traj_path, output_xvg_path, input_index_path, properties, **kwargs)
import requests
import os
import random
import string
import json
chars = string.ascii_letters + string.digits + '!@#$%^&*()'
# This is an array of email extensions; you can add whatever in here
# todo: put this in a json file instead
emails = ["outlook.com", "gmail.com", "yahoo.com", "hotmail.com", "aol.com", "icloud.com", "mail.com", "yeetmail.com"]
random.seed = (os.urandom(1024))
# This is the URL you want to flood. Should be a login page
url = 'website_url'
names = json.loads(open('names.json').read())
random.shuffle(names)
n = 0
for name in names:
name_extra = ''.join(random.choice(string.digits))
email = name.lower() + name_extra + '@' + random.choice(emails)
username = name.lower() + name_extra
password = ''.join(random.choice(chars) for i in range(8))
uore = [email, username]
user = random.choice(uore)
r = requests.post(url, allow_redirects=False, data={
'username': user,
'password': password,
#'g-captcha-response': '03ADlfD1-Uag3u7R-igURj7sfbiP_1r1cRjxIKdpbpvqP6Q5RJm6vAAQTTrjYhoC0-EVWRmX7MCMJSxEVrtSMim0RF-CFwL6BMoJcb-3lBrMMP2JGzxm5hOvgrzLIvEjV_2q41UzzByQ91AzSs9NRvQWYdJ7GV5ZeFxu-RJli0DnuV-APV-55spFSU6sjrJKLCs2-b-MZfwGZ3_AcKrdUD7Ui-KFCWFVlclGb0exoZHIDl6ALPHWtcYN409La_H2LQB27aLbEsLrpiWoghIXPwj__F0o2fN4jvuQ'
})
n += 1
print ("%s. Yeeting username %s and password %s. Response: %s. Response time: %s" % (n, user, password, r.status_code, r.elapsed.total_seconds()))
|
"""Base with abstract classes for all parsers."""
from abc import ABC, abstractmethod
from typing import Dict, List, Union
from pydantic import BaseModel
class BaseParser(ABC):
    """Base structure for parsers."""

    model = BaseModel  # Pydantic model to describe the response

    def __init__(self, content_response: str):
        """
        Instantiate your parser on the response.

        Args:
            content_response: content response from the API
        """
        self.parsed_content: Dict[str, Union[str, dict]] = {}
        stripped = content_response.rstrip()
        self.lines = stripped.split("\n")

    @abstractmethod
    def parse(self):
        """Perform parsing of the ``content_response``."""

    @property
    def validated_entry(self) -> BaseModel:
        """
        Retrieve entry validated with the model.

        Returns:
            Validated entry.
        """
        entry = self.model(**self.parsed_content)
        return entry
class BaseListParser(ABC):
    """Base structure for parsers producing a list of entries."""

    model = BaseModel  # Pydantic model to describe the response

    def __init__(self, content_response: str):
        """
        Instantiate your parser on the response.

        Args:
            content_response: content response from the API
        """
        self.parsed_content: List[dict] = []
        stripped = content_response.rstrip()
        self.lines = stripped.split("\n")

    @abstractmethod
    def parse(self):
        """Perform parsing of the ``content_response``."""

    @property
    def validated_model(self) -> BaseModel:
        """
        Retrieve entries validated with the model.

        Returns:
            Validated model wrapping the parsed entries.
        """
        validated = self.model(entries=self.parsed_content)
        return validated
|
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
import asyncio
import json
import os
import time
from unittest.mock import Mock, patch
import pytest
from azure.core.credentials import AccessToken
from azure.core.exceptions import ClientAuthenticationError
from azure.identity.aio import (
ChainedTokenCredential,
ClientSecretCredential,
DefaultAzureCredential,
EnvironmentCredential,
ManagedIdentityCredential,
)
from azure.identity.aio._credentials.managed_identity import ImdsCredential
from azure.identity._constants import EnvironmentVariables
from helpers import mock_response, Request
from helpers_async import async_validating_transport, wrap_in_future
@pytest.mark.asyncio
async def test_client_secret_environment_credential():
    """EnvironmentCredential should read client id/secret/tenant from env vars and send them to the token endpoint."""
    client_id = "fake-client-id"
    secret = "fake-client-secret"
    tenant_id = "fake-tenant-id"
    access_token = "***"
    # the validating transport asserts the outgoing request targets the tenant
    # and carries the expected client credentials
    transport = async_validating_transport(
        requests=[Request(url_substring=tenant_id, required_data={"client_id": client_id, "client_secret": secret})],
        responses=[
            mock_response(
                json_payload={
                    "token_type": "Bearer",
                    "expires_in": 42,
                    "ext_expires_in": 42,
                    "access_token": access_token,
                }
            )
        ],
    )
    environment = {
        EnvironmentVariables.AZURE_CLIENT_ID: client_id,
        EnvironmentVariables.AZURE_CLIENT_SECRET: secret,
        EnvironmentVariables.AZURE_TENANT_ID: tenant_id,
    }
    with patch("os.environ", environment):
        token = await EnvironmentCredential(transport=transport).get_token("scope")
    # not validating expires_on because doing so requires monkeypatching time, and this is tested elsewhere
    assert token.token == access_token
@pytest.mark.asyncio
async def test_credential_chain_error_message():
    """When every credential in the chain fails, the raised error should name each credential and its message."""
    def raise_authn_error(message):
        raise ClientAuthenticationError(message)

    first_error = "first_error"
    # spec=ClientSecretCredential so the chain reports the real class name
    first_credential = Mock(spec=ClientSecretCredential, get_token=lambda _: raise_authn_error(first_error))
    second_error = "second_error"
    second_credential = Mock(name="second_credential", get_token=lambda _: raise_authn_error(second_error))
    with pytest.raises(ClientAuthenticationError) as ex:
        await ChainedTokenCredential(first_credential, second_credential).get_token("scope")
    assert "ClientSecretCredential" in ex.value.message
    assert first_error in ex.value.message
    assert second_error in ex.value.message
@pytest.mark.asyncio
async def test_chain_attempts_all_credentials():
    """Each failing credential should be tried exactly once before a later credential succeeds."""
    async def raise_authn_error(message="it didn't work"):
        raise ClientAuthenticationError(message)

    expected_token = AccessToken("expected_token", 0)
    credentials = [
        Mock(get_token=Mock(wraps=raise_authn_error)),
        Mock(get_token=Mock(wraps=raise_authn_error)),
        Mock(get_token=wrap_in_future(lambda _: expected_token)),
    ]
    token = await ChainedTokenCredential(*credentials).get_token("scope")
    assert token is expected_token
    # every failing credential must have been consulted exactly once
    for credential in credentials[:-1]:
        assert credential.get_token.call_count == 1
@pytest.mark.asyncio
async def test_chain_returns_first_token():
    """The chain should stop at the first credential that returns a token."""
    token = Mock()
    successful_credential = Mock(get_token=wrap_in_future(lambda _: token))
    unreached_credential = Mock(get_token=Mock())
    chain = ChainedTokenCredential(successful_credential, unreached_credential)
    result = await chain.get_token("scope")
    assert result is token
    # the second credential must never have been consulted
    assert unreached_credential.get_token.call_count == 0
@pytest.mark.asyncio
async def test_imds_credential_cache():
    """ImdsCredential should cache tokens, refreshing only when the cached token has expired."""
    scope = "https://foo.bar"
    expired = "this token's expired"
    now = int(time.time())
    token_payload = {
        "access_token": expired,
        "refresh_token": "",
        "expires_in": 0,
        "expires_on": now - 300,  # expired 5 minutes ago
        "not_before": now,
        "resource": scope,
        "token_type": "Bearer",
    }
    # the transport always serves the (mutable) token_payload dict
    mock_response = Mock(
        text=lambda encoding=None: json.dumps(token_payload),
        headers={"content-type": "application/json"},
        status_code=200,
        content_type="application/json",
    )
    mock_send = Mock(return_value=mock_response)
    credential = ImdsCredential(transport=Mock(send=wrap_in_future(mock_send)))
    token = await credential.get_token(scope)
    assert token.token == expired
    assert mock_send.call_count == 2  # first request was probing for endpoint availability
    # calling get_token again should provoke another HTTP request
    good_for_an_hour = "this token's good for an hour"
    token_payload["expires_on"] = int(time.time()) + 3600
    token_payload["expires_in"] = 3600
    token_payload["access_token"] = good_for_an_hour
    token = await credential.get_token(scope)
    assert token.token == good_for_an_hour
    assert mock_send.call_count == 3
    # get_token should return the cached token now
    token = await credential.get_token(scope)
    assert token.token == good_for_an_hour
    assert mock_send.call_count == 3
@pytest.mark.asyncio
async def test_imds_credential_retries():
    """ImdsCredential should exhaust its configured retries for each retryable HTTP status."""
    mock_response = Mock(
        text=lambda encoding=None: b"{}",
        headers={"content-type": "application/json", "Retry-After": "0"},
        content_type="application/json",
    )
    mock_send = Mock(return_value=mock_response)
    total_retries = ImdsCredential._create_config().retry_policy.total_retries
    for status_code in (404, 429, 500):
        mock_send.reset_mock()
        mock_response.status_code = status_code
        try:
            await ImdsCredential(
                transport=Mock(send=wrap_in_future(mock_send), sleep=wrap_in_future(lambda _: None))
            ).get_token("scope")
        except ClientAuthenticationError:
            pass
        # first call was availability probe, second the original request;
        # credential should have then exhausted retries for each of these status codes
        assert mock_send.call_count == 2 + total_retries
@pytest.mark.asyncio
async def test_managed_identity_app_service():
    """App Service managed identity requests must target MSI_ENDPOINT and carry the MSI secret header."""
    # in App Service, MSI_SECRET and MSI_ENDPOINT are set
    msi_secret = "secret"
    success_message = "test passed"

    async def validate_request(req, *args, **kwargs):
        assert req.url.startswith(os.environ[EnvironmentVariables.MSI_ENDPOINT])
        assert req.headers["secret"] == msi_secret
        # short-circuit the request with a sentinel exception once validated
        exception = Exception()
        exception.message = success_message
        raise exception

    environment = {EnvironmentVariables.MSI_SECRET: msi_secret, EnvironmentVariables.MSI_ENDPOINT: "https://foo.bar"}
    with pytest.raises(Exception) as ex:
        with patch("os.environ", environment):
            await ManagedIdentityCredential(transport=Mock(send=validate_request)).get_token("https://scope")
    # the sentinel surfacing proves validate_request's asserts all passed
    assert ex.value.message is success_message
@pytest.mark.asyncio
async def test_managed_identity_cloud_shell():
    """Cloud Shell managed identity requests must target MSI_ENDPOINT and set the Metadata header."""
    # in Cloud Shell, only MSI_ENDPOINT is set
    msi_endpoint = "https://localhost:50432"
    success_message = "test passed"

    async def validate_request(req, *args, **kwargs):
        assert req.headers["Metadata"] == "true"
        assert req.url.startswith(os.environ[EnvironmentVariables.MSI_ENDPOINT])
        # short-circuit the request with a sentinel exception once validated
        exception = Exception()
        exception.message = success_message
        raise exception

    environment = {EnvironmentVariables.MSI_ENDPOINT: msi_endpoint}
    with pytest.raises(Exception) as ex:
        with patch("os.environ", environment):
            await ManagedIdentityCredential(transport=Mock(send=validate_request)).get_token("https://scope")
    # the sentinel surfacing proves validate_request's asserts all passed
    assert ex.value.message is success_message
@pytest.mark.asyncio
async def test_default_credential_shared_cache_use():
    """DefaultAzureCredential should instantiate SharedTokenCacheCredential only when the platform supports it."""
    with patch("azure.identity.aio._credentials.default.SharedTokenCacheCredential") as mock_credential:
        mock_credential.supported = Mock(return_value=False)
        # unsupported platform -> default credential shouldn't use shared cache
        credential = DefaultAzureCredential()
        assert mock_credential.call_count == 0
        assert mock_credential.supported.call_count == 1
        mock_credential.supported.reset_mock()
        mock_credential.supported = Mock(return_value=True)
        # supported platform -> default credential should use shared cache
        credential = DefaultAzureCredential()
        assert mock_credential.call_count == 1
        assert mock_credential.supported.call_count == 1
        mock_credential.supported.reset_mock()
|
# Copyright (c) 2019-2020 by Rocky Bernstein
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Isolate Python 3.8 version-specific semantic actions here.
"""
########################
# Python 3.8+ changes
#######################
from decompyle3.semantics.consts import PRECEDENCE, TABLE_DIRECT
def customize_for_version38(self, version):
    """Install Python 3.8+ specific source-text templates into TABLE_DIRECT.

    Each entry maps a grammar nonterminal to a format string plus the child
    nodes substituted into it: ``%c`` pulls a child (index, expected-name
    tuple), ``%p`` a child with a precedence bound, ``%|`` indents,
    ``%+``/``%-`` push/pop the indent level.
    """
    # FIXME: pytest doesn't add proper keys in testing. Reinstate after we have fixed pytest.
    # for lhs in 'for forelsestmt forelselaststmt '
    #            'forelselaststmtc tryfinally38'.split():
    #     del TABLE_DIRECT[lhs]
    TABLE_DIRECT.update({
        "async_for_stmt38": (
            "%|async for %c in %c:\n%+%c%-%-\n\n",
            (2, "store"), (0, "expr"), (3, "for_block") ),
        "async_forelse_stmt38": (
            "%|async for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
            (2, "store"), (0, "expr"), (3, "for_block"), (-1, "else_suite") ),
        "async_with_stmt38": (
            "%|async with %c:\n%+%|%c%-",
            (0, "expr"), 7),
        "async_with_as_stmt38": (
            "%|async with %c as %c:\n%+%|%c%-",
            (0, "expr"), (6, "store"),
            (7, "suite_stmts")
        ),
        "c_forelsestmt38": (
            "%|for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
            (2, "store"),
            (0, "expr"),
            (3, "for_block"), -1 ),
        "except_cond1a": (
            "%|except %c:\n", (1, "expr"),
        ),
        "except_cond_as": (
            "%|except %c as %c:\n",
            (1, "expr"),
            (-2, "STORE_FAST"),
        ),
        "except_handler38": (
            "%c", (2, "except_stmts") ),
        "except_handler38a": (
            "%c", (-2, "stmts") ),
        "except_handler38c": (
            "%c%+%c%-",
            (1, "except_cond1a"),
            (2, "except_stmts"),
        ),
        "except_handler_as": (
            "%c%+\n%+%c%-",
            (1, "except_cond_as"),
            (2, "tryfinallystmt"),
        ),
        "except_ret38a": (
            "return %c", (4, "expr") ),
        # Note: there is a suite_stmts_opt which seems
        # to be bookkeeping which is not expressed in source code
        "except_ret38": ( "%|return %c\n", (1, "expr") ),
        "for38": (
            "%|for %c in %c:\n%+%c%-\n\n",
            (2, "store"),
            (0, "expr"),
            (3, "for_block") ),
        "forelsestmt38": (
            "%|for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
            (2, "store"),
            (0, "expr"),
            (3, "for_block"), -1 ),
        "forelselaststmt38": (
            "%|for %c in %c:\n%+%c%-%|else:\n%+%c%-",
            (2, "store"),
            (0, "expr"),
            (3, "for_block"), -2 ),
        "forelselaststmtc38": (
            "%|for %c in %c:\n%+%c%-%|else:\n%+%c%-\n\n",
            (2, "store"),
            (0, "expr"),
            (3, "for_block"), -2 ),
        "ifpoplaststmtc": ( "%|if %c:\n%+%c%-",
                            (0, "testexpr"),
                            (2, "c_stmts" ) ),
        "pop_return": ( "%|return %c\n", (1, "ret_expr") ),
        "popb_return": ( "%|return %c\n", (0, "ret_expr") ),
        "pop_ex_return": (
            "%|return %c\n", (0, "ret_expr")
        ),
        "whilestmt38": (
            "%|while %c:\n%+%c%-\n\n",
            (1, "testexpr"),
            (2, ("c_stmts", "pass"))
        ),
        # NOTE(review): entry below is a 3-tuple (1, "c_stmts", "pass") unlike the
        # (index, (name, name)) shape used elsewhere — confirm intended.
        "whileTruestmt38": (
            "%|while True:\n%+%c%-\n\n",
            (1, "c_stmts", "pass"),
        ),
        "try_elsestmtl38": (
            "%|try:\n%+%c%-%c%|else:\n%+%c%-",
            (1, "suite_stmts_opt"),
            (3, "except_handler38"),
            (5, "else_suitec")
        ),
        "try_except38": (
            "%|try:\n%+%c\n%-%|except:\n%+%c%-\n\n",
            (2, ("suite_stmts_opt", "suite_stmts")),
            (3, ("except_handler38a", "except_handler38b", "except_handler38c"))
        ),
        "try_except38r": (
            "%|try:\n%+%c\n%-%|except:\n%+%c%-\n\n",
            (1, "return_except"),
            (2, "except_handler38b"),
        ),
        "try_except_as": (
            "%|try:\n%+%c%-\n%|%-%c\n\n",
            (-4, "suite_stmts"),  # Go from the end because of POP_BLOCK variation
            (-3, "except_handler_as"),
        ),
        "try_except_ret38": (
            "%|try:\n%+%c%-\n%|except:\n%+%|%c%-\n\n",
            (1, "returns"),
            (2, "except_ret38a"),
        ),
        "try_except_ret38a": (
            "%|try:\n%+%c%-%c\n\n",
            (1, "returns"),
            (2, "except_handler38c"),
        ),
        "tryfinally38rstmt": (
            "%|try:\n%+%c%-%|finally:\n%+%c%-\n\n",
            (0, "sf_pb_call_returns"),
            (-1, ("ss_end_finally", "suite_stmts")),
        ),
        # NOTE(review): "-2, "ss_end_finally"" below is two separate arguments, not
        # the (index, name) tuple used in sibling entries — possibly a missing pair
        # of parentheses; confirm against the engine before changing.
        "tryfinally38rstmt2": (
            "%|try:\n%+%c%-%|finally:\n%+%c%-\n\n",
            (4, "returns"),
            -2, "ss_end_finally"
        ),
        "tryfinally38rstmt3": (
            "%|try:\n%+%|return %c%-\n%|finally:\n%+%c%-\n\n",
            (1, "expr"),
            (-1, "ss_end_finally")
        ),
        "tryfinally38stmt": (
            "%|try:\n%+%c%-%|finally:\n%+%c%-\n\n",
            (1, "suite_stmts_opt"),
            (6, "suite_stmts_opt") ),
        "tryfinally38astmt": (
            "%|try:\n%+%c%-%|finally:\n%+%c%-\n\n",
            (2, "suite_stmts_opt"),
            (8, "suite_stmts_opt") ),
        "named_expr": (  # AKA "walrus operator"
            "%c := %p", (2, "store"), (0, "expr", PRECEDENCE["named_expr"]-1)
        )
    })
|
from spira.yevon.io.input_gdsii import *
from spira.yevon.io.output_gdsii import *
|
from django.shortcuts import render
from django.template import Context
from . import forms, models
from django.http import HttpResponse, HttpResponseRedirect
from django.views.generic import CreateView
#beautiful soup is just an awesome name for a python package
#urllib not so much, a bit basic iff i'm honest
import urllib
from bs4 import BeautifulSoup
prevUrl=None
ays=None
def index(request):
    """Render the landing page."""
    return render(request, 'peershake/index.html')
# def simpleSearch(request):
# if request.method =="GET":
# form = forms.SimpleSearchForms(request.GET)
# if form.is_valid():
# main = form.cleaned_data["simpleMain"]
# #get main title, search for all in simpleSearch model that have that title
# paper = models.SearchQuery.objects.get(paperTitle=main)
# doi = paper.paperDOI
# return HttpResponseRedirect('https://www.biorxiv.org/content/10.1101/'+doi+'v1')
# else:
# return HttpResponse("it didn't work")
# else:
# return HttpResponse("it didn't work")
################################
#doi parser
#only works for BioArxiv
def doiCatcher(url):
    """Extract a DOI-like string from a bioRxiv URL.

    Keeps digits, turns path separators (after the first digits/dots appear)
    into spaces, and keeps dots beyond the first three characters of the URL.
    The first three accumulated characters (the dots/spaces collected from the
    host name) are stripped from the result.

    Params:
        url: the bioRxiv article URL to scan
    Returns:
        The extracted DOI fragment (only works for bioRxiv-style URLs).
    """
    doi = ""
    i = 0
    for s in url:
        try:
            int(s)
            doi += s
        # Bug fix: the original bare "except:" also swallowed KeyboardInterrupt
        # and friends; int() on a single character can only raise ValueError.
        except ValueError:
            if s == "/" and doi != "":
                doi += " "
            elif s == "." and i not in (0, 1, 2):
                doi += "."
        i += 1
    return doi[3:]
###############################
def chromeExtension(request):
    """Persist a comment posted by the Chrome extension for the paper named in the query string.

    Title/URL come from GET parameters even on POST; the DOI is derived from the URL.
    """
    title = request.GET.get('title')
    url = request.GET.get('url')
    doi = doiCatcher(url)
    if request.method == "POST":
        form = forms.ChromeForms(request.POST)
        if form.is_valid():
            name = form.cleaned_data['name']
            genComment = form.cleaned_data['genComment']
            specComment = form.cleaned_data['specComment']
            email = form.cleaned_data['email']
            #aYs = email
            #change DOI=to necessary code to autoget from webpage
            cE = models.ChromeExtension(paperTitleCE=title, genComment=genComment, specComment=specComment, name=name, email=email, url=url, doi=doi)
            cE.save()
            return HttpResponseRedirect('/peershake/thankyou/')
        else:
            print(form.errors)
            return HttpResponse("it didn't work1")
    else:
        return HttpResponse("it didn't work2")
def chromeExtensionBase(request):
    """Render the Chrome-extension comment form, pre-populated from query parameters."""
    paper_title = request.GET.get('title')
    paper_url = request.GET.get('url')
    print(paper_title, paper_url)  # debug trace, kept from the original
    template_context = {
        "form": forms.ChromeForms,
        "title": paper_title,
        "url": paper_url,
    }
    return render(request, 'peershake/ce.html', context=template_context)
def displayCommentForm(request, title):
    """Handle the email-lookup form for a paper's comments.

    On valid POST, redirect to the per-email comment view; on GET, render the
    form; on invalid POST, fall through to an error response.
    """
    if request.method == "POST":
        # NOTE(review): passInto appears to stash the title for form construction
        # elsewhere — confirm its side effect in forms.py.
        forms.passInto(title)
        form = forms.EmailLstForms(data=request.POST, title=title)
        print(form.errors)
        if form.is_valid():
            email = form.cleaned_data["email"]
            print(email)
            return HttpResponseRedirect("/displayCommentEmail" + "/" + email.email + "/" + email.paperTitleCE)
    else:
        context = {"form": forms.ChromeForms, "title": title}
        return render(request, 'peershake/ce.html', context=context)
    # reached only for an invalid POST
    return HttpResponse("it didn't work1")
def displayComment(request, title):
    """Show all comments whose paper title starts with *title*, plus the email-lookup form."""
    main = models.ChromeExtension.objects.filter(paperTitleCE__startswith=title)
    # NOTE(review): title is passed as the first positional argument (the form's
    # data parameter), unlike the keyword usage in displayCommentForm — confirm.
    form = forms.EmailLstForms(title)
    context = {"title": title, "main": main, 'form': form}
    return render(request, "peershake/comment.html", context=context)
def thankyou(request):
    """Render the post-submission thank-you page."""
    return render(request, "peershake/thankyou.html")
def displayCommentEmail(request, email, title):
    """Show the single comment submitted by *email* for the paper starting with *title*.

    Raises models.ChromeExtension.DoesNotExist if that email has no comment here.
    """
    main = models.ChromeExtension.objects.filter(paperTitleCE__startswith=title)
    mainSub = main.get(email=email)
    context = {"title": title, "name": mainSub.name, "genComment": mainSub.genComment, "specComment": mainSub.specComment, "email": mainSub.email, "datetime": mainSub.datetime}
    return render(request, 'peershake/dse.html', context=context)
def displayAll(request, title):
    """List every stored comment whose paper title starts with *title*."""
    matching_comments = models.ChromeExtension.objects.filter(paperTitleCE__startswith=title)
    page_context = {"title": title, "main": matching_comments}
    return render(request, "peershake/all.html", context=page_context)
from multiprocessing import Pool
from typing import Union, Tuple, List
import SimpleITK as sitk
import numpy as np
from batchgenerators.utilities.file_and_folder_operations import join, subfolders, subfiles, save_json, isfile, \
load_json, isdir
from surface_distance import compute_surface_distances
from kits21.configuration.labels import HEC_NAME_LIST, KITS_HEC_LABEL_MAPPING
from kits21.configuration.paths import TRAINING_DIR
from kits21.evaluation.metrics import construct_HEC_from_segmentation
def determine_tolerance_label(segmentation_1: np.ndarray, segmentation_2: np.ndarray,
                              label: Union[int, Tuple[int, ...]], spacing: Tuple[float, float, float]) \
        -> float:
    """Compute the symmetric 95th-percentile surface distance between the HEC
    masks of two segmentations for the given label.

    Params:
        segmentation_1 / segmentation_2: label volumes to compare
        label: label id or tuple of ids defining the HEC
        spacing: voxel spacing (z, y, x order)
    Returns:
        Mean of the two directional 95th-percentile surface distances, or
        np.nan when either mask is empty.
    """
    mask_1 = construct_HEC_from_segmentation(segmentation_1, label)
    mask_2 = construct_HEC_from_segmentation(segmentation_2, label)
    # Bug fix: the original bound mask1_empty to mask_2's sum and vice versa.
    # The result was unchanged (the two flags are only OR-ed) but the names
    # were swapped and misleading.
    mask_1_empty = np.sum(mask_1) == 0
    mask_2_empty = np.sum(mask_2) == 0
    if mask_1_empty or mask_2_empty:
        return np.nan
    dist = compute_surface_distances(mask_2, mask_1, spacing)
    distances_gt_to_pred = dist["distances_gt_to_pred"]
    distances_pred_to_gt = dist["distances_pred_to_gt"]
    return (np.percentile(distances_gt_to_pred, 95) + np.percentile(distances_pred_to_gt, 95)) / 2
def determine_tolerances_one_sample(fname_1: str, fname_2: str) -> List[float]:
    """Compute per-HEC surface-distance tolerances between two segmentation files."""
    image_1 = sitk.ReadImage(fname_1)
    image_2 = sitk.ReadImage(fname_2)
    # we need to invert the spacing because SimpleITK is weird
    spacing = tuple(list(image_1.GetSpacing())[::-1])
    array_1 = sitk.GetArrayFromImage(image_1)
    array_2 = sitk.GetArrayFromImage(image_2)
    return [
        determine_tolerance_label(array_1, array_2, KITS_HEC_LABEL_MAPPING[hec], spacing)
        for hec in HEC_NAME_LIST
    ]
def determine_tolerances_case(case_folder):
    """Compute and save per-HEC tolerances for one case from its sampled segmentations.

    Compares every unordered pair of sampled segmentations in each group and
    writes the per-HEC mean to <case_folder>/tolerances.json. Returns None
    early (writing nothing) when no segmentation_samples folder exists.
    """
    segmentation_samples_folder = join(case_folder, 'segmentation_samples')
    if not isdir(segmentation_samples_folder):
        return
    groups = subfolders(segmentation_samples_folder, join=False, prefix='group')
    tolerances = []
    for g in groups:
        nii_files = subfiles(join(segmentation_samples_folder, g), suffix='.nii.gz')
        # all unordered pairs within the group
        for ref_idx in range(len(nii_files)):
            for pred_idx in range(ref_idx + 1, len(nii_files)):
                tolerances.append(determine_tolerances_one_sample(nii_files[pred_idx], nii_files[ref_idx]))
    # NOTE(review): if no group contains >= 2 files, np.mean of an empty list
    # yields nan (with a warning) — confirm that is acceptable here.
    save_json({"tolerances": {HEC_NAME_LIST[i]: j for i, j in enumerate(np.mean(tolerances, 0))}}, join(case_folder, 'tolerances.json'))
def compute_tolerances_for_SD(num_proceses: int = 12, overwrite_existing=False):
    """Compute surface-distance tolerances for all training cases, then aggregate them.

    Params:
        num_proceses: pool size for the per-case work
                      (NOTE(review): parameter name is misspelled; renaming would
                      break keyword callers, so it is kept as-is)
        overwrite_existing: when False, cases that already have tolerances.json are skipped
    Returns:
        dict mapping each HEC name to the nan-mean tolerance over all cases
    """
    p = Pool(num_proceses)
    case_folders = subfolders(TRAINING_DIR, prefix='case_')
    if not overwrite_existing:
        # only process cases that do not have a result yet
        c = []
        for cs in case_folders:
            if not isfile(join(cs, 'tolerances.json')):
                c.append(cs)
        print(len(c), 'out of', len(case_folders), 'to go...')
        case_folders = c
    for c in case_folders:
        assert isdir(join(c, 'segmentation_samples')), "please generate the segmentation samples first (kits21/annotation/sample_segmentations.py)"
    r = p.starmap_async(determine_tolerances_case, ([i] for i in case_folders))
    _ = r.get()
    p.close()
    p.join()
    # load and aggregate
    case_folders = subfolders(TRAINING_DIR, prefix='case_')
    tolerances = {i: [] for i in HEC_NAME_LIST}
    for c in case_folders:
        # NOTE(review): c already looks like a full path from subfolders(); the
        # extra join(TRAINING_DIR, c, ...) only works because os.path.join drops
        # earlier components when c is absolute — confirm.
        if isfile(join(TRAINING_DIR, c, 'tolerances.json')):
            tolerances_here = load_json(join(TRAINING_DIR, c, 'tolerances.json'))
            for i, hec in enumerate(HEC_NAME_LIST):
                tolerances[hec].append(tolerances_here['tolerances'][hec])
    tolerances = {i: np.nanmean(j) for i, j in tolerances.items()}
    print(tolerances)
    return tolerances
if __name__ == '__main__':
    # CLI entry point: only the pool size is configurable.
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument('-num_processes', required=False, default=12, type=int)
    args = parser.parse_args()
    compute_tolerances_for_SD(args.num_processes)
|
from main.view import View
from main.controller import Controller
import subprocess
class RebootView(View):
    """Confirmation screen that reboots the device when its button is pushed."""

    def __init__(self, controller: Controller):
        super().__init__(controller)

    def update_display(self, canvas, bounding_box):
        # Simple two-option confirmation layout
        canvas.text((40, 20), "Reboot?", fill="white")
        canvas.text((0, 50), "Cancel", fill="white")
        canvas.text((110, 50), "OK", fill="white")

    def get_name(self):
        # NOTE(review): looks like a leftover placeholder name — confirm whether
        # this should read "Reboot View".
        return "Test View"

    def button_pushed(self):
        # Runs the system reboot command; requires sufficient privileges.
        # NOTE(review): result is never used and the process likely dies on reboot.
        cmd = "reboot"
        result = subprocess.check_output(cmd, shell=True).decode('ascii')[:-1]
|
#!/usr/bin/env python3
import urllib.request, urllib.error, json, sys
from os.path import join, expanduser, isfile, exists
from os import makedirs
# Configurations
# Location to save downloaded wallpapers
# Leave the IMAGE_DIR empty to use default directory /Users/USERNAME/Pictures/BingWallpaper
# Or you can set your own custom directory
IMAGE_DIR = ''
# ISO country code
# eg. 'en-US', 'en-NZ', 'zh-CN' or just leave it
COUNTRY_CODE = 'en-US'
# Apple Script to set wallpaper
SCRIPT = """/usr/bin/osascript<<END
tell application "Finder"
set desktop picture to POSIX file "%s"
end tell
END"""
def get_wallpaper_path(file_name):
    """Return the save path for *file_name*, creating the wallpaper directory if needed.

    Uses IMAGE_DIR when configured, otherwise ~/Pictures/BingWallpaper.
    """
    # Renamed local from "dir", which shadowed the builtin of the same name.
    if IMAGE_DIR.strip():
        directory = IMAGE_DIR
    else:
        directory = join(expanduser("~"), 'Pictures/BingWallpaper')
    if not exists(directory):
        makedirs(directory)
    return join(directory, file_name)
# Download a image with given URL
# Download a image with given URL
def download_image(url, download_only=False):
    """Download the wallpaper at *url* (skipping existing files) and optionally apply it.

    Params:
        url: Bing image URL; the filename is taken from the part after "OHR."
        download_only: when True, do not change the current wallpaper
    """
    # NOTE(review): this parsing assumes the URL contains "OHR." and an "&"
    # afterwards; it raises IndexError otherwise — confirm against current URLs.
    file_name = url.split('/')[-1].split("OHR.")[1].split("&")[0]
    file_path = get_wallpaper_path(file_name)
    if isfile(file_path):
        print('Skipped - ' + file_name + ' exists already.')
    else:
        urllib.request.urlretrieve(url, file_path)
        print('Image downloaded --> ' + file_path)
    if not download_only:
        set_wallpaper(file_path)
# Set Finder wallpaper
# See http://stackoverflow.com/questions/431205/how-can-i-programatically-change-the-background-in-mac-os-x
# Set Finder wallpaper
# See http://stackoverflow.com/questions/431205/how-can-i-programatically-change-the-background-in-mac-os-x
def set_wallpaper(file_path):
    """Set the macOS desktop wallpaper to *file_path* via osascript (no-op if the file is missing)."""
    if isfile(file_path):
        import subprocess
        # SCRIPT is an osascript heredoc; shell=True is required to feed it
        subprocess.Popen(SCRIPT % file_path, shell=True)
        print('Wallpaper set to ' + file_path)
# Display help message
def print_help_message():
msg = '''
Bing Wallpaper for Mac version 1.2
By Declan Gao http://declangao.me
Bing Wallpaper for Mac can batch download and set Bing image of the day as wallpaper on OS X.
Usage:
python bing.py [option]
no argument download today's picture of the day and set it as wallpaper
-d or --download download and save the last 8 pictures withouth changing the current wallpaper
-h or --help display this help message
'''
print(msg)
sys.exit()
def main():
    """Parse CLI arguments, fetch Bing's image-of-the-day metadata, and download/apply wallpapers."""
    # Check arguments
    if len(sys.argv) == 1:
        flag_download_only = False
    elif len(sys.argv) == 2:
        if '-d' == sys.argv[1] or '--download' == sys.argv[1]:
            flag_download_only = True
        elif '-h' == sys.argv[1] or '--help' == sys.argv[1]:
            print_help_message()
        else:
            print('Invalid argument!')
            print_help_message()
    else:
        print('Invalid arguments!')
        print_help_message()
    # Choose a proper URL
    # The API only returns 8 pictures at most. No need to set the number higher than 8.
    # See http://stackoverflow.com/questions/10639914/is-there-a-way-to-get-bings-photo-of-the-day
    if flag_download_only:
        url = 'http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=8'
    else:
        # n=1: only the newest picture of the day is needed when we set it directly
        url = 'http://www.bing.com/HPImageArchive.aspx?format=js&idx=0&n=1'
    # Use country code if provided
    if '' != COUNTRY_CODE.strip():
        url += '&mkt=' + COUNTRY_CODE
    try:
        # Make the request
        response = urllib.request.urlopen(url)
        json_data = json.load(response)  # Get JSON
        if 'images' in json_data:
            images = json_data['images']
        else:
            sys.exit('JSON error. Please try again later...')
        # Start downloading!
        print('Downloading...')
        for image in images:
            url = 'http://www.bing.com' + image['url']
            download_image(url, flag_download_only)
    # Bug fix: the original used Python-2-style "except(Type, e):" clauses,
    # which in Python 3 evaluate the undefined name "e" and raise NameError
    # the moment any network error is actually caught.
    except urllib.error.HTTPError as e:
        print('Error ' + str(e.code) + '. Please try again later...')
    except urllib.error.URLError:
        print('Error. Please check your internet connection...')

if __name__ == '__main__':
    main()
|
# Generated by Django 3.2.2 on 2021-05-18 21:13
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: orders locations alphabetically, detaches
    # images from users, widens the location field, and drops the User model.

    dependencies = [
        ('images', '0003_auto_20210517_1022'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='location',
            options={'ordering': ['location']},
        ),
        migrations.RemoveField(
            model_name='images',
            name='user',
        ),
        migrations.AlterField(
            model_name='location',
            name='location',
            field=models.CharField(max_length=100),
        ),
        migrations.DeleteModel(
            name='User',
        ),
    ]
|
#
# Copyright (c) nexB Inc. and others. All rights reserved.
# ScanCode is a trademark of nexB Inc.
# SPDX-License-Identifier: Apache-2.0
# See http://www.apache.org/licenses/LICENSE-2.0 for the license text.
# See https://github.com/nexB/scancode-toolkit for support or download.
# See https://aboutcode.org for more information about nexB OSS projects.
#
import traceback
import attr
from license_expression import Licensing
from commoncode.cliutils import PluggableCommandLineOption
from commoncode.cliutils import POST_SCAN_GROUP
from plugincode.post_scan import PostScanPlugin
from plugincode.post_scan import post_scan_impl
from scancode.api import _licenses_data_from_match
from scancode.api import SCANCODE_LICENSEDB_URL
from scancode_analyzer import license_analyzer
from scancode_analyzer import summary
MISSING_OPTIONS_MESSAGE = (
"The scan must be run with these options: "
"--license --license-text --is-license-text --classify --info"
)
@post_scan_impl
class ResultsAnalyzer(PostScanPlugin):
    """
    Analyze license detections for potential issues.
    """

    codebase_attributes = {
        "license_detection_issues_summary": attr.ib(default=attr.Factory(dict))
    }
    resource_attributes = {
        "license_detection_issues": attr.ib(default=attr.Factory(list))
    }
    sort_order = 80

    options = [
        PluggableCommandLineOption(
            ("--analyze-license-results",),
            is_flag=True,
            default=False,
            help="Performs a license detection analysis to detect different kinds of "
            "potential license detection issues in scancode. "
            + MISSING_OPTIONS_MESSAGE,
            help_group=POST_SCAN_GROUP,
        ),
    ]

    def is_enabled(self, analyze_license_results, **kwargs):
        return analyze_license_results

    def process_codebase(self, codebase, **kwargs):
        """Walk all file resources, collect license detection issues, and summarize them."""
        # Bug fix: the original assignment ended with a trailing comma, making
        # ``msg`` a 1-tuple; codebase.errors then received tuples, not strings.
        msg = (
            "Cannot analyze scan for license detection errors, because "
            "required attributes are missing. " + MISSING_OPTIONS_MESSAGE
        )
        license_issues = []
        count_has_license = 0
        count_files_with_issues = 0
        for resource in codebase.walk():
            if not resource.is_file:
                continue
            # Case where a license scan was not performed
            if not hasattr(resource, "licenses"):
                codebase.errors.append(msg)
                break
            # Where the resource does not have any detected license
            license_matches_serialized = getattr(resource, "licenses", [])
            if not license_matches_serialized:
                continue
            # Case where any attribute essential for analysis is missing
            if not is_analyzable(resource):
                codebase.errors.append(msg)
                break
            count_has_license += 1
            try:
                license_matches = LicenseMatch.from_files_licenses(
                    license_matches_serialized
                )
            except KeyError as e:
                trace = traceback.format_exc()
                msg = f"Cannot convert scancode data to LicenseMatch class: {e}\n{trace}"
                codebase.errors.append(msg)
                raise ScancodeDataChangedError(msg)
            try:
                ars = list(license_analyzer.LicenseDetectionIssue.from_license_matches(
                    license_matches=license_matches,
                    is_license_text=getattr(resource, "is_license_text", False),
                    is_legal=getattr(resource, "is_legal", False),
                    path=getattr(resource, "path"),
                ))
                if ars:
                    count_files_with_issues += 1
                    license_issues.extend(ars)
                resource.license_detection_issues = [
                    ar.to_dict(is_summary=False)
                    for ar in ars
                ]
            except Exception as e:
                trace = traceback.format_exc()
                msg = f"Cannot analyze scan for license scan errors: {e}\n{trace}"
                resource.scan_errors.append(msg)
            codebase.save_resource(resource)
        try:
            summary_license = summary.SummaryLicenseIssues.summarize(
                license_issues,
                count_has_license,
                count_files_with_issues,
            )
            codebase.attributes.license_detection_issues_summary.update(
                summary_license.to_dict(),
            )
        except Exception as e:
            trace = traceback.format_exc()
            msg = f"Cannot summarize license detection issues: {e}\n{trace}"
            # Bug fix: summarization is a codebase-level step; the original
            # appended to the last visited ``resource`` (NameError when the
            # codebase contained no files at all).
            codebase.errors.append(msg)
class ScancodeDataChangedError(Exception):
    """
    Raised when the scan results data format does not match what we expect.
    """
@attr.s
class LicenseMatch:
    """
    Represent a license match to a rule.

    The attributes mirror the serialized scancode license match data
    (one entry of a ``files.licenses`` list).
    """
    license_expression = attr.ib()
    score = attr.ib()
    start_line = attr.ib()
    end_line = attr.ib()
    rule_identifier = attr.ib()
    is_license_text = attr.ib()
    is_license_notice = attr.ib()
    is_license_reference = attr.ib()
    is_license_tag = attr.ib()
    is_license_intro = attr.ib()
    matcher = attr.ib()
    matched_length = attr.ib()
    rule_length = attr.ib()
    match_coverage = attr.ib()
    rule_relevance = attr.ib()
    matched_text = attr.ib()
    @classmethod
    def from_files_licenses(cls, license_matches):
        """
        Return a list of LicenseMatch objects built from ``license_matches``,
        a scancode ``files.licenses`` list of serialized match mappings.

        May raise KeyError if an expected key is missing from the data
        (callers turn this into ScancodeDataChangedError).
        """
        matches = []
        licensing = Licensing()
        # Whenever we have multiple matches with the same expression, we want to only
        # keep the first and skip the secondary matches
        skip_secondary_matches = 0
        for license_match in license_matches:
            if skip_secondary_matches:
                skip_secondary_matches -= 1
                continue
            matched_rule = license_match["matched_rule"]
            # key = license_match["key"]
            license_expression = matched_rule["license_expression"]
            expression_keys = licensing.license_keys(license_expression)
            # A compound expression yields one serialized entry per license
            # key; keep only the current entry and skip the following ones.
            if len(expression_keys) != 1:
                skip_secondary_matches = len(expression_keys) - 1
            matches.append(
                cls(
                    license_expression = license_expression,
                    score = license_match["score"],
                    start_line = license_match["start_line"],
                    end_line = license_match["end_line"],
                    rule_identifier = matched_rule["identifier"],
                    is_license_text = matched_rule["is_license_text"],
                    is_license_notice = matched_rule["is_license_notice"],
                    is_license_reference = matched_rule["is_license_reference"],
                    is_license_tag = matched_rule["is_license_tag"],
                    is_license_intro = matched_rule["is_license_intro"],
                    matcher = matched_rule["matcher"],
                    matched_length = matched_rule["matched_length"],
                    rule_length = matched_rule["rule_length"],
                    match_coverage = matched_rule["match_coverage"],
                    rule_relevance = matched_rule["rule_relevance"],
                    matched_text = license_match["matched_text"],
                )
            )
        return matches
    def to_dict(self):
        """Return this match as a plain dict built by ``attr.asdict``."""
        return attr.asdict(self)
def from_license_match_object(license_matches):
    """
    Return a list of LicenseMatch objects built from a list of
    licensedcode.match.LicenseMatch objects.

    :param license_matches: list of licensedcode.match.LicenseMatch
    :raises ScancodeDataChangedError: if the serialized data lacks a key
        that LicenseMatch expects (i.e. the scancode output format changed).
    """
    detected_licenses = []
    for match in license_matches:
        detected_licenses.extend(
            _licenses_data_from_match(
                match=match,
                include_text=True,
                license_text_diagnostics=False,
                license_url_template=SCANCODE_LICENSEDB_URL)
        )
    try:
        return LicenseMatch.from_files_licenses(detected_licenses)
    except KeyError as e:
        # Bug fix: the old message was an f-string with no placeholder, so the
        # missing key was lost. Include it and chain the cause, matching the
        # analogous handler used during codebase analysis.
        msg = f"Cannot convert scancode data to LicenseMatch class: {e}"
        raise ScancodeDataChangedError(msg) from e
def is_analyzable(resource):
    """
    Return True if resource has all the data required for the analysis:
    an ``is_license_text`` attribute, an ``is_legal`` attribute, and a
    ``matched_text`` entry in every serialized license match.
    Return False if any of these essential attributes are missing.

    :param resource: commoncode.resource.Resource
    """
    if not hasattr(resource, "is_license_text"):
        return False
    if not hasattr(resource, "is_legal"):
        return False
    serialized_matches = getattr(resource, "licenses", [])
    return all("matched_text" in match for match in serialized_matches)
|
import unittest
from sequence_generation import topological_sort, intersect, State_Space_Error, \
Topology, State_Search_Flags, Node, Stateful_Node, Input, Output, PowerState, \
state_difference, Wire, Constraint
from enzian_descriptions import enzian_nodes, enzian_wires, ISPPAC
import itertools
import random
import copy
from functools import partial
import z3
def compare_unordered_lists(l1, l2):
    """
    Deep-compare two values, treating lists as unordered multisets.

    Tuples are compared element-wise (order matters); lists of equal length
    match if every element of the first can be paired with a distinct,
    recursively-matching element of the second; everything else falls back
    to plain equality.
    """
    if isinstance(l1, tuple) and isinstance(l2, tuple) and len(l1) == len(l2):
        return all(compare_unordered_lists(a, b) for a, b in zip(l1, l2))
    if isinstance(l1, list) and isinstance(l2, list) and len(l1) == len(l2):
        unmatched = copy.deepcopy(l2)
        for element in l1:
            for idx, candidate in enumerate(unmatched):
                if compare_unordered_lists(element, candidate):
                    # consume the matched candidate so it can't pair twice
                    del unmatched[idx]
                    break
            else:
                return False
        return True
    return l1 == l2
class TestStateDifference(unittest.TestCase):
    """Checks state_difference on interval/set based state descriptions."""

    def test_state_difference(self):
        cases = [
            ([(2, 8), (0, 1), {3, 2, 5}], [(4, 5), (1, 1), {3, 7, 6}]),
            ([(2, 8), {1}], [(4, 5), {0, 1}])
        ]
        expected = [
            [[(2, 3), (0, 1), {3, 2, 5}], [(6, 8), (0, 1), {3, 2, 5}], [(2, 8), (0, 0), {3, 2, 5}], [(2, 8), (0, 1), {2, 5}]],
            [[(2, 3), {1}], [(6, 8), {1}]]
        ]
        for args, want in zip(cases, expected):
            self.assertEqual(state_difference(*args), want)
class TestTopologicalSort(unittest.TestCase):
    """Checks topological_sort on small dependency graphs."""

    def test_sort(self):
        dependencies = {"w1": set(), "w2": {"w1"}, "w3": {"w1", "w2"}, "w4": set(), "w5" : {"w4"}}
        # Layers are unordered within themselves, hence the unordered compare.
        layers = (["w1", "w4"], ["w2", "w5"], ["w3"])
        self.assertTrue(compare_unordered_lists(layers, tuple(topological_sort(dependencies))))

    def test_not_sortable(self):
        # w2 and w3 depend on each other, so no topological order exists.
        cyclic = {"w1": set(), "w2": {"w1", "w3"}, "w3": {"w1", "w2"}}
        self.assertIsNone(topological_sort(cyclic))
class TestIntersect(unittest.TestCase):
    """Tests intersect() on set- and interval-valued state components."""
    def test_intersect(self):
        # Pairs that do overlap, matched index-wise with results_ok below.
        arguments_ok = [
            ({0, 1}, {1}),
            ((3, 6), (4, 8)),
            ((2, 3), (3, 5)),
        ]
        # NOTE: map() returns a single-use iterator; it is consumed exactly
        # once by the list-form intersect call below.
        arguments_list = map(lambda x:list(x), zip(*arguments_ok))
        # Pairs with empty intersection; intersect is expected to raise.
        arguments_fail = [
            ({0}, {1}),
            ((2, 3), (5, 6))
        ]
        arguments_fail_list = map(lambda x: list(x), zip(*(arguments_ok + arguments_fail)))
        results_ok = [
            {1},
            (4, 6),
            (3, 3)
        ]
        for arg, result in zip(arguments_ok, results_ok):
            #with self.subTest(arg = arg):
            self.assertEqual(intersect(*arg), result)
        # intersect also accepts lists of components and returns a list.
        self.assertEqual(intersect(*arguments_list), results_ok)
        for arg in arguments_fail:
            #with self.subTest(arg = arg):
            with self.assertRaises(State_Space_Error):
                intersect(*arg)
        # A single non-intersecting component makes the whole list form fail.
        with self.assertRaises(State_Space_Error):
            intersect(*arguments_fail_list)
class Z3_Test(unittest.TestCase):
    """Tests the z3-backed state search and its model-recovery helpers."""
    def test_recover_solution(self):
        # Node2 drives three wires into Node1; the Constraints on Node2's
        # outputs (see Node2 below) pin the expected solution values.
        nodes = [("n1", 0x0, Node1, []), ("n2", 0x0, Node2, [])]
        wires = [
            ("w1", "n2", "O1", {("n1", "I1")}),
            ("w2", "n2", "O2", {("n1", "I2")}),
            ("w3", "n2", "O3", {("n1", "I3")})
        ]
        topology = Topology(nodes, wires)
        solution = topology.parametrized_state_search({}, State_Search_Flags(use_z3 = True, visualize = False))
        expected = {"w1": [(5, 5), (44, 44)], "w2": [{0, 1}], "w3": [{3}, {1}]}
        expected_sequence = (["set_w1", "set_w2", "set_w3"], ["w1", "w2", "w3"])
        self.assertEqual(expected, solution[0])
        # The set-action sequence order is not fixed, hence unordered compare.
        self.assertTrue(compare_unordered_lists(tuple(solution[3]), expected_sequence))
    def test_param_state_search_unsat(self):
        nodes = [("n1", 0x0, Node1, []), ("n2", 0x0, Node2, [])]
        wires = [
            ("w1", "n2", "O1", {("n1", "I1")}),
            ("w2", "n2", "O2", {("n1", "I2")}),
            ("w3", "n2", "O3", {("n1", "I3")})
        ]
        topology = Topology(nodes, wires)
        # The requested w3 value conflicts with Node2's O3 constraint ({3}, {1}),
        # so the search must come back empty.
        solution = topology.parametrized_state_search({"w3": [{4}, {1}]}, State_Search_Flags(use_z3 = True, visualize = False))
        self.assertEqual(solution, [])
    def test_translate_state(self):
        topology = Topology([], [])
        topology.most_general_state = {'a' : [{}], 'b': [{}], 'c':[{}], 'd' : [{}]}
        variables = {}
        variables['a'] = z3.Int('a_0')
        variables['b'] = z3.Int('b_0')
        variables['c'] = z3.Int('c_0')
        variables['d'] = z3.Int('d_0')
        # State components: interval, set, alias of 'a', and a pair of aliases.
        state = [(8, 8), {5}, "a", ("a", "c")]
        problem = z3.Solver()
        problem.add(topology.translate_state(state, ['a', 'b', 'c', 'd'], variables))
        self.assertEqual(problem.check(), z3.sat)
        self.assertEqual({"a": [{8}], "b": [{5}], "c": [{8}], "d": [{8}]}, topology.recover_solution(problem.model()))
class TestParametrizedStateSearch(unittest.TestCase):
    """End-to-end tests of the backtracking state search on larger topologies."""
    def test_param_state_search(self):
        # A motherboard PSU powering an ISPPAC plus four CPUs behind a second
        # PSU; both plain and advanced backtracking must find the same single
        # solution.
        node_list = [
            ("psu_motherboard", 0x71, PowerSupply, []),
            ("psu", 0x72, PowerSupply, []),
            ("isppac", 0x55, ISPPAC, ["pac"]),
            ("cpu", 0x01, CPU_3, []),
            ("cpu2", 0x01, CPU2, []),
            ("cpu3", 0x01, CPU2, []),
            ("cpu4", 0x01, CPU2, [])
        ]
        wire_list = \
        [
            ("vdd33", "psu_motherboard", "OUT0", {("cpu", "VDD33")}),
            ("vcc_isppac", "psu_motherboard", "OUT1", {("isppac", "VCC")}),
            ("vcc_in_isppac", "psu_motherboard", "OUT2", {("isppac", "VCC_IN")}),
            ("vdd_cpu/cpu2", "psu", "OUT0", {("cpu", "VDD"), ("cpu2", "VDD")}),
            ("vdd_cpu3", "psu", "OUT1", {("cpu3", "VDD")}),
            ("vdd_cpu4", "psu", "OUT2", {("cpu4", "VDD")}),
            ("en1_cpu/cpu2", "isppac", "OUT0", {("cpu", "EN1"), ("cpu2", "EN1")}),
            ("en2_cpu", "isppac", "OUT1", {("cpu", "EN2")}),
            ("en2_cpu2", "isppac", "OUT2", {("cpu2", "EN2")}),
            ("en1_cpu3/cpu4", "isppac", "OUT3", {("cpu3", "EN1"), ("cpu4", "EN1")}),
            ("en2_cpu3/cpu4", "isppac", "OUT4", {("cpu3", "EN2"), ("cpu4", "EN2")})
        ]
        # NOTE(review): "topolgy" is a typo, but only a local name.
        topolgy = Topology(node_list, wire_list)
        expected = {
            "vdd33": [(0, 0)],
            "vcc_isppac": [(2800, 3960)],
            "vcc_in_isppac": [(2250, 5500)],
            "vdd_cpu/cpu2" : [(0, 0)],
            "vdd_cpu3" : [(0, 0)],
            "vdd_cpu4" : [(0, 0)],
            "en1_cpu/cpu2" : [{0}],
            "en2_cpu" : [{1}],
            "en2_cpu2" : [{0}],
            "en1_cpu3/cpu4" : [{0}],
            "en2_cpu3/cpu4" : [{0}]
        }
        result = topolgy.parametrized_state_search({}, State_Search_Flags(no_output=True, print_solutions=False, advanced_backtracking = False, visualize= False))
        result_backtracking = topolgy.parametrized_state_search({}, State_Search_Flags(no_output=True, print_solutions=False, advanced_backtracking=True, visualize = False))
        self.assertEqual(len(result), 1)
        self.assertEqual(len(result_backtracking), 1)
        self.assertEqual(result[0][0], expected)
        self.assertEqual(result_backtracking[0][0], expected)
    def test_independence_of_sequence_1(self):
        # The number of solutions must not depend on the wire processing order.
        node_list = [
            ("n0", 0x0, Node6, []),
            ("n1", 0x0, Node3, []),
            ("n2", 0x0, Node4, []),
            ("n3", 0x0, Node5, []),
            ("n4", 0x0, Node5, []),
            ("n5", 0x0, Node5, []),
            ("n6", 0x0, Node4, []),
            ("n7", 0x0, Node6, []),
            ("n8", 0x0, Node5, [])
        ]
        wire_list = [
            ("w0", "n0", "O1", {("n1", "I1")}),
            ("w1", "n1", "O1", {("n2", "I1")}),
            ("w2", "n1", "O2", {("n3", "I1")}),
            ("w3", "n1", "O3", {("n4", "I1")}),
            ("w4", "n2", "O1", {("n6", "I1")}),
            ("w5", "n6", "O1", {("n5", "I1")}),
            ("w6", "n7", "O1", {("n8", "I1")})
        ]
        topology = Topology(node_list, wire_list)
        w_list = topology.sorted_wires
        # NOTE: this permutations iterator is fully consumed by the len() call
        # below; the loop further down creates a fresh one.
        perm = itertools.permutations(w_list)
        print(len(list(perm)))
        correct_number_of_solutions = len(topology.parametrized_state_search({}, State_Search_Flags(all_solutions = True, advanced_backtracking = False, visualize=False)))
        for p in itertools.permutations(w_list):
            print(p)
            #topology.sorted_wires = list(map(lambda x: x[0], sorted(list(zip(w_list, list(p))), key= lambda x: x[1])))
            topology.sorted_wires = p
            solutions = topology.parametrized_state_search({}, State_Search_Flags(all_solutions = True, advanced_backtracking =True, print_solutions=False, visualize=False))
            print(len(solutions))
            self.assertEqual(len(solutions), correct_number_of_solutions)
    def test_independence_of_sequence_2(self):
        # Same independence property on the real Enzian board description.
        enzian = Topology(enzian_nodes, enzian_wires)
        enzian.current_node_state.update({"cpu" : "POWERED_ON", "fpga" : "POWERED_DOWN"})
        expected = len(enzian.parametrized_state_search({}, State_Search_Flags(print_solutions=False, advanced_backtracking=False, extend=False, all_solutions=True)))
        print(expected)
        for i in range(1):
            print(i)
            random.shuffle(enzian.sorted_wires)
            solutions = enzian.parametrized_state_search({}, State_Search_Flags(print_solutions=False, advanced_backtracking=True, extend=False, all_solutions=True))
            self.assertEqual(len(solutions), expected)
class Node1(Node):
    """Test fixture: a pure sink with three inputs (interval and set valued)."""
    I1 = Input([(4, 9), (25, 60)], "power")
    I2 = Input([{0, 1}], "logical")
    I3 = Input([{6, 3, 4}, {8, 1, 4}], "power")
    def __init__(self, name, bus_addr):
        super(Node1, self).__init__(name, bus_addr, Node1)
class Node2(Node):
    """Test fixture: a source whose Constraints pin each output to one value."""
    O1 = Output([(0, 25), (0, 250)], [
        Constraint([(5, 5), (44, 44)], {}, partial(Constraint.explicit, "O1", set(), set()))
    ], "power", Wire.gpio_set)
    O2 = Output([{0, 1}], [Constraint([{0, 1}], {}, partial(Constraint.explicit, "O2", set(), set()))], "logical", Wire.gpio_set)
    O3 = Output([{3, 4, 7}, {29, 1, 99}], [Constraint([{3}, {1}], {}, partial(Constraint.explicit, "O3", set(), set()))], "power", Wire.gpio_set)
    def __init__(self, name, bus_addr):
        super(Node2, self).__init__(name, bus_addr, Node2)
# Fixtures below are required for TestParametrizedStateSearch.
class CPU2(Stateful_Node):
    """Two-state CPU fixture: a supply rail plus two enable pins."""
    VDD = Input([(0, 2600)], "power")
    EN1 = Input([{0, 1}], "logical")
    EN2 = Input([{0, 1}], "logical")
    # states is a (lambda, pin-name list) pair; the lambda maps wire names to
    # a dict of state name -> PowerState(pin levels, transition steps).
    states = (lambda vdd, en1, en2: {
        "POWERED_DOWN" : PowerState({vdd : [(0, 0)], en1 : [{0}], en2 : [{0}]}, {
            "POWERED_ON": [
                ({en1 : [{0}]}, "")
            ],
            "POWERED_DOWN": []}),
        "POWERED_ON" : PowerState({vdd : [(2300, 2600)], en1 : [{1}], en2 : [{0}]}, {
            "POWERED_DOWN": [
                ({vdd: [(2300, 2400)]}, "wait until " + vdd + " stabilized"),
                ({en1 : [{1}]}, ""),
                ({en2 : [{1}], vdd: [(2000, 2600)]}, "")
            ],
            "POWERED_ON": []})
    }, ["VDD", "EN1", "EN2"])
    def __init__(self, name, bus_addr):
        super(CPU2, self).__init__(name, bus_addr, "POWERED_DOWN", CPU2)
class CPU_3(Stateful_Node):
    """Two-state CPU fixture with a second (3.3V) rail; EN2 idles high."""
    VDD33 = Input([(0, 4000)], "power")
    VDD = Input([(0, 2500)], "power")
    EN1 = Input([{0, 1}], "logical")
    EN2 = Input([{0, 1}], "logical")
    # Same (lambda, pin-name list) shape as CPU2.states.
    states = (lambda vdd33, vdd, en1, en2: {
        "POWERED_DOWN" : PowerState({vdd33: [(0, 0)], vdd : [(0, 0)], en1 : [{0}], en2 : [{1}]}, {
            "POWERED_ON": [
                ({en1 : [{0}]}, "")
            ],
            "POWERED_DOWN": []}),
        "POWERED_ON" : PowerState({vdd33: [(3000, 4000)], vdd : [(2000, 2500)], en1 : [{1}], en2 : [{0}]}, {
            "POWERED_DOWN": [
                ({en2 : [{1}]}, ""),
                ({vdd33: [(3000, 4000)], vdd: [(2000, 2500)]}, "wait until " + vdd + " stabilized"),
                ({en1 : [{1}]}, ""),
                ({vdd: [(2000, 2200)]}, "")
            ],
            "POWERED_ON": []})
    }, ["VDD33", "VDD", "EN1", "EN2"])
    def __init__(self, name, bus_addr):
        super(CPU_3, self).__init__(name, bus_addr, "POWERED_DOWN", CPU_3)
class PowerSupply(Node):
    """Test fixture: three freely settable 0-12V outputs."""
    OUT0 = Output([(0, 12000)], [Constraint([(0, 12000)], {}, partial(Constraint.explicit, "OUT0", set(), set()))], "power", Wire.gpio_set)
    OUT1 = Output([(0, 12000)], [Constraint([(0, 12000)], {}, partial(Constraint.explicit, "OUT1", set(), set()))], "power", Wire.gpio_set)
    OUT2 = Output([(0, 12000)], [Constraint([(0, 12000)], {}, partial(Constraint.explicit, "OUT2", set(), set()))], "power", Wire.gpio_set)
    def __init__(self, name, bus_addr):
        super(PowerSupply, self).__init__(name, bus_addr, PowerSupply)
class Node3(Node):
    """Test fixture: a buffer fanning its single input out to three outputs."""
    device = "node3"
    I1 = Input([{0, 1}], "logical")
    # Each output follows I1: Constraint value {1} requires I1 in {1}, etc.
    O1 = Output([{0, 1}], [
        Constraint([{1}], {"I1" : [{1}]}, partial(Constraint.explicit, "O1", {"I1"}, set())),
        Constraint([{0}], {"I1" : [{0}]}, partial(Constraint.explicit, "O1", {"I1"}, set()))
    ], "logical", Wire.pin_set)
    O2 = Output([{0, 1}], [
        Constraint([{1}], {"I1" : [{1}]}, partial(Constraint.explicit, "O2", {"I1"}, set())),
        Constraint ([{0}], {"I1" : [{0}]}, partial(Constraint.explicit, "O2", {"I1"}, set()))
    ], "logical", Wire.pin_set)
    O3 = Output([{0, 1}], [
        Constraint([{1}], {"I1" : [{1}]}, partial(Constraint.explicit, "O3", {"I1"}, set())),
        Constraint([{0}], {"I1" : [{0}]}, partial(Constraint.explicit, "O3", {"I1"}, set()))
    ], "logical", Wire.pin_set)
    def __init__(self, name, bus_addr):
        super(Node3, self).__init__(name, bus_addr, Node3)
class Node4(Node):
    """Test fixture: an inverter (output is the negation of its input)."""
    device = "node4"
    I1 = Input([{0, 1}], "logical")
    O1 = Output([{0, 1}], [
        Constraint([{0}], {"I1" : [{1}]}, partial(Constraint.explicit, "O1", {"I1"}, set())),
        Constraint([{1}], {"I1" : [{0}]}, partial(Constraint.explicit, "O1", {"I1"}, set())),
    ], "logical", Wire.pin_set)
    def __init__(self, name, bus_addr):
        super(Node4, self).__init__(name, bus_addr, Node4)
class Node5(Node):
    """Test fixture: a pure sink with a single logical input."""
    I1 = Input([{0, 1}], "logical")
    def __init__(self, name, bus_addr):
        super(Node5, self).__init__(name, bus_addr, Node5)
class Node6(Node):
    """Test fixture: a source with one freely settable logical output."""
    O1 = Output([{0, 1}], [Constraint([{0}], {}, partial(Constraint.explicit, "O1", set(), set())), Constraint([{1}], {}, partial(Constraint.explicit, "O1", set(), set()))], "logical", Wire.gpio_set)
    def __init__(self, name, bus_addr):
        super(Node6, self).__init__(name, bus_addr, Node6)
if __name__ == '__main__':
unittest.main() |
# zhoudoao@foxmail.com
# 2020.5.12
""" Bayesian LeNet-5 and LeNet-5 model.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
tf.enable_v2_behavior()
tfd = tfp.distributions
def bayesian_lenet5(num_classes, kl_divergence_function):
    """Build a Bayesian LeNet-5 using TFP Flipout variational layers.

    Args:
        num_classes: size of the softmax output layer.
        kl_divergence_function: divergence fn passed to every Flipout layer
            as ``kernel_divergence_fn``.

    Returns:
        An uncompiled ``tf.keras.models.Sequential`` model.
    """
    # Factories so every position gets its own fresh layer instance.
    def flipout_conv(filters):
        return tfp.layers.Convolution2DFlipout(
            filters, kernel_size=5, padding='SAME',
            kernel_divergence_fn=kl_divergence_function,
            activation=tf.nn.relu)

    def max_pool():
        return tf.keras.layers.MaxPooling2D(
            pool_size=[2, 2], strides=[2, 2],
            padding='SAME')

    return tf.keras.models.Sequential([
        flipout_conv(6),
        max_pool(),
        flipout_conv(16),
        max_pool(),
        flipout_conv(120),
        tf.keras.layers.Flatten(),
        tfp.layers.DenseFlipout(
            84, kernel_divergence_fn=kl_divergence_function,
            activation=tf.nn.relu),
        tfp.layers.DenseFlipout(
            num_classes, kernel_divergence_fn=kl_divergence_function,
            activation=tf.nn.softmax),
    ])
def lenet5(num_classes, activation=tf.nn.relu):
    """Build the classic deterministic LeNet-5.

    Args:
        num_classes: size of the softmax output layer.
        activation: activation for the conv and hidden dense layers.

    Returns:
        An uncompiled ``tf.keras.models.Sequential`` model.
    """
    layers = [
        tf.keras.layers.Conv2D(6, kernel_size=5, padding='SAME',
                               activation=activation),
        tf.keras.layers.MaxPooling2D(pool_size=[2, 2],
                                     strides=[2, 2], padding='SAME'),
        tf.keras.layers.Conv2D(16, kernel_size=5, padding='SAME',
                               activation=activation),
        tf.keras.layers.MaxPooling2D(pool_size=[2, 2], strides=[2, 2],
                                     padding='SAME'),
        tf.keras.layers.Conv2D(120, kernel_size=5, padding='SAME',
                               activation=activation),
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(84, activation=activation),
        tf.keras.layers.Dense(num_classes, activation=tf.nn.softmax),
    ]
    return tf.keras.models.Sequential(layers)
|
"""Each ListNode holds a reference to its previous node
as well as its next node in the List."""
class ListNode:
    """A node of a doubly linked list: a value plus prev/next references."""

    def __init__(self, value, prev=None, next=None):
        self.value = value
        self.prev = prev
        self.next = next

    def insert_after(self, value):
        """Wrap value in a ListNode and splice it in directly after this node."""
        successor = self.next
        self.next = ListNode(value, self, successor)
        if successor is not None:
            successor.prev = self.next

    def insert_before(self, value):
        """Wrap value in a ListNode and splice it in directly before this node."""
        predecessor = self.prev
        self.prev = ListNode(value, predecessor, self)
        if predecessor is not None:
            predecessor.next = self.prev

    def delete(self):
        """Unlink this node by pointing its neighbours at each other."""
        if self.prev is not None:
            self.prev.next = self.next
        if self.next is not None:
            self.next.prev = self.prev
"""Our doubly-linked list class. It holds references to
the list's head and tail nodes."""
class DoublyLinkedList:
    """Our doubly-linked list class. It holds references to
    the list's head and tail nodes, and tracks its length."""

    def __init__(self, node=None):
        self.head = node
        self.tail = node
        self.length = 1 if node is not None else 0

    def __len__(self):
        return self.length

    def add_to_head(self, value):
        """Wrap value in a ListNode and make it the new head."""
        new_node = ListNode(value)
        self.length += 1
        if not self.head and not self.tail:
            self.head = new_node
            self.tail = new_node
        else:
            self.head.prev = new_node
            new_node.next = self.head
            self.head = new_node

    def remove_from_head(self):
        """Remove the head node and return its value (None if the list is empty)."""
        if not self.head:
            return None
        self.length -= 1
        current_head = self.head
        if self.head is self.tail:
            # single element: the list becomes empty
            self.head = None
            self.tail = None
        else:
            self.head = self.head.next
            self.head.prev = None
        return current_head.value

    def add_to_tail(self, value):
        """Wrap value in a ListNode and make it the new tail."""
        new_node = ListNode(value)
        self.length += 1
        if not self.head and not self.tail:
            self.head = new_node
            self.tail = new_node
        else:
            self.tail.next = new_node
            new_node.prev = self.tail
            self.tail = new_node

    def remove_from_tail(self):
        """Remove the tail node and return its value (None if the list is empty)."""
        if not self.tail:
            return None
        self.length -= 1
        current_tail = self.tail
        if self.head is self.tail:
            self.head = None
            self.tail = None
        else:
            self.tail = self.tail.prev
            self.tail.next = None
        return current_tail.value

    def move_to_front(self, node):
        """Move the given node's value to the head. Assumes node is in this list."""
        if node is self.head:
            return
        if node is self.tail:
            self.remove_from_tail()
        else:
            node.delete()
            self.length -= 1
        self.add_to_head(node.value)

    def move_to_end(self, node):
        """Move the given node's value to the tail. Assumes node is in this list."""
        if node is self.tail:
            return
        if node is self.head:
            self.remove_from_head()
        else:
            node.delete()
            self.length -= 1
        self.add_to_tail(node.value)

    def delete(self, node):
        """Remove node from the list. Assumes node is an element of this list."""
        if node is self.head:
            self.remove_from_head()
        elif node is self.tail:
            self.remove_from_tail()
        else:
            # Bug fix: middle nodes were previously ignored entirely (old
            # TODO). Unlink the node and account for the removal.
            node.delete()
            self.length -= 1

    def get_max(self):
        """Return the largest value in the list, or None if the list is empty."""
        if not self.head:
            return None
        max_value = self.head.value
        current = self.head
        while current:
            if current.value > max_value:
                max_value = current.value
            current = current.next
        return max_value
|
from option import option
import pytest
from typing import Optional, Callable
def test_some():
    """some() hands back the wrapped value itself."""
    wrapped = option.some(5)
    assert wrapped == 5
    assert isinstance(wrapped, int)
def test_some_exception():
    """some(None) is rejected with InvalidArgument."""
    with pytest.raises(option.InvalidArgument):
        option.some(None)
def test_none():
    """none() represents the missing value."""
    missing = option.none()
    assert missing == None
def test_value():
    """value() unwraps a some, or yields the default for a none."""
    present = option.some(5)
    absent = option.none()
    assert option.value(1, present) == 5
    assert option.value(1, absent) == 1
def test_get():
    """get() unwraps a some and raises InvalidArgument on a none."""
    present = option.some("Dave")
    absent = option.none()
    assert option.get(present) == "Dave"
    with pytest.raises(option.InvalidArgument):
        assert option.get(absent)
def test_bind():
    """bind() applies the function to a some and passes a none through."""
    present = option.some("Dave")
    absent = option.none()
    assert option.bind(present, str.upper) == "DAVE"
    assert option.bind(absent, str.upper) == None
def test_join():
    """join() flattens one level of option nesting."""
    nested: Optional[Optional[str]] = option.some(option.some("Dave"))
    assert option.join(nested) == "Dave"
    outer_none = None
    assert option.join(outer_none) == None
def test_map():
    """map() transforms a some and passes a none through."""
    present: Optional[int] = option.some(55)
    assert option.map(str, present) == "55"
    assert option.map(lambda n: n / 2, present) == 27.5
    assert option.map(str, option.none()) == None
def test_fold():
    """fold() applies the function to a some, else yields the default."""
    present: Optional[int] = option.some(55)
    absent: Optional[int] = option.none()
    assert option.fold(-1, lambda n: n * 2, present) == 110
    assert option.fold(-1, lambda n: n * 2, absent) == -1
def test_iter(capsys):
    """iter() runs the side-effecting function only on a some."""
    present: Optional[int] = option.some(55)
    option.iter(print, present)
    assert capsys.readouterr().out == "55\n"
    option.iter(print, option.none())
    assert capsys.readouterr().out == ""
def test_is_none():
    """is_none() recognizes a none and rejects a some."""
    assert option.is_none(option.some(55)) == False
    assert option.is_none(option.none()) == True
def test_is_some():
    """is_some() recognizes a some and rejects a none."""
    assert option.is_some(option.some(55)) == True
    assert option.is_some(option.none()) == False
def test_equal():
    """equal() compares wrapped values with the given predicate; two nones are equal."""
    first_ten = option.some(10)
    second_ten = option.some(10)
    fifty_five = option.some(55)
    missing = option.none()
    same: Callable[[int, int], bool] = lambda x, y: x == y
    assert option.equal(same, first_ten, second_ten)
    assert option.equal(same, first_ten, fifty_five) == False
    assert option.equal(same, first_ten, missing) == False
    assert option.equal(same, missing, None)
def test_compare():
    """compare() orders by the wrapped values; a none sorts below any some."""
    opt1 = option.some(10)
    opt2 = option.some(10)
    opt3 = option.some(55)
    opt4 = option.none()
    compare: Callable[[int, int], int] = lambda x, y: x - y
    assert option.compare(compare, opt1, opt2) == 0
    assert option.compare(compare, opt1, opt3) < 0
    assert option.compare(compare, opt3, opt1) > 0
    assert option.compare(compare, opt1, opt4) > 0
    # Bug fix: the line above was duplicated verbatim; check the symmetric
    # ordering (none on the left) instead.
    assert option.compare(compare, opt4, opt1) < 0
    assert option.compare(compare, opt4, None) == 0
def test_to_list():
    """to_list() yields a singleton list for a some, empty for a none."""
    present = option.some(55)
    assert option.to_list(present) == [55]
    assert option.to_list(option.none()) == []
def test_to_sequence():
    """to_seq() yields a singleton sequence for a some, empty for a none."""
    present = option.some(55)
    assert option.to_seq(present) == [55]
    assert option.to_seq(option.none()) == []
|
def get_action_value(mdp, state_values, state, action, gamma):
    """Compute the action value Q(s, a): the expectation over successor
    states of immediate reward plus gamma-discounted state value."""
    q_value = 0
    for next_state, prob in mdp.get_next_states(state, action).items():
        reward = mdp.get_reward(state, action, next_state)
        q_value += prob * (reward + gamma * state_values[next_state])
    return q_value
|
import torch
from torch import tensor
import torch.nn as nn
import sys,os
import math
import sys
sys.path.append(os.getcwd())
from lib.utils import initialize_weights
# from lib.models.common2 import DepthSeperabelConv2d as Conv
# from lib.models.common2 import SPP, Bottleneck, BottleneckCSP, Focus, Concat, Detect
from lib.models.common import Conv, SPP, Bottleneck, BottleneckCSP, Focus, Concat, Detect
from torch.nn import Upsample
from lib.utils import check_anchor_order
from lib.core.evaluate import SegmentationMetric
from lib.utils.utils import time_synchronized
# Backbone-only config. Each row is [from_index, block_class, block_args]:
# from_index -1 means "previous layer's output"; block_args are the block's
# constructor arguments (typically in_channels, out_channels, ...).
CSPDarknet_s = [
    [ -1, Focus, [3, 32, 3]],
    [ -1, Conv, [32, 64, 3, 2]],
    [ -1, BottleneckCSP, [64, 64, 1]],
    [ -1, Conv, [64, 128, 3, 2]],
    [ -1, BottleneckCSP, [128, 128, 3]],
    [ -1, Conv, [128, 256, 3, 2]],
    [ -1, BottleneckCSP, [256, 256, 3]],
    [ -1, Conv, [256, 512, 3, 2]],
    [ -1, SPP, [512, 512, [5, 9, 13]]],
    [ -1, BottleneckCSP, [512, 512, 1, False]]
]
# MCnet = [
# [ -1, Focus, [3, 32, 3]],
# [ -1, Conv, [32, 64, 3, 2]],
# [ -1, BottleneckCSP, [64, 64, 1]],
# [ -1, Conv, [64, 128, 3, 2]],
# [ -1, BottleneckCSP, [128, 128, 3]],
# [ -1, Conv, [128, 256, 3, 2]],
# [ -1, BottleneckCSP, [256, 256, 3]],
# [ -1, Conv, [256, 512, 3, 2]],
# [ -1, SPP, [512, 512, [5, 9, 13]]],
# [ -1, BottleneckCSP, [512, 512, 1, False]],
# [ -1, Conv,[512, 256, 1, 1]],
# [ -1, Upsample, [None, 2, 'nearest']],
# [ [-1, 6], Concat, [1]],
# [ -1, BottleneckCSP, [512, 256, 1, False]],
# [ -1, Conv, [256, 128, 1, 1]],
# [ -1, Upsample, [None, 2, 'nearest']],
# [ [-1,4], Concat, [1]],
# [ -1, BottleneckCSP, [256, 128, 1, False]],
# [ -1, Conv, [128, 128, 3, 2]],
# [ [-1, 14], Concat, [1]],
# [ -1, BottleneckCSP, [256, 256, 1, False]],
# [ -1, Conv, [256, 256, 3, 2]],
# [ [-1, 10], Concat, [1]],
# [ -1, BottleneckCSP, [512, 512, 1, False]],
# [ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]],
# [ 17, Conv, [128, 64, 3, 1]],
# [ -1, Upsample, [None, 2, 'nearest']],
# [ [-1,2], Concat, [1]],
# [ -1, BottleneckCSP, [128, 64, 1, False]],
# [ -1, Conv, [64, 32, 3, 1]],
# [ -1, Upsample, [None, 2, 'nearest']],
# [ -1, Conv, [32, 16, 3, 1]],
# [ -1, BottleneckCSP, [16, 8, 1, False]],
# [ -1, Upsample, [None, 2, 'nearest']],
# [ -1, Conv, [8, 2, 3, 1]] #segmentation output
# ]
# Variant with an SPP block as the final segmentation layer and a 13-class
# detect head. Rows are [from_index(es), block_class, block_args]; a list of
# from-indices feeds multiple cached outputs into the block.
MCnet_SPP = [
    [ -1, Focus, [3, 32, 3]],
    [ -1, Conv, [32, 64, 3, 2]],
    [ -1, BottleneckCSP, [64, 64, 1]],
    [ -1, Conv, [64, 128, 3, 2]],
    [ -1, BottleneckCSP, [128, 128, 3]],
    [ -1, Conv, [128, 256, 3, 2]],
    [ -1, BottleneckCSP, [256, 256, 3]],
    [ -1, Conv, [256, 512, 3, 2]],
    [ -1, SPP, [512, 512, [5, 9, 13]]],
    [ -1, BottleneckCSP, [512, 512, 1, False]],
    [ -1, Conv,[512, 256, 1, 1]],
    [ -1, Upsample, [None, 2, 'nearest']],
    [ [-1, 6], Concat, [1]],
    [ -1, BottleneckCSP, [512, 256, 1, False]],
    [ -1, Conv, [256, 128, 1, 1]],
    [ -1, Upsample, [None, 2, 'nearest']],
    [ [-1,4], Concat, [1]],
    [ -1, BottleneckCSP, [256, 128, 1, False]],
    [ -1, Conv, [128, 128, 3, 2]],
    [ [-1, 14], Concat, [1]],
    [ -1, BottleneckCSP, [256, 256, 1, False]],
    [ -1, Conv, [256, 256, 3, 2]],
    [ [-1, 10], Concat, [1]],
    [ -1, BottleneckCSP, [512, 512, 1, False]],
    # [ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]],
    [ [17, 20, 23], Detect, [13, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]],
    [ 17, Conv, [128, 64, 3, 1]],
    [ -1, Upsample, [None, 2, 'nearest']],
    [ [-1,2], Concat, [1]],
    [ -1, BottleneckCSP, [128, 64, 1, False]],
    [ -1, Conv, [64, 32, 3, 1]],
    [ -1, Upsample, [None, 2, 'nearest']],
    [ -1, Conv, [32, 16, 3, 1]],
    [ -1, BottleneckCSP, [16, 8, 1, False]],
    [ -1, Upsample, [None, 2, 'nearest']],
    [ -1, SPP, [8, 2, [5, 9, 13]]] #segmentation output
]
# [2,6,3,9,5,13], [7,19,11,26,17,39], [28,64,44,103,61,183]
# Lighter variant using downsampling BottleneckCSP blocks in the backbone and
# two segmentation branches (driving area and lane line). Row format as above.
# NOTE(review): the trailing #N comments are stale layer indices — verify
# against the actual list positions before relying on them.
MCnet_fast = [
    [ -1, Focus, [3, 32, 3]],#0
    [ -1, Conv, [32, 64, 3, 2]],#1
    [ -1, BottleneckCSP, [64, 128, 1, True, True]],#2
    [ -1, BottleneckCSP, [128, 256, 1, True, True]],#4
    [ -1, BottleneckCSP, [256, 512, 1, True, True]],#6
    [ -1, SPP, [512, 512, [5, 9, 13]]],#8
    [ -1, BottleneckCSP, [512, 512, 1, False]],#9
    [ -1, Conv,[512, 256, 1, 1]],#10
    [ -1, Upsample, [None, 2, 'nearest']],#11
    [ [-1, 6], Concat, [1]],#12
    [ -1, BottleneckCSP, [512, 256, 1, False]],#13
    [ -1, Conv, [256, 128, 1, 1]],#14
    [ -1, Upsample, [None, 2, 'nearest']],#15
    [ [-1,4], Concat, [1]],#16
    [ -1, BottleneckCSP, [256, 128, 1, False, True]],#17
    [ [-1, 14], Concat, [1]],#19
    [ -1, BottleneckCSP, [256, 256, 1, False, True]],#20
    [ [-1, 10], Concat, [1]],#22
    [ -1, BottleneckCSP, [512, 512, 1, False]],#23
    [ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
    [ 16, Conv, [256, 64, 3, 1]],#25
    [ -1, Upsample, [None, 2, 'nearest']],#26
    [ [-1,2], Concat, [1]],#27
    [ -1, BottleneckCSP, [128, 32, 1, False]],#28
    # [ -1, Conv, [64, 32, 1, 1]],#29
    [ -1, Upsample, [None, 2, 'nearest']],#30
    # [ -1, Conv, [32, 16, 1, 1]],#31
    [ -1, BottleneckCSP, [32, 8, 1, False]],#32
    [ -1, Upsample, [None, 2, 'nearest']],#33
    [ -1, Conv, [8, 2, 1, 1]], #Driving area segmentation output#34
    [ 16, Conv, [256, 64, 3, 1]],
    [ -1, Upsample, [None, 2, 'nearest']],
    [ [-1,2], Concat, [1]],
    [ -1, BottleneckCSP, [128, 32, 1, False]],
    # [ -1, Conv, [64, 32, 1, 1]],
    [ -1, Upsample, [None, 2, 'nearest']],
    # [ -1, Conv, [32, 16, 1, 1]],
    [ 31, BottleneckCSP, [32, 8, 1, False]],#35
    [ -1, Upsample, [None, 2, 'nearest']],#36
    [ -1, Conv, [8, 2, 1, 1]], #Lane line segmentation output #37
]
# Light variant: standard CSPDarknet backbone, detect head plus two
# segmentation heads that branch from layer 16 / 30 without extra Concats.
MCnet_light = [
    [ -1, Focus, [3, 32, 3]],#0
    [ -1, Conv, [32, 64, 3, 2]],#1
    [ -1, BottleneckCSP, [64, 64, 1]],#2
    [ -1, Conv, [64, 128, 3, 2]],#3
    [ -1, BottleneckCSP, [128, 128, 3]],#4
    [ -1, Conv, [128, 256, 3, 2]],#5
    [ -1, BottleneckCSP, [256, 256, 3]],#6
    [ -1, Conv, [256, 512, 3, 2]],#7
    [ -1, SPP, [512, 512, [5, 9, 13]]],#8
    [ -1, BottleneckCSP, [512, 512, 1, False]],#9
    [ -1, Conv,[512, 256, 1, 1]],#10
    [ -1, Upsample, [None, 2, 'nearest']],#11
    [ [-1, 6], Concat, [1]],#12
    [ -1, BottleneckCSP, [512, 256, 1, False]],#13
    [ -1, Conv, [256, 128, 1, 1]],#14
    [ -1, Upsample, [None, 2, 'nearest']],#15
    [ [-1,4], Concat, [1]],#16
    [ -1, BottleneckCSP, [256, 128, 1, False]],#17
    [ -1, Conv, [128, 128, 3, 2]],#18
    [ [-1, 14], Concat, [1]],#19
    [ -1, BottleneckCSP, [256, 256, 1, False]],#20
    [ -1, Conv, [256, 256, 3, 2]],#21
    [ [-1, 10], Concat, [1]],#22
    [ -1, BottleneckCSP, [512, 512, 1, False]],#23
    [ [17, 20, 23], Detect, [1, [[4,12,6,18,10,27], [15,38,24,59,39,78], [51,125,73,168,97,292]], [128, 256, 512]]], #Detect output 24
    [ 16, Conv, [256, 128, 3, 1]],#25
    [ -1, Upsample, [None, 2, 'nearest']],#26
    # [ [-1,2], Concat, [1]],#27
    [ -1, BottleneckCSP, [128, 64, 1, False]],#27
    [ -1, Conv, [64, 32, 3, 1]],#28
    [ -1, Upsample, [None, 2, 'nearest']],#29
    [ -1, Conv, [32, 16, 3, 1]],#30
    [ -1, BottleneckCSP, [16, 8, 1, False]],#31
    [ -1, Upsample, [None, 2, 'nearest']],#32
    [ -1, Conv, [8, 3, 3, 1]], #Driving area segmentation output#33
    # [ 16, Conv, [128, 64, 3, 1]],
    # [ -1, Upsample, [None, 2, 'nearest']],
    # [ [-1,2], Concat, [1]],
    # [ -1, BottleneckCSP, [128, 64, 1, False]],
    # [ -1, Conv, [64, 32, 3, 1]],
    # [ -1, Upsample, [None, 2, 'nearest']],
    # [ -1, Conv, [32, 16, 3, 1]],
    [ 30, BottleneckCSP, [16, 8, 1, False]],#34
    [ -1, Upsample, [None, 2, 'nearest']],#35
    [ -1, Conv, [8, 2, 3, 1]], #Lane line segmentation output #36
]
# The lane line and the driving area segment branches share information with each other
# Variant where the lane-line and driving-area segmentation branches exchange
# features through a shared block (rows 41-43) before their final outputs.
MCnet_share = [
    [ -1, Focus, [3, 32, 3]], #0
    [ -1, Conv, [32, 64, 3, 2]], #1
    [ -1, BottleneckCSP, [64, 64, 1]], #2
    [ -1, Conv, [64, 128, 3, 2]], #3
    [ -1, BottleneckCSP, [128, 128, 3]], #4
    [ -1, Conv, [128, 256, 3, 2]], #5
    [ -1, BottleneckCSP, [256, 256, 3]], #6
    [ -1, Conv, [256, 512, 3, 2]], #7
    [ -1, SPP, [512, 512, [5, 9, 13]]], #8
    [ -1, BottleneckCSP, [512, 512, 1, False]], #9
    [ -1, Conv,[512, 256, 1, 1]], #10
    [ -1, Upsample, [None, 2, 'nearest']], #11
    [ [-1, 6], Concat, [1]], #12
    [ -1, BottleneckCSP, [512, 256, 1, False]], #13
    [ -1, Conv, [256, 128, 1, 1]], #14
    [ -1, Upsample, [None, 2, 'nearest']], #15
    [ [-1,4], Concat, [1]], #16
    [ -1, BottleneckCSP, [256, 128, 1, False]], #17
    [ -1, Conv, [128, 128, 3, 2]], #18
    [ [-1, 14], Concat, [1]], #19
    [ -1, BottleneckCSP, [256, 256, 1, False]], #20
    [ -1, Conv, [256, 256, 3, 2]], #21
    [ [-1, 10], Concat, [1]], #22
    [ -1, BottleneckCSP, [512, 512, 1, False]], #23
    [ [17, 20, 23], Detect, [1, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
    [ 16, Conv, [256, 64, 3, 1]], #25
    [ -1, Upsample, [None, 2, 'nearest']], #26
    [ [-1,2], Concat, [1]], #27
    [ -1, BottleneckCSP, [128, 64, 1, False]], #28
    [ -1, Conv, [64, 32, 3, 1]], #29
    [ -1, Upsample, [None, 2, 'nearest']], #30
    [ -1, Conv, [32, 16, 3, 1]], #31
    [ -1, BottleneckCSP, [16, 8, 1, False]], #32 driving area segment neck
    [ 16, Conv, [256, 64, 3, 1]], #33
    [ -1, Upsample, [None, 2, 'nearest']], #34
    [ [-1,2], Concat, [1]], #35
    [ -1, BottleneckCSP, [128, 64, 1, False]], #36
    [ -1, Conv, [64, 32, 3, 1]], #37
    [ -1, Upsample, [None, 2, 'nearest']], #38
    [ -1, Conv, [32, 16, 3, 1]], #39
    [ -1, BottleneckCSP, [16, 8, 1, False]], #40 lane line segment neck
    [ [31,39], Concat, [1]], #41
    [ -1, Conv, [32, 8, 3, 1]], #42 Share_Block
    [ [32,42], Concat, [1]], #43
    [ -1, Upsample, [None, 2, 'nearest']], #44
    [ -1, Conv, [16, 2, 3, 1]], #45 Driving area segmentation output
    [ [40,42], Concat, [1]], #46
    [ -1, Upsample, [None, 2, 'nearest']], #47
    [ -1, Conv, [16, 2, 3, 1]] #48Lane line segmentation output
]
# The lane line and the driving area segment branches without share information with each other
# Variant with fully independent lane-line and driving-area segmentation
# branches (no shared block), and a 13-class detect head.
MCnet_no_share = [
    [ -1, Focus, [3, 32, 3]], #0
    [ -1, Conv, [32, 64, 3, 2]], #1
    [ -1, BottleneckCSP, [64, 64, 1]], #2
    [ -1, Conv, [64, 128, 3, 2]], #3
    [ -1, BottleneckCSP, [128, 128, 3]], #4
    [ -1, Conv, [128, 256, 3, 2]], #5
    [ -1, BottleneckCSP, [256, 256, 3]], #6
    [ -1, Conv, [256, 512, 3, 2]], #7
    [ -1, SPP, [512, 512, [5, 9, 13]]], #8
    [ -1, BottleneckCSP, [512, 512, 1, False]], #9
    [ -1, Conv,[512, 256, 1, 1]], #10
    [ -1, Upsample, [None, 2, 'nearest']], #11
    [ [-1, 6], Concat, [1]], #12
    [ -1, BottleneckCSP, [512, 256, 1, False]], #13
    [ -1, Conv, [256, 128, 1, 1]], #14
    [ -1, Upsample, [None, 2, 'nearest']], #15
    [ [-1,4], Concat, [1]], #16
    [ -1, BottleneckCSP, [256, 128, 1, False]], #17
    [ -1, Conv, [128, 128, 3, 2]], #18
    [ [-1, 14], Concat, [1]], #19
    [ -1, BottleneckCSP, [256, 256, 1, False]], #20
    [ -1, Conv, [256, 256, 3, 2]], #21
    [ [-1, 10], Concat, [1]], #22
    [ -1, BottleneckCSP, [512, 512, 1, False]], #23
    [ [17, 20, 23], Detect, [13, [[3,9,5,11,4,20], [7,18,6,39,12,31], [19,50,38,81,68,157]], [128, 256, 512]]], #Detect output 24
    [ 16, Conv, [256, 64, 3, 1]], #25
    [ -1, Upsample, [None, 2, 'nearest']], #26
    [ [-1,2], Concat, [1]], #27
    [ -1, BottleneckCSP, [128, 64, 1, False]], #28
    [ -1, Conv, [64, 32, 3, 1]], #29
    [ -1, Upsample, [None, 2, 'nearest']], #30
    [ -1, Conv, [32, 16, 3, 1]], #31
    [ -1, BottleneckCSP, [16, 8, 1, False]], #32 driving area segment neck
    [ -1, Upsample, [None, 2, 'nearest']], #33
    [ -1, Conv, [8, 3, 3, 1]], #34 Driving area segmentation output
    [ 16, Conv, [256, 64, 3, 1]], #35
    [ -1, Upsample, [None, 2, 'nearest']], #36
    [ [-1,2], Concat, [1]], #37
    [ -1, BottleneckCSP, [128, 64, 1, False]], #38
    [ -1, Conv, [64, 32, 3, 1]], #39
    [ -1, Upsample, [None, 2, 'nearest']], #40
    [ -1, Conv, [32, 16, 3, 1]], #41
    [ -1, BottleneckCSP, [16, 8, 1, False]], #42 lane line segment neck
    [ -1, Upsample, [None, 2, 'nearest']], #43
    [ -1, Conv, [8, 2, 3, 1]] #44 Lane line segmentation output
]
class MCnet(nn.Module):
    """Multi-task network: object detection plus driving-area and lane-line
    segmentation heads, built layer-by-layer from a block-config table
    such as ``MCnet_no_share``.

    ``forward`` returns a 3-element list:
    [detection output, sigmoid(driving-area seg map), sigmoid(lane-line seg map)].
    """
    def __init__(self, block_cfg, **kwargs):
        super(MCnet, self).__init__()
        layers, save= [], []
        self.nc = 13  # number of detection classes
        self.detector_index = -1  # set to the Detect layer's index during build
        # Driving-area output index depends on config length (the 49-layer
        # feature-sharing variant vs. the default non-sharing layout).
        self.Da_out_idx = 45 if len(block_cfg)==49 else 34
        # self.Da_out_idx = 37
        # Build model
        # print(block_cfg)
        for i, (from_, block, args) in enumerate(block_cfg):
            # NOTE(review): eval() on config strings is only safe for trusted configs.
            block = eval(block) if isinstance(block, str) else block # eval strings
            if block is Detect:
                self.detector_index = i
            block_ = block(*args)
            block_.index, block_.from_ = i, from_
            layers.append(block_)
            save.extend(x % i for x in ([from_] if isinstance(from_, int) else from_) if x != -1) # append to savelist
        self.model, self.save = nn.Sequential(*layers), sorted(save)
        self.names = [str(i) for i in range(self.nc)]
        # Set stride and anchors for the detector via a dummy forward pass.
        Detector = self.model[self.detector_index]  # detector
        if isinstance(Detector, Detect):
            s = 128  # 2x min stride
            # for x in self.forward(torch.zeros(1, 3, s, s)):
            #     print (x.shape)
            with torch.no_grad():
                detects, _, _= self.forward(torch.zeros(1, 3, s, s))
            Detector.stride = torch.tensor([s / x.shape[-2] for x in detects])  # forward
            # print("stride"+str(Detector.stride ))
            Detector.anchors /= Detector.stride.view(-1, 1, 1)  # Set the anchors for the corresponding scale
            check_anchor_order(Detector)
            self.stride = Detector.stride
            self._initialize_biases()
        initialize_weights(self)

    def forward(self, x):
        """Run all layers, collecting the Detect output, the driving-area
        segmentation output (sigmoid, at index ``Da_out_idx``) and finally
        the last layer's (lane-line) output through a sigmoid."""
        cache = []
        out = []
        #times = []
        for i, block in enumerate(self.model):
            #t0 = time_synchronized()
            if block.from_ != -1:
                # Gather this block's input(s) from earlier cached layer outputs.
                x = cache[block.from_] if isinstance(block.from_, int) else [x if j == -1 else cache[j] for j in block.from_] #calculate concat detect
            x = block(x)
            if isinstance(block, Detect):   # save detector result
                out.append(x)
            if i == self.Da_out_idx:     #save driving area segment result
                m=nn.Sigmoid()
                out.append(m(x))
            # Only keep outputs that later layers will consume (savelist).
            cache.append(x if block.index in self.save else None)
            """t1 = time_synchronized()
            print(str(i) + " : " + str(t1-t0))
            times.append(t1-t0)
            print(sum(times[:25]))
            print(sum(times[25:33]))
            print(sum(times[33:41]))
            print(sum(times[41:43]))
            print(sum(times[43:46]))
            print(sum(times[46:]))"""
        m=nn.Sigmoid()
        out.append(m(x))
        return out

    def _initialize_biases(self, cf=None):  # initialize biases into Detect(), cf is class frequency
        # Focal-loss-style prior initialization, see
        # https://arxiv.org/abs/1708.02002 section 3.3
        # cf = torch.bincount(torch.tensor(np.concatenate(dataset.labels, 0)[:, 0]).long(), minlength=nc) + 1.
        # m = self.model[-1]  # Detect() module
        m = self.model[self.detector_index]  # Detect() module
        for mi, s in zip(m.m, m.stride):  # from
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
class CSPDarknet(nn.Module):
    """CSPDarknet backbone with a single Detect head, built layer-by-layer
    from a block-config table (same row format as ``MCnet_no_share``).

    ``forward`` returns [detection output, sigmoid(final layer's output)].
    """
    def __init__(self, block_cfg, **kwargs):
        super(CSPDarknet, self).__init__()
        layers, save = [], []
        self.nc = 1  # number of detection classes (single-class variant)
        self.detector_index = -1  # set to the Detect layer's index during build
        # Build model: each config row is (from_index(es), block_class, args).
        for i, (from_, block, args) in enumerate(block_cfg):
            # NOTE(review): eval() on config strings is only safe for trusted configs.
            block = eval(block) if isinstance(block, str) else block  # eval strings
            if block is Detect:
                self.detector_index = i
            block_ = block(*args)
            block_.index, block_.from_ = i, from_
            layers.append(block_)
            # Remember which layer outputs later layers consume (Concat/Detect inputs).
            save.extend(x % i for x in ([from_] if isinstance(from_, int) else from_) if x != -1)
        self.model, self.save = nn.Sequential(*layers), sorted(save)
        self.names = [str(i) for i in range(self.nc)]
        # Set stride and anchors for the detector via a dummy forward pass.
        Detector = self.model[self.detector_index]  # detector
        if isinstance(Detector, Detect):
            s = 128  # 2x min stride
            with torch.no_grad():
                detects, _ = self.forward(torch.zeros(1, 3, s, s))
            Detector.stride = torch.tensor([s / x.shape[-2] for x in detects])
            Detector.anchors /= Detector.stride.view(-1, 1, 1)  # scale anchors per detection level
            check_anchor_order(Detector)
            self.stride = Detector.stride
            self._initialize_biases()
        initialize_weights(self)

    def forward(self, x):
        """Run the backbone and detect head.

        Returns [detection output, sigmoid of the final layer's output].
        """
        cache = []
        out = []
        for i, block in enumerate(self.model):
            if block.from_ != -1:
                # Gather this block's input(s) from earlier cached layer outputs.
                x = cache[block.from_] if isinstance(block.from_, int) else [x if j == -1 else cache[j] for j in block.from_]
            # BUGFIX: removed leftover debug timing that printed `start - end`
            # (a negative elapsed time) for every layer on every forward pass.
            x = block(x)
            if isinstance(block, Detect):  # save detector result
                out.append(x)
            # Only keep outputs that later layers will consume (savelist).
            cache.append(x if block.index in self.save else None)
        m = nn.Sigmoid()
        out.append(m(x))
        return out

    def _initialize_biases(self, cf=None):
        """Initialize Detect biases with a focal-loss-style prior
        (https://arxiv.org/abs/1708.02002 section 3.3).

        cf: optional per-class frequency tensor; uniform prior when None.
        """
        m = self.model[self.detector_index]  # Detect() module
        for mi, s in zip(m.m, m.stride):
            b = mi.bias.view(m.na, -1)  # conv.bias(255) to (3,85)
            b[:, 4] += math.log(8 / (640 / s) ** 2)  # obj (8 objects per 640 image)
            b[:, 5:] += math.log(0.6 / (m.nc - 0.99)) if cf is None else torch.log(cf / cf.sum())  # cls
            mi.bias = torch.nn.Parameter(b.view(-1), requires_grad=True)
def get_net(cfg, **kwargs):
    """Build and return the multi-task MCnet model.

    ``cfg`` is currently unused: the non-sharing block configuration is
    always selected (the shared variant is commented out below).
    """
    # m_block_cfg = MCnet_share if cfg.MODEL.STRU_WITHSHARE else MCnet_no_share
    return MCnet(MCnet_no_share, **kwargs)
if __name__ == "__main__":
    from torch.utils.tensorboard import SummaryWriter

    # Smoke-test: run the model on a random input and compute segmentation
    # metrics for the lane-line head against a random ground truth.
    model = get_net(False)
    dummy_input = torch.randn((1, 3, 256, 256))
    dummy_gt = torch.rand((1, 2, 256, 256))
    metric = SegmentationMetric(2)
    detects, driving_area_seg, lane_line_seg = model(dummy_input)
    for det in detects:
        print(det.shape)
    print(driving_area_seg.shape)
    print(driving_area_seg.view(-1).shape)
    _, predict = torch.max(driving_area_seg, 1)
    print(predict.shape)
    print(lane_line_seg.shape)
    _, lane_line_pred = torch.max(lane_line_seg, 1)
    _, lane_line_gt = torch.max(dummy_gt, 1)
    metric.reset()
    metric.addBatch(lane_line_pred.cpu(), lane_line_gt.cpu())
    acc = metric.pixelAccuracy()
    meanAcc = metric.meanPixelAccuracy()
    mIoU = metric.meanIntersectionOverUnion()
    FWIoU = metric.Frequency_Weighted_Intersection_over_Union()
    IoU = metric.IntersectionOverUnion()
    print(IoU)
    print(mIoU)
# -*- coding: utf-8 -*-
from wakatime.main import execute
from wakatime.packages import requests
import logging
import os
import time
import shutil
import sys
import uuid
from testfixtures import log_capture
from wakatime.arguments import parse_arguments
from wakatime.compat import u, is_py3
from wakatime.constants import (
API_ERROR,
AUTH_ERROR,
SUCCESS,
)
from wakatime.packages.requests.exceptions import RequestException
from wakatime.packages.requests.models import Response
from wakatime.utils import get_user_agent
from .utils import mock, json, ANY, CustomResponse, TemporaryDirectory, TestCase, NamedTemporaryFile
class ArgumentsTestCase(TestCase):
    """Tests for command-line argument handling in wakatime.main.execute."""

    # Attributes/functions patched by the TestCase harness for every test.
    # Plain strings are patched with a MagicMock; [target, value] pairs are
    # patched with the given return value.
    patch_these = [
        'wakatime.packages.requests.adapters.HTTPAdapter.send',
        'wakatime.offlinequeue.Queue.push',
        ['wakatime.offlinequeue.Queue.pop', None],
        ['wakatime.offlinequeue.Queue.connect', None],
        'wakatime.session_cache.SessionCache.save',
        'wakatime.session_cache.SessionCache.delete',
        ['wakatime.session_cache.SessionCache.get', requests.session],
        ['wakatime.session_cache.SessionCache.connect', None],
    ]
@log_capture()
def test_help_contents(self, logs):
logging.disable(logging.NOTSET)
args = ['--help']
with self.assertRaises(SystemExit) as e:
execute(args)
self.assertEquals(int(str(e.exception)), 0)
expected_stdout = open('tests/samples/output/test_help_contents').read()
self.assertEquals(sys.stdout.getvalue(), expected_stdout)
self.assertEquals(sys.stderr.getvalue(), '')
self.assertNothingLogged(logs)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
@log_capture()
def test_argument_parsing(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
key = str(uuid.uuid4())
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--key', key, '--config', config]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
self.patched['wakatime.session_cache.SessionCache.get'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_called_once_with()
@log_capture()
def test_argument_parsing_strips_quotes(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
now = u(int(time.time()))
config = 'tests/samples/configs/good_config.cfg'
entity = 'tests/samples/codefiles/python.py'
plugin = '"abc plugin\\"with quotes"'
args = ['--file', '"' + entity + '"', '--config', config, '--time', now, '--plugin', plugin]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
ua = get_user_agent().replace('Unknown/0', 'abc plugin"with quotes')
heartbeat = {
'entity': os.path.realpath(entity),
'project': os.path.basename(os.path.abspath('.')),
'branch': ANY,
'time': float(now),
'type': 'file',
'cursorpos': None,
'dependencies': ['sqlalchemy', 'jinja', 'simplejson', 'flask', 'app', 'django', 'pygments', 'unittest', 'mock'],
'language': u('Python'),
'lineno': None,
'lines': 37,
'is_write': False,
'user_agent': ua,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_lineno_and_cursorpos(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
entity = 'tests/samples/codefiles/twolinefile.txt'
config = 'tests/samples/configs/good_config.cfg'
now = u(int(time.time()))
heartbeat = {
'language': 'Text only',
'lines': 2,
'entity': os.path.realpath(entity),
'project': os.path.basename(os.path.abspath('.')),
'cursorpos': '4',
'lineno': '3',
'branch': ANY,
'time': float(now),
'is_write': False,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}
args = ['--entity', entity, '--config', config, '--time', now, '--lineno', '3', '--cursorpos', '4', '--verbose']
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
self.assertIn('WakaTime DEBUG Sending heartbeats to api', actual)
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_invalid_timeout_passed_via_command_line(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
config = 'tests/samples/configs/good_config.cfg'
key = str(uuid.uuid4())
args = ['--file', entity, '--key', key, '--config', config, '--timeout', 'abc']
with self.assertRaises(SystemExit) as e:
execute(args)
self.assertNothingLogged(logs)
self.assertEquals(int(str(e.exception)), 2)
self.assertEquals(sys.stdout.getvalue(), '')
expected_stderr = open('tests/samples/output/main_test_timeout_passed_via_command_line').read()
self.assertEquals(sys.stderr.getvalue(), expected_stderr)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.get'].assert_not_called()
@log_capture()
def test_missing_entity_file(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
entity = 'tests/samples/codefiles/missingfile.txt'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--verbose']
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = 'WakaTime DEBUG File does not exist; ignoring this heartbeat.'
self.assertIn(expected, actual)
self.assertHeartbeatNotSent()
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheUntouched()
@log_capture()
def test_missing_entity_argument(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
config = 'tests/samples/configs/good_config.cfg'
args = ['--config', config]
with self.assertRaises(SystemExit) as e:
execute(args)
self.assertEquals(int(str(e.exception)), 2)
self.assertEquals(sys.stdout.getvalue(), '')
expected = 'error: argument --entity is required'
self.assertIn(expected, sys.stderr.getvalue())
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = ''
self.assertEquals(log_output, expected)
self.patched['wakatime.session_cache.SessionCache.get'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
@log_capture()
def test_missing_api_key(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
config = 'tests/samples/configs/missing_api_key.cfg'
args = ['--config', config]
with self.assertRaises(SystemExit) as e:
execute(args)
self.assertEquals(int(str(e.exception)), AUTH_ERROR)
self.assertEquals(sys.stdout.getvalue(), '')
expected = 'error: Missing api key. Find your api key from wakatime.com/settings/api-key.'
self.assertIn(expected, sys.stderr.getvalue())
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = ''
self.assertEquals(log_output, expected)
self.patched['wakatime.session_cache.SessionCache.get'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
@log_capture()
def test_invalid_api_key(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
key = 'an-invalid-key'
args = ['--key', key]
with self.assertRaises(SystemExit) as e:
execute(args)
self.assertEquals(int(str(e.exception)), AUTH_ERROR)
self.assertEquals(sys.stdout.getvalue(), '')
expected = 'error: Invalid api key. Find your api key from wakatime.com/settings/api-key.'
self.assertIn(expected, sys.stderr.getvalue())
log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
expected = ''
self.assertEquals(log_output, expected)
self.patched['wakatime.session_cache.SessionCache.get'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_not_called()
@log_capture()
def test_api_key_passed_via_command_line(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
filename = list(filter(lambda x: x.endswith('.txt'), os.listdir(u('tests/samples/codefiles/unicode'))))[0]
entity = os.path.join('tests/samples/codefiles/unicode', filename)
shutil.copy(entity, os.path.join(tempdir, filename))
entity = os.path.realpath(os.path.join(tempdir, filename))
now = u(int(time.time()))
key = str(uuid.uuid4())
args = ['--file', entity, '--key', key, '--time', now, '--config', 'fake-foobar']
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': os.path.realpath(entity),
'project': None,
'time': float(now),
'type': 'file',
'cursorpos': None,
'dependencies': [],
'lineno': None,
'is_write': False,
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_proxy_argument(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
proxy = 'localhost:1337'
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--proxy', proxy]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
self.patched['wakatime.session_cache.SessionCache.get'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_called_once_with()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_called_once_with(ANY, cert=None, proxies={'https': proxy}, stream=False, timeout=60, verify=True)
@log_capture()
def test_disable_ssl_verify_argument(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--no-ssl-verify']
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
self.patched['wakatime.session_cache.SessionCache.get'].assert_called_once_with()
self.patched['wakatime.session_cache.SessionCache.delete'].assert_not_called()
self.patched['wakatime.session_cache.SessionCache.save'].assert_called_once_with(ANY)
self.patched['wakatime.offlinequeue.Queue.push'].assert_not_called()
self.patched['wakatime.offlinequeue.Queue.pop'].assert_called_once_with()
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].assert_called_once_with(ANY, cert=None, proxies=ANY, stream=False, timeout=60, verify=False)
@log_capture()
def test_write_argument(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
now = u(int(time.time()))
key = str(uuid.uuid4())
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': entity,
'project': None,
'time': float(now),
'type': 'file',
'is_write': True,
'dependencies': [],
'user_agent': ANY,
}
args = ['--file', entity, '--key', key, '--write', '--verbose',
'--config', 'tests/samples/configs/good_config.cfg', '--time', now]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
self.assertIn('WakaTime DEBUG Sending heartbeats to api', actual)
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_entity_type_domain(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
entity = 'google.com'
config = 'tests/samples/configs/good_config.cfg'
now = u(int(time.time()))
args = ['--entity', entity, '--entity-type', 'domain', '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'entity': u(entity),
'time': float(now),
'type': 'domain',
'cursorpos': None,
'language': None,
'lineno': None,
'lines': None,
'is_write': False,
'dependencies': [],
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_entity_type_app(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
entity = 'Firefox'
config = 'tests/samples/configs/good_config.cfg'
now = u(int(time.time()))
args = ['--entity', entity, '--entity-type', 'app', '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'entity': u(entity),
'time': float(now),
'type': 'app',
'cursorpos': None,
'dependencies': [],
'language': None,
'lineno': None,
'lines': None,
'is_write': False,
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_valid_categories(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
config = 'tests/samples/configs/good_config.cfg'
now = u(int(time.time()))
valid_categories = [
'coding',
'building',
'indexing',
'debugging',
'running tests',
'manual testing',
'browsing',
'code reviewing',
'designing',
]
for category in valid_categories:
args = ['--entity', entity, '--category', category, '--config', config, '--time', now]
self.resetMocks()
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'entity': u(entity),
'time': float(now),
'type': 'file',
'category': category,
'cursorpos': None,
'language': 'Text only',
'lines': 0,
'is_write': False,
'dependencies': [],
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_invalid_category(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
config = 'tests/samples/configs/good_config.cfg'
now = u(int(time.time()))
category = 'foobar'
args = ['--entity', entity, '--category', category, '--config', config, '--time', now]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'entity': u(entity),
'time': float(now),
'type': 'file',
'category': None,
'cursorpos': None,
'language': 'Text only',
'lines': 0,
'is_write': False,
'dependencies': [],
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_old_alternate_language_argument_still_supported(self, logs):
logging.disable(logging.NOTSET)
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
language = 'Java'
now = u(int(time.time()))
config = 'tests/samples/configs/good_config.cfg'
entity = 'tests/samples/codefiles/python.py'
args = ['--file', entity, '--config', config, '--time', now, '--alternate-language', language.upper()]
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'entity': os.path.realpath(entity),
'project': os.path.basename(os.path.abspath('.')),
'branch': ANY,
'time': float(now),
'type': 'file',
'cursorpos': None,
'dependencies': [],
'language': u(language),
'lineno': None,
'lines': 37,
'is_write': False,
'user_agent': ANY,
}
self.assertHeartbeatSent(heartbeat)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_extra_heartbeats_alternate_project_not_used(self, logs):
logging.disable(logging.NOTSET)
response = CustomResponse()
response.response_text = '{"responses": [[null, 201], [null,201]]}'
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now1 = u(int(time.time()))
project1 = os.path.basename(os.path.abspath('.'))
project_not_used = 'xyz'
entity1 = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
entity2 = os.path.abspath('tests/samples/codefiles/twolinefile.txt')
config = 'tests/samples/configs/good_config.cfg'
args = ['--time', now1, '--file', entity1, '--config', config, '--extra-heartbeats']
with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
now2 = int(time.time())
heartbeats = json.dumps([{
'timestamp': now2,
'entity': entity2,
'entity_type': 'file',
'alternate_project': project_not_used,
'is_write': True,
}])
mock_stdin.readline.return_value = heartbeats
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': entity1,
'project': project1,
'branch': ANY,
'time': float(now1),
'is_write': False,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}
extra_heartbeats = [{
'language': 'Text only',
'lines': 2,
'entity': entity2,
'project': project1,
'branch': ANY,
'time': float(now2),
'is_write': True,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}]
self.assertHeartbeatSent(heartbeat, extra_heartbeats=extra_heartbeats)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_extra_heartbeats_using_project_from_editor(self, logs):
logging.disable(logging.NOTSET)
response = CustomResponse()
response.response_text = '{"responses": [[null, 201], [null,201]]}'
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
now1 = u(int(time.time()))
project1 = os.path.basename(os.path.abspath('.'))
project2 = 'xyz'
entity1 = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
entity2 = os.path.abspath('tests/samples/codefiles/twolinefile.txt')
config = 'tests/samples/configs/good_config.cfg'
args = ['--time', now1, '--file', entity1, '--config', config, '--extra-heartbeats']
with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
now2 = int(time.time())
heartbeats = json.dumps([{
'timestamp': now2,
'entity': entity2,
'entity_type': 'file',
'project': project2,
'is_write': True,
}])
mock_stdin.readline.return_value = heartbeats
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': entity1,
'project': project1,
'branch': ANY,
'time': float(now1),
'is_write': False,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}
extra_heartbeats = [{
'language': 'Text only',
'lines': 2,
'entity': entity2,
'project': project2,
'branch': ANY,
'time': float(now2),
'is_write': True,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}]
self.assertHeartbeatSent(heartbeat, extra_heartbeats=extra_heartbeats)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_extra_heartbeats_when_project_not_detected(self, logs):
logging.disable(logging.NOTSET)
response = CustomResponse()
response.response_text = '{"responses": [[null, 201], [null,201]]}'
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
now1 = u(int(time.time()))
project1 = os.path.basename(os.path.abspath('.'))
entity1 = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
entity2 = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
config = 'tests/samples/configs/good_config.cfg'
args = ['--time', now1, '--file', entity1, '--config', config, '--extra-heartbeats']
with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
now2 = int(time.time())
heartbeats = json.dumps([{
'timestamp': now2,
'entity': entity2,
'entity_type': 'file',
'is_write': True,
}])
mock_stdin.readline.return_value = heartbeats
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': entity1,
'project': project1,
'branch': ANY,
'time': float(now1),
'is_write': False,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}
extra_heartbeats = [{
'language': 'Text only',
'lines': 2,
'entity': entity2,
'project': None,
'time': float(now2),
'is_write': True,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}]
self.assertHeartbeatSent(heartbeat, extra_heartbeats=extra_heartbeats)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
@log_capture()
def test_extra_heartbeats_when_project_not_detected_alternate_project_used(self, logs):
logging.disable(logging.NOTSET)
response = CustomResponse()
response.response_text = '{"responses": [[null, 201], [null,201]]}'
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
now1 = u(int(time.time()))
project1 = os.path.basename(os.path.abspath('.'))
project2 = 'xyz'
entity1 = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
entity2 = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
config = 'tests/samples/configs/good_config.cfg'
args = ['--time', now1, '--file', entity1, '--config', config, '--extra-heartbeats']
with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
now2 = int(time.time())
heartbeats = json.dumps([{
'timestamp': now2,
'entity': entity2,
'alternate_project': project2,
'entity_type': 'file',
'is_write': True,
}])
mock_stdin.readline.return_value = heartbeats
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
heartbeat = {
'language': 'Text only',
'lines': 0,
'entity': entity1,
'project': project1,
'branch': ANY,
'time': float(now1),
'is_write': False,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}
extra_heartbeats = [{
'language': 'Text only',
'lines': 2,
'entity': entity2,
'project': project2,
'time': float(now2),
'is_write': True,
'type': 'file',
'dependencies': [],
'user_agent': ANY,
}]
self.assertHeartbeatSent(heartbeat, extra_heartbeats=extra_heartbeats)
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheSaved()
    @log_capture()
    def test_extra_heartbeats_with_malformed_json(self, logs):
        """Malformed stdin JSON logs a warning; the primary heartbeat still sends."""
        logging.disable(logging.NOTSET)
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
        with TemporaryDirectory() as tempdir:
            entity = 'tests/samples/codefiles/twolinefile.txt'
            shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
            entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
            # NOTE(review): the line below immediately discards the tempdir
            # path set above, so the copied file is never used -- confirm intent.
            entity = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
            config = 'tests/samples/configs/good_config.cfg'
            args = ['--file', entity, '--config', config, '--extra-heartbeats']
            with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
                # Deliberately invalid JSON payload.
                heartbeats = '[{foobar}]'
                mock_stdin.readline.return_value = heartbeats
                retval = execute(args)
                self.assertEquals(retval, SUCCESS)
                self.assertNothingPrinted()
                actual = self.getLogOutput(logs)
                self.assertIn('WakaTime WARNING Malformed extra heartbeats json', actual)
                self.assertHeartbeatSent()
                self.assertHeartbeatNotSavedOffline()
                self.assertOfflineHeartbeatsSynced()
                self.assertSessionCacheSaved()
    @log_capture()
    def test_extra_heartbeats_with_null_heartbeat(self, logs):
        """A null entry in the extra-heartbeats JSON array is skipped silently."""
        logging.disable(logging.NOTSET)
        # Fake a 201 API ack for both heartbeats in the batch.
        response = CustomResponse()
        response.response_text = '{"responses": [[null, 201], [null,201]]}'
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now1 = u(int(time.time()))
        project1 = os.path.basename(os.path.abspath('.'))
        project_not_used = 'xyz'
        entity1 = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
        entity2 = os.path.abspath('tests/samples/codefiles/twolinefile.txt')
        config = 'tests/samples/configs/good_config.cfg'
        args = ['--time', now1, '--file', entity1, '--config', config, '--extra-heartbeats']
        with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
            now2 = int(time.time())
            # First array element is None and must be ignored by the parser.
            heartbeats = json.dumps([
                None,
                {
                    'timestamp': now2,
                    'entity': entity2,
                    'entity_type': 'file',
                    'alternate_project': project_not_used,
                    'is_write': True,
                },
            ])
            mock_stdin.readline.return_value = heartbeats
            retval = execute(args)
            self.assertEquals(retval, SUCCESS)
            self.assertNothingPrinted()
            self.assertNothingLogged(logs)
            heartbeat = {
                'language': 'Text only',
                'lines': 0,
                'entity': entity1,
                'project': project1,
                'branch': ANY,
                'time': float(now1),
                'is_write': False,
                'type': 'file',
                'dependencies': [],
                'user_agent': ANY,
            }
            # Only the non-null heartbeat is expected in the batch.
            extra_heartbeats = [{
                'language': 'Text only',
                'lines': 2,
                'entity': entity2,
                'project': ANY,
                'branch': ANY,
                'time': float(now2),
                'is_write': True,
                'type': 'file',
                'dependencies': [],
                'user_agent': ANY,
            }]
            self.assertHeartbeatSent(heartbeat, extra_heartbeats=extra_heartbeats)
            self.assertHeartbeatNotSavedOffline()
            self.assertOfflineHeartbeatsSynced()
            self.assertSessionCacheSaved()
    @log_capture()
    def test_extra_heartbeats_with_skipped_heartbeat(self, logs):
        """An extra heartbeat matching --exclude is filtered out before sending.

        Only the primary heartbeat goes out; since the faked API response
        still acks two results, a mismatch warning is logged.
        """
        logging.disable(logging.NOTSET)
        # Fake response acks two heartbeats even though only one is sent.
        response = CustomResponse()
        response.response_text = '{"responses": [[null, 201], [null,201]]}'
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        now1 = u(int(time.time()))
        project_not_used = 'xyz'
        entity1 = os.path.abspath('tests/samples/codefiles/emptyfile.txt')
        entity2 = os.path.abspath('tests/samples/codefiles/twolinefile.txt')
        config = 'tests/samples/configs/good_config.cfg'
        # '--exclude twoline' matches entity2, so it should be skipped.
        args = ['--time', now1, '--file', entity1, '--config', config, '--extra-heartbeats', '--exclude', 'twoline']
        with mock.patch('wakatime.main.sys.stdin') as mock_stdin:
            now2 = int(time.time())
            heartbeats = json.dumps([
                {
                    'timestamp': now2,
                    'entity': entity2,
                    'entity_type': 'file',
                    'alternate_project': project_not_used,
                    'is_write': True,
                },
            ])
            mock_stdin.readline.return_value = heartbeats
            retval = execute(args)
            self.assertEquals(retval, SUCCESS)
            self.assertNothingPrinted()
            actual = self.getLogOutput(logs)
            expected = 'WakaTime WARNING Results from api not matching heartbeats sent.'
            self.assertIn(expected, actual)
            heartbeat = {
                'language': 'Text only',
                'lines': 0,
                'entity': entity1,
                'project': ANY,
                'branch': ANY,
                'time': float(now1),
                'is_write': False,
                'type': 'file',
                'dependencies': [],
                'user_agent': ANY,
            }
            self.assertHeartbeatSent(heartbeat)
            self.assertHeartbeatNotSavedOffline()
            self.assertOfflineHeartbeatsSynced()
            self.assertSessionCacheSaved()
@log_capture()
def test_exclude_unknown_project_arg(self, logs):
logging.disable(logging.NOTSET)
response = Response()
response.status_code = 0
self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/emptyfile.txt'
shutil.copy(entity, os.path.join(tempdir, 'emptyfile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'emptyfile.txt'))
config = 'tests/samples/configs/good_config.cfg'
args = ['--file', entity, '--config', config, '--exclude-unknown-project', '--verbose', '--log-file', '~/.wakatime.log']
retval = execute(args)
self.assertEquals(retval, SUCCESS)
self.assertNothingPrinted()
actual = self.getLogOutput(logs)
expected = 'WakaTime DEBUG Skipping because project unknown.'
self.assertEquals(actual, expected)
self.assertHeartbeatNotSent()
self.assertHeartbeatNotSavedOffline()
self.assertOfflineHeartbeatsSynced()
self.assertSessionCacheUntouched()
@log_capture()
def test_uses_wakatime_home_env_variable(self, logs):
logging.disable(logging.NOTSET)
with TemporaryDirectory() as tempdir:
entity = 'tests/samples/codefiles/twolinefile.txt'
shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
key = str(uuid.uuid4())
config = 'tests/samples/configs/good_config.cfg'
logfile = os.path.realpath(os.path.join(tempdir, '.wakatime.log'))
args = ['--file', entity, '--key', key, '--config', config]
with mock.patch.object(sys, 'argv', ['wakatime'] + args):
args, configs = parse_arguments()
self.assertEquals(args.log_file, None)
with mock.patch('os.environ.get') as mock_env:
mock_env.return_value = os.path.realpath(tempdir)
args, configs = parse_arguments()
self.assertEquals(args.log_file, logfile)
self.assertNothingPrinted()
self.assertNothingLogged(logs)
    @log_capture()
    def test_legacy_disableoffline_arg_supported(self, logs):
        """Legacy --disableoffline flag still prevents offline queuing on API failure."""
        logging.disable(logging.NOTSET)
        # Make every API send raise, forcing the offline-handling code path.
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].side_effect = RequestException('requests exception')
        with TemporaryDirectory() as tempdir:
            entity = 'tests/samples/codefiles/twolinefile.txt'
            shutil.copy(entity, os.path.join(tempdir, 'twolinefile.txt'))
            entity = os.path.realpath(os.path.join(tempdir, 'twolinefile.txt'))
            now = u(int(time.time()))
            key = str(uuid.uuid4())
            args = ['--file', entity, '--key', key, '--disableoffline',
                    '--config', 'tests/samples/configs/good_config.cfg', '--time', now]
            retval = execute(args)
            self.assertEquals(retval, API_ERROR)
            self.assertNothingPrinted()
            log_output = u("\n").join([u(' ').join(x) for x in logs.actual()])
            # py2 repr includes the u'' prefix on the exception message.
            expected = "WakaTime ERROR {'RequestException': u'requests exception'}"
            if is_py3:
                expected = "WakaTime ERROR {'RequestException': 'requests exception'}"
            self.assertEquals(expected, log_output)
            self.assertHeartbeatSent()
            # --disableoffline: failure must NOT be queued for later syncing.
            self.assertHeartbeatNotSavedOffline()
            self.assertOfflineHeartbeatsNotSynced()
            self.assertSessionCacheDeleted()
    def test_legacy_hidefilenames_arg_supported(self):
        """Legacy --hidefilenames flag still obfuscates the entity filename."""
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = CustomResponse()
        with TemporaryDirectory() as tempdir:
            entity = 'tests/samples/codefiles/python.py'
            shutil.copy(entity, os.path.join(tempdir, 'python.py'))
            entity = os.path.realpath(os.path.join(tempdir, 'python.py'))
            now = u(int(time.time()))
            config = 'tests/samples/configs/good_config.cfg'
            key = str(uuid.uuid4())
            project = 'abcxyz'
            args = ['--file', entity, '--key', key, '--config', config, '--time', now, '--hidefilenames', '--logfile', '~/.wakatime.log', '--alternate-project', project]
            retval = execute(args)
            self.assertEquals(retval, SUCCESS)
            self.assertNothingPrinted()
            # Filename is replaced with HIDDEN but the extension is preserved.
            heartbeat = {
                'language': 'Python',
                'lines': None,
                'entity': 'HIDDEN.py',
                'project': project,
                'time': float(now),
                'is_write': False,
                'type': 'file',
                'dependencies': None,
                'user_agent': ANY,
            }
            self.assertHeartbeatSent(heartbeat)
            self.assertHeartbeatNotSavedOffline()
            self.assertOfflineHeartbeatsSynced()
            self.assertSessionCacheSaved()
    @log_capture()
    def test_deprecated_logfile_arg_supported(self, logs):
        """Deprecated --logfile flag still configures the log destination."""
        logging.disable(logging.NOTSET)
        response = Response()
        response.status_code = 0
        self.patched['wakatime.packages.requests.adapters.HTTPAdapter.send'].return_value = response
        with NamedTemporaryFile() as fh:
            now = u(int(time.time()))
            entity = 'tests/samples/codefiles/python.py'
            config = 'tests/samples/configs/good_config.cfg'
            logfile = os.path.realpath(fh.name)
            args = ['--file', entity, '--config', config, '--time', now, '--logfile', logfile]
            # NOTE(review): executed twice; presumably the second run hits the
            # path that returns 102 -- confirm against the constants module.
            execute(args)
            retval = execute(args)
            self.assertEquals(retval, 102)
            self.assertNothingPrinted()
            # Logger stays at WARNING and writes to the file given by --logfile.
            self.assertEquals(logging.WARNING, logging.getLogger('WakaTime').level)
            self.assertEquals(logfile, logging.getLogger('WakaTime').handlers[0].baseFilename)
            logs.check()
|
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
# New record to prepend to the census data
new_record = [[50, 9, 4, 1, 0, 0, 40, 0]]
# Reading file (`path` is assumed to be supplied by the grading environment)
data = np.genfromtxt(path, delimiter=",", skip_header=1)
# Code starts here
census = np.concatenate((new_record, data))
# Age statistics (column 0)
age = np.array(census[:, :1])
max_age = np.max(age)
min_age = np.min(age)
age_mean = np.mean(age)
age_std = np.std(age)
print(max_age)
print(min_age)
print(age_mean)
print(age_std)
# Split rows by race code (column 2)
race_0 = census[census[:, 2] == 0]
race_1 = census[census[:, 2] == 1]
race_2 = census[census[:, 2] == 2]
race_3 = census[census[:, 2] == 3]
race_4 = census[census[:, 2] == 4]
len_0 = len(race_0)
len_1 = len(race_1)
len_2 = len(race_2)
len_3 = len(race_3)
len_4 = len(race_4)
# Least-represented race; on ties the HIGHEST matching code wins,
# matching the original chain of independent if-statements.
min_race = min(len_0, len_1, len_2, len_3, len_4)
minority_race = 0
for race_code, race_count in enumerate((len_0, len_1, len_2, len_3, len_4)):
    if race_count == min_race:
        minority_race = race_code
print(minority_race)
# Working hours of people older than 60 (column 6)
senior_citizens = census[census[:, 0] > 60]
working_hours_sum = senior_citizens[:, 6].sum()
print(working_hours_sum)
senior_citizens_len = len(senior_citizens)
avg_working_hours = working_hours_sum / senior_citizens_len
print(avg_working_hours)
# Average pay (column 7) split by education level (column 1)
high = census[census[:, 1] > 10]
low = census[census[:, 1] <= 10]
avg_pay_high = np.mean(high[:, 7])
avg_pay_low = np.mean(low[:, 7])
print(round(avg_pay_high, 2))
print(round(avg_pay_low, 2))
# Result intentionally discarded, as in the original exercise.
np.array_equal(avg_pay_high, avg_pay_low)
|
#!/usr/bin/env python
from ..exrpc.rpclib import *
from ..exrpc.server import *
from ..mllib.model_util import ModelID
class df_to_sparse_info:
    """Container for the server-side handle of a dataframe-to-sparse
    conversion info structure registered with the Frovedis server.

    The only state is the unique id (`__uid`) under which the structure
    is registered; all real data lives on the server.
    """

    def __init__(self, info_id):
        """Wrap an already-registered conversion-info id."""
        self.__uid = info_id

    def load(self, dirname):
        """Load conversion info from `dirname` on the Frovedis server.

        Any info currently held by this object is released first.
        Raises TypeError for a non-string path and RuntimeError if the
        server reports an error. Returns self for chaining.
        """
        if self.__uid is not None:
            # Drop the previously held server-side structure before reloading
            # (the original released unconditionally, which raised when load()
            # was called after release()).
            self.release()
        if not isinstance(dirname, str):
            raise TypeError("Expected String, Found: " + type(dirname).__name__)
        info_id = ModelID.get()  # unique id for the info to be registered
        (host, port) = FrovedisServer.getServerInstance()
        rpclib.load_dftable_to_sparse_info(host, port, info_id, dirname.encode('ascii'))
        # Check for a server-side failure BEFORE adopting the new id, so a
        # failed load does not leave this object pointing at a bogus handle.
        excpt = rpclib.check_server_exception()
        if excpt["status"]:
            raise RuntimeError(excpt["info"])
        self.__uid = info_id
        return self

    def save(self, dirname):
        """Save this conversion info to `dirname` on the Frovedis server.

        Raises ValueError on an invalid (released) handle, TypeError for a
        non-string path, and RuntimeError on a server-side error.
        """
        if self.__uid is None:
            raise ValueError("Operation on invalid frovedis dftable_to_sparse_info!")
        if not isinstance(dirname, str):
            raise TypeError("Expected String, Found: " + type(dirname).__name__)
        (host, port) = FrovedisServer.getServerInstance()
        rpclib.save_dftable_to_sparse_info(host, port, self.get(), dirname.encode('ascii'))
        excpt = rpclib.check_server_exception()
        if excpt["status"]:
            raise RuntimeError(excpt["info"])

    def release(self):
        """Free the server-side structure and invalidate this handle."""
        if self.__uid is None:
            raise ValueError("Operation on invalid frovedis dftable_to_sparse_info!")
        (host, port) = FrovedisServer.getServerInstance()
        rpclib.release_dftable_to_sparse_info(host, port, self.get())
        self.__uid = None

    def get(self):
        """Return the server-side uid (None once released)."""
        return self.__uid
|
#!/usr/bin/env python
from math import radians
import rospy
from sensor_msgs.msg import LaserScan
from geometry_msgs.msg import Twist
class Obstacle():
    """Reactive obstacle-avoidance behavior for a LaserScan-equipped robot.

    Reads one scan at a time from the "scan" topic, publishes Twist
    commands on "cmd_vel", and tries to sidestep obstacles closer than
    ~0.3 m before resuming the original heading.
    """
    def __init__(self):
        rospy.init_node('obstacle')
        # Ranges below this value are treated as sensor noise and dropped.
        self.LIDAR_ERR = 0.05
        self._cmd_pub = rospy.Publisher('cmd_vel', Twist, queue_size=1)
        self.twist = Twist()
        rospy.on_shutdown(self.shutdown)
        # NOTE: the avoidance loop runs directly from the constructor and
        # blocks until ROS shuts down.
        self.obstacle()
    def get_scan(self):
        """Take one LaserScan and bucket valid ranges into front/left/right/back lists."""
        msg = rospy.wait_for_message("scan", LaserScan)
        self.scan_filter = []
        self.scan_filter_left = []
        self.scan_filter_right = []
        self.scan_filter_back = []
        for i in range(360):
            # Front sector: within ~15 degrees either side of heading.
            if i <= 15 or i > 335:
                if msg.ranges[i] >= self.LIDAR_ERR:
                    self.scan_filter.append(msg.ranges[i])
            # Right sector: 225-314 degrees.
            if i >= 225 and i < 315:
                if msg.ranges[i] >= self.LIDAR_ERR:
                    self.scan_filter_right.append(msg.ranges[i])
            # Left sector: 35-124 degrees.
            if i >= 35 and i < 125:
                if msg.ranges[i] >= self.LIDAR_ERR:
                    self.scan_filter_left.append(msg.ranges[i])
            # Back sector: 145-214 degrees (collected but unused below).
            if i >= 145 and i < 215:
                if msg.ranges[i] >= self.LIDAR_ERR:
                    self.scan_filter_back.append(msg.ranges[i])
    def move (self, forward, theta):
        """Publish one velocity command: forward speed (m/s) and turn rate in degrees."""
        self.twist.linear.x = forward
        self.twist.angular.z = radians(theta)
        self._cmd_pub.publish(self.twist)
    def shutdown(self):
        # stop turtlebot
        rospy.loginfo("Stop - Robot is shutdown")
        self.move(0.0, 0.0)
        rospy.sleep(1)
    def obstacle(self):
        """Main loop: drive forward; on an obstacle, sidestep, pass it, return.

        index remembers which way we turned (1 = left, 0 = right) and
        counter how many forward steps the bypass took, so the path can be
        roughly retraced afterwards.
        """
        while not rospy.is_shutdown():
            self.get_scan()
            if min(self.scan_filter) < 0.3:
                self.move(0.0,0.0)
                rospy.loginfo('Stop!')
                rospy.sleep(0.5)
                index = 0
                counter = 0
                # Turn toward the side with more clearance.
                if min(self.scan_filter_left) > min(self.scan_filter_right):
                    self.move(0.0,45.0)
                    index = 1
                else:
                    self.move(0.0,-45.0)
                rospy.sleep(2)
                self.get_scan()
                rospy.loginfo('distance of the obstacle LEFT: %f', min(self.scan_filter_left))
                rospy.loginfo('distance of the obstacle RIGHT: %f', min(self.scan_filter_right))
                # Creep forward while the obstacle is still beside us,
                # counting the steps taken.
                while min(self.scan_filter_left) <0.35 or min(self.scan_filter_right) < 0.35:
                    self.move(0.1,0.0)
                    self.get_scan()
                    if min(self.scan_filter) < 0.3 and index ==1:
                        self.move(0.0,45)
                        rospy.sleep(2)
                        rospy.loginfo('Obstacle in front Break1')
                        break
                    if min(self.scan_filter) < 0.3 and index ==0:
                        self.move(0.0,-45)
                        rospy.sleep(2)
                        rospy.loginfo('Obstacle in front Break1')
                        break
                    rospy.loginfo('distance of the obstacle LEFT: %f', min(self.scan_filter_left))
                    rospy.loginfo('distance of the obstacle RIGHT: %f', min(self.scan_filter_right))
                    rospy.sleep(0.1)
                    counter = counter+1
                    rospy.loginfo('%d', counter)
                # Turn back parallel to the original heading.
                if index == 1:
                    self.move(0.0,-45)
                else:
                    self.move(0.0,45)
                rospy.sleep(2)
                self.move(0.0,0.0)
                rospy.loginfo('Up to this OK')
                rospy.loginfo(' ****distance of the obstacle LEFT: %f', min(self.scan_filter_left))
                rospy.loginfo(' ****distance of the obstacle RIGHT: %f', min(self.scan_filter_right))
                # Keep advancing until we have cleared the obstacle sideways.
                while min(self.scan_filter_left) <0.46 or min(self.scan_filter_right) < 0.46:
                    self.move(0.1,0.0)
                    rospy.sleep(0.15)
                    self.get_scan()
                    rospy.loginfo('distance of the obstacle LEFT: %f', min(self.scan_filter_left))
                    rospy.loginfo('distance of the obstacle RIGHT: %f', min(self.scan_filter_right))
                    if min(self.scan_filter) < 0.3 and index ==1:
                        rospy.loginfo('Obstacle in front Break2')
                        self.move(0.0,45)
                        rospy.sleep(2)
                        break
                    if min(self.scan_filter) < 0.3 and index ==0:
                        rospy.loginfo('Obstacle in front Break2')
                        self.move(0.0,-45)
                        rospy.sleep(2)
                        break
                if index == 1:
                    self.move(0.0,-45)
                else:
                    self.move(0.0,45)
                rospy.sleep(2)
                # Retrace roughly the same number of forward steps as the bypass.
                for i in range(counter):
                    self.move(0.1,0)
                    rospy.sleep(0.2)
                    if min(self.scan_filter) < 0.3 and index ==1:
                        self.move(0.0,45)
                        rospy.sleep(2)
                        rospy.loginfo('Obstacle in front Break3')
                        break
                    if min(self.scan_filter) < 0.3 and index ==0:
                        self.move(0.0,-45)
                        rospy.sleep(2)
                        rospy.loginfo('Obstacle in front Break2')
                        break
                self.move(0.0,0.0)
                rospy.sleep(1)
                # Final turn back onto the original heading, then resume.
                if index == 1:
                    self.move(0.0,45)
                else :
                    self.move(0.0,-45)
                rospy.sleep(2)
                self.move(0.1,0)
                #if min(self.scan_filter_left) <= 0.5:
                #    self.twist.linear.x = 0.05
                #    self.twist.angular.z = 0.0
                #    self._cmd_pub.publish(self.twist)
                #else:
                #    self.twist.linear.x = 0.0
                #    self.twist.angular.z = 90
                #    rospy.loginfo('distance of the obstacle : %f', min(self.scan_filter))
                #    rospy.sleep(0.5)
                #    self.twist.linear.x = 0.05
                #    self._cmd_pub.publish(self.twist)
            else:
                # No obstacle in front: cruise slowly ahead.
                self.move(0.05,0.0)
                rospy.loginfo('distance of the obstacle : %f', min(self.scan_filter))
def main():
    """Entry point: run the obstacle-avoidance loop until ROS shuts down."""
    try:
        Obstacle()
    except rospy.ROSInterruptException:
        pass
if __name__ == '__main__':
    main()
|
import os
import sys
import logging
import logging.handlers
from logging_gelf.handlers import GELFUDPSocketHandler
from collections import defaultdict
# Map of upper-case level names to the stdlib logging level constants.
LOG_LEVELS = {
    name: getattr(logging, name)
    for name in ("DEBUG", "INFO", "WARNING", "ERROR", "CRITICAL")
}
class MultiLogger(logging.Handler):
    '''
    Python logger to handle logging to multiple destinations such as
    console, file and gelf
    '''

    def __init__(self, loggers, level='info'):
        """Parse the `loggers` config dict.

        Recognized keys: 'level' (global level name, overrides the `level`
        argument), 'console', 'file' (must contain 'path'), and 'gelf'
        ('host'/'port' default to localhost:12021). An unknown level name
        raises KeyError.
        """
        logging.Handler.__init__(self)
        # Global level: config entry wins over the `level` argument.
        self.level = LOG_LEVELS[loggers.get('level', level).upper()]
        # Missing sections become empty mappings, which read as "disabled"
        # in the truthiness checks in getLogger().
        self.console = loggers.get('console', defaultdict(None))
        self.file = loggers.get('file', defaultdict(None))
        if 'gelf' in loggers:
            self.gelf = loggers['gelf']
            self.gelf.setdefault('host', 'localhost')
            self.gelf.setdefault('port', 12021)
        else:
            self.gelf = defaultdict(None)

    def getLogger(self):
        """Build and return the logger with all configured handlers attached.

        NOTE(review): calling this twice attaches duplicate handlers to the
        same underlying logger -- confirm callers invoke it only once.
        """
        self._logger = logging.getLogger(os.path.basename(sys.argv[0]))
        self._logger.setLevel(self.level)
        # Renamed from `format` to avoid shadowing the builtin.
        formatter = logging.Formatter(
            '%(asctime)s %(name)s [%(levelname)s] %(message)s')
        if self.console:
            console_handler = logging.StreamHandler(sys.stdout)
            console_handler.setFormatter(formatter)
            # Per-destination level overrides the global one when given.
            if 'level' in self.console:
                console_handler.setLevel(LOG_LEVELS[self.console['level'].upper()])
            self._logger.addHandler(console_handler)
        if self.file:
            file_handler = logging.FileHandler(self.file['path'])
            file_handler.setFormatter(formatter)
            if 'level' in self.file:
                file_handler.setLevel(LOG_LEVELS[self.file['level'].upper()])
            self._logger.addHandler(file_handler)
        if self.gelf:
            gelf_handler = GELFUDPSocketHandler(host=self.gelf['host'],
                                                port=self.gelf['port'])
            if 'level' in self.gelf:
                gelf_handler.setLevel(LOG_LEVELS[self.gelf['level'].upper()])
            self._logger.addHandler(gelf_handler)
        return self._logger
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.