max_stars_repo_path stringlengths 3 269 | max_stars_repo_name stringlengths 4 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.05M | score float64 0.23 5.13 | int_score int64 0 5 |
|---|---|---|---|---|---|---|
molo/helpers/request_schemas.py | djvaroli/samsung_oct | 2 | 12768851 | <filename>molo/helpers/request_schemas.py
from typing import *
from pydantic import BaseModel
class GeneratePDFReportSchema(BaseModel):
predictionData: List[dict] | 1.484375 | 1 |
smoke/features/steps/delete_steps.py | nhojpatrick/openshift_jenkins | 267 | 12768852 | <reponame>nhojpatrick/openshift_jenkins<gh_stars>100-1000
from smoke.features.steps.openshift import Openshift
from kubernetes import client, config
# Shared clients reused by every step definition in this module.
# NOTE(review): the `then`/`given` decorators used below come from behave
# (`from behave import given, then`); that import appears to have been
# stripped from this copy -- confirm against the original step file.
oc = Openshift()
v1 = client.CoreV1Api()
@then(u'we delete deploymentconfig.apps.openshift.io "jenkins"')
def del_dc(context):
    """Delete the "jenkins" deploymentconfig; fail the step if the delete did not succeed."""
    res = oc.delete("deploymentconfig", "jenkins", context.current_project)
    # oc.delete returns None on failure; compare with `is`, not `==`.
    if res is None:
        raise AssertionError("failed to delete deploymentconfig 'jenkins'")
@then(u'we delete route.route.openshift.io "jenkins"')
def del_route(context):
    """Delete the "jenkins" route; fail the step if the delete did not succeed."""
    res = oc.delete("route", "jenkins", context.current_project)
    if res is None:
        raise AssertionError("failed to delete route 'jenkins'")
@then(u'delete configmap "jenkins-trusted-ca-bundle"')
def del_cm(context):
    """Delete the trusted-CA configmap; fail the step if the delete did not succeed."""
    res = oc.delete("configmap", "jenkins-trusted-ca-bundle", context.current_project)
    if res is None:
        raise AssertionError("failed to delete configmap 'jenkins-trusted-ca-bundle'")
@then(u'delete serviceaccount "jenkins"')
def del_sa(context):
    """Delete the "jenkins" serviceaccount; fail the step if the delete did not succeed."""
    res = oc.delete("serviceaccount", "jenkins", context.current_project)
    if res is None:
        raise AssertionError("failed to delete serviceaccount 'jenkins'")
@then(u'delete rolebinding.authorization.openshift.io "jenkins_edit"')
def del_rb(context):
    """Delete the "jenkins_edit" rolebinding; fail the step if the delete did not succeed."""
    res = oc.delete("rolebinding", "jenkins_edit", context.current_project)
    if res is None:
        raise AssertionError("failed to delete rolebinding 'jenkins_edit'")
@then(u'delete service "jenkins"')
def del_svc(context):
    """Delete the "jenkins" service; fail the step if the delete did not succeed."""
    res = oc.delete("service", "jenkins", context.current_project)
    if res is None:
        raise AssertionError("failed to delete service 'jenkins'")
@then(u'delete service "jenkins-jnlp"')
def del_svc_jnlp(context):
    """Delete the "jenkins-jnlp" service; fail the step if the delete did not succeed."""
    res = oc.delete("service", "jenkins-jnlp", context.current_project)
    if res is None:
        raise AssertionError("failed to delete service 'jenkins-jnlp'")
@then(u'delete all buildconfigs')
def del_bc(context):
    """Delete every buildconfig in the project; fail the step on error."""
    res = oc.delete("bc", "--all", context.current_project)
    if res is None:
        raise AssertionError("failed to delete buildconfigs")
@then(u'delete all builds')
def del_builds(context):
    """Delete every build in the project; fail the step on error."""
    res = oc.delete("builds", "--all", context.current_project)
    if res is None:
        raise AssertionError("failed to delete builds")
@then(u'delete all deploymentconfig')
def del_alldc(context):
    """Delete every deploymentconfig in the project; fail the step on error."""
    res = oc.delete("deploymentconfig", "--all", context.current_project)
    if res is None:
        raise AssertionError("failed to delete deploymentconfigs")
@then(u'delete all remaining test resources')
@given(u'cleared from all test resources')
def del_all_remaining_test_resources(context):
    """Sweep every labelled test resource out of the current project."""
    resource_kinds = "all,rolebindings.authorization.openshift.io,bc,cm,is,pvc,sa,secret"
    app_labels = (
        "jenkins-ephemeral",
        "jenkins-persistent",
        "openshift-jee-sample",
        "jenkins-pipeline-example",
    )
    # One delete per labelled app, in the same order as before.
    for label in app_labels:
        oc.delete(resource_kinds, "-l app=" + label, context.current_project)
| 2.046875 | 2 |
Lab 2/key.py | lcchiang/Artificial-Intelligence-Course | 0 | 12768853 | <gh_stars>0
# Login information for the 6.034 Automated Tester
USERNAME="lcchiang_MIT_EDU"
# NOTE(review): '<PASSWORD>' is a dataset redaction placeholder, not a
# working credential -- the real value must be supplied locally.
PASSWORD="<PASSWORD>"
XMLRPC_URL="https://ai6034.mit.edu/labs/xmlrpc/"
| 0.953125 | 1 |
contrib/spanlib/src/template.setup.py | xylar/cdat | 62 | 12768854 | <filename>contrib/spanlib/src/template.setup.py
######################################################################
## SpanLib, Raynaud 2006-2009
######################################################################
import os
import sys
from getopt import gnu_getopt

# Gather up all the files we need: f2py signature file plus the Fortran sources.
files = ['spanlib.pyf', 'spanlib_pywrap.f90', 'spanlib.f90']

# Template placeholders (_LIBDIRS_, _INCDIRS_, _LIBS_, _VERSION_) are
# substituted by the configure step before this file is run.
libDirs = _LIBDIRS_  # like ['/usr/local/lib']
incDirs = _INCDIRS_  # like ['/usr/local/include']
libs = _LIBS_        # like ['lapack95', 'lapack', 'blas']
extra_link_args = []

## Get package version info
version = '_VERSION_'  # Like 0.1
description = 'Python extension to spanlib fortran library'
author = '<NAME> and <NAME>'
author_email = '<EMAIL>'
url = "http://spanlib.sf.net"
# (removed a stray bare `description` expression here -- it was a no-op)

## scypy_distutils Script
from numpy.distutils.core import setup, Extension

# Some useful directories.
## from distutils.sysconfig import get_python_inc, get_python_lib
## python_incdir = os.path.join( get_python_inc(plat_specific=1) )
## python_libdir = os.path.join( get_python_lib(plat_specific=1) )

# On macOS the extension must be linked as a bundle against the Python binary.
if sys.platform == 'darwin':
    extra_link_args += ['-bundle', '-bundle_loader ' + sys.prefix + '/bin/python']

## setup the python module
setup(name="spanlib",
      version=version,
      description=description,
      author=author,
      author_email=author_email,
      maintainer=author,
      maintainer_email=author_email,
      ## Build fortran wrappers, uses f2py
      ## directories to search for libraries defined in setup.cfg
      ext_modules=[
          Extension('spanlib.spanlib_fort',
                    files,
                    libraries=libs,
                    library_dirs=libDirs,
                    include_dirs=incDirs,
                    extra_link_args=extra_link_args,
                    ), ],
      license="GNU LGPL",
      ## Install these to their own directory
      package_dir={'spanlib': '../lib'},
      packages=["spanlib"],
      )
| 1.75 | 2 |
rocket/a_car_rental/b_website/migrations/0002_auto_20210908_1356.py | t444ov/django-carrental | 0 | 12768855 | <filename>rocket/a_car_rental/b_website/migrations/0002_auto_20210908_1356.py<gh_stars>0
# Generated by Django 3.2.7 on 2021-09-08 10:56
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated (Django 3.2.7): sets verbose names for AboutUs/Article
    and renames their database tables.  Do not edit operations by hand."""

    dependencies = [
        ('b_website', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='aboutus',
            options={'verbose_name': 'about us', 'verbose_name_plural': 'About us'},
        ),
        migrations.AlterModelOptions(
            name='article',
            options={'verbose_name': 'article', 'verbose_name_plural': 'Articles'},
        ),
        migrations.AlterModelTable(
            name='aboutus',
            table='about_us',
        ),
        migrations.AlterModelTable(
            name='article',
            table='articles',
        ),
    ]
| 1.664063 | 2 |
api/v1/ev_gpq.py | muslax/aces-api | 0 | 12768856 | <filename>api/v1/ev_gpq.py
import logging
from random import shuffle
from typing import Any, List
from fastapi import APIRouter, Depends
from api.v1.deps import get_current_active_user, get_current_project_admin
from crud.persona import find_one as find_persona
from crud.utils import raise_not_found, raise_bad_request
from models.ev.gpq import EvidenceBase, GPQEvidence, GPQRow
router = APIRouter()
@router.post("/{username}")
async def create_evidence_doc(
project: str,
username: str,
current_user: User = Depends(get_current_active_user)
) -> Any:
'''TEST'''
logging.info(">>> " + __name__ + ":create_evidence_doc")
project = project.strip().lower()
username = username.strip().lower()
persona = find_persona(project, username)
if not persona:
raise_not_found()
model = EvidenceBase(
"license" = current_user.license,
"projectId" = project,
"username" = username,
"fullname" = persona.fullname
)
# Check item numbers from project
# Assume it is 30
numItems = 30
rows: List[GPQRow] = []
seqs = [i for i in range(1, numItems + 1)]
shuffle(seqs)
for i in range(numItems):
rows.append(GPQRow(
"seq" = i + 1,
"wbSeq" = seqs[i]
))
| 2.4375 | 2 |
setup.py | KiranNanduri/Stocksdata | 1 | 12768857 | <reponame>KiranNanduri/Stocksdata<filename>setup.py
# -*- coding: utf-8 -*-
"""
Created on Sat Oct 25 22:59:19 2021
@author: kiran
"""
from setuptools import setup, find_packages
# Trove classifiers advertised on PyPI.
classifiers = [
    "Development Status :: 5 - Production/Stable",
    "Intended Audience :: Science/Research",
    "Operating System :: Microsoft :: Windows :: Windows 10",
    "Operating System :: POSIX :: Linux",
    "License :: OSI Approved :: MIT License",
    "Programming Language :: Python :: 3"
]

# Package metadata; long_description concatenates README.md and CHANGELOG.txt.
# NOTE(review): '<NAME>'/'<EMAIL>' are dataset redaction placeholders.
setup(
    name='stocksdata',
    version='1.0.2',
    description='Package of download bhavcopy data from nse website',
    long_description=open('README.md').read()+'\n\n'+open('CHANGELOG.txt').read(),
    url='https://github.com/KiranNanduri/StockCode',
    author='<NAME>',
    author_email='<EMAIL>',
    license='MIT',
    classifiers=classifiers,
    keywords='nse',
    packages=find_packages(),
    install_requires=['requests==2.25.1']
)
| 1.179688 | 1 |
rapid_plotly/barplot.py | def-mycroft/rapid-plotly | 1 | 12768858 | <reponame>def-mycroft/rapid-plotly
"""Convenience function for creating a Plotly barplot
Use `create_graph` to create an attractive, highly interactive Plotly
barplot, either in a Jupyter notebook or as an html file.
"""
import plotly.graph_objs as go
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
from copy import copy
import numpy as np
import pandas as pd
from . import helpers
# Module-level alias: figures are written out via the shared helper.
output_graph = helpers.output_graph
def create_errors(error, error_barwidth):
    """Build the Plotly ``error_y`` dict for a bar trace.

    A string ``error`` is the "no error bars" sentinel and yields an
    empty dict; anything else is treated as an array of +/- values.
    """
    if isinstance(error, str):
        return {}
    # Cap width scales with the bar width of the error whiskers.
    cap_width = int((error_barwidth * 2.5) / 2)
    return {
        'type': 'data',
        'array': error,
        'thickness': error_barwidth,
        'width': cap_width,
        'visible': True,
    }
def create_trace(in_data, colors, col, hoverinfo, names, errors,
                 error_barwidth):
    """Build one ``go.Bar`` trace for column ``col`` of ``in_data``."""
    # A string `errors` means "no error bars" -> empty error_y spec;
    # otherwise delegate to create_errors for this column's values.
    if isinstance(errors, str):
        error_spec = {}
    else:
        error_spec = create_errors(errors[col], error_barwidth)
    return go.Bar(
        x=list(in_data.index),
        y=in_data[col],
        name=col,
        text=names[col],
        marker=go.bar.Marker(color=colors[col]),
        hoverinfo=hoverinfo,
        error_y=error_spec,
    )
def create_graph(in_data, names='', colors='', errors='', error_barwidth=4,
                 title='title', xlab='xlab', ylab='ylab', y2lab='y2lab',
                 hoverinfo=None, annotations=None, filepath='', aux_traces=None,
                 layout='', alt_y=False, aux_first=False, figonly=False,
                 imagesize=None):
    """Creates grouped barplot

    The `in_data` arg must be a dataframe in the form:

                 bar1   bar2
    x_category1  3.13  15.84
    x_category2  6.67   6.08

    Where `in_data.index` (x_category1, x_category2 above) are the x
    labels on the graph, and each column (bar1, bar2 in the above
    example) is a bargroup. Each cell represents the height of the bar.
    In the above example, the 3.13 and 15.84 bars would be grouped
    together and the 6.67 and 6.08 bars would be grouped together. Bar1
    would be on the left of each bar group.

    Note that `in_data` can be passed with a single column to create
    a normal barplot (as opposed to a grouped barplot).

    Error bars can be easily created by passing a DataFrame similar
    to `in_data` where each cell represents the "+/-" value for the
    error bar, e.g. if the value is "1.5", the error bar will range
    "1.5" units (in terms of the y-axis) above and "1.5" units below
    the bar.

    The `aux_traces` arg can be used to create an overlaying trace, such
    as a line graph overlaying the bars. To plot `aux_traces` on a
    secondary axis, the `yaxis` parameter of the trace must be set to
    'y2' and the `alt_y` arg must be passed to this function as `True`.

    Parameters
    ----------
    in_data : DataFrame of traces. Data in columns will be used as
    traces and index will be used as x-axis.
    names : DataFrame of hovertext values. Should mirror `in_data` in
    form.
    colors : dict of colors for traces. dict keys should mirror
    `in_data` columns. Can use hex colors or keyword colors, see Plotly
    specifications on colors for keyword options.
    errors : a DataFrame of error values for each bar. Should mirror
    `in_data` in form. Each cell in `errors` will be the "+/-" value
    for the error bars.
    error_barwidth : the width, in pixels, of the error bar.
    title : title for top of graph. Use '<br>' tag for subtitle. Tags
    '<i>' and '<b>' can be used for italics and bold, respectively.
    xlab : label for x-axis.
    ylab : label for y-ayis.
    y2lab : label for aly y axis.
    hoverinfo : either None or 'text'. Passed to the trace in
    `create_trace`. By default, Plotly displays the value upon hover,
    passing 'text' here will show only the value configured in the
    `names` DataFrame.
    annotations : a list of dicts for annotations. For example:

    ```
    [{'text':'More cylinders correlates to better<br> fuel mileage',
      'x':1.5, 'y':24.5, 'showarrow':False}]
    ```

    The 'x' and 'y' keys are coordinates in terms of the graph axes, and
    the 'text' key is the annotation text.
    filepath : optional, if included will write image to file. Can be
    written as a .html file or a .png file.
    aux_traces : list of traces to be added to the graph data. Allows
    for customization of additional traces beyond what default
    functionality provides.
    aux_first : bool, if True then aux traces will be added first.
    layout : allows for a customized layout. Default layout is in the
    helpers module, can be accessed:

    ```
    from rapid_plotly import helpers
    layout = helpers.layout
    ```

    Here is the default layout:

    ```
    {'hovermode': 'closest', 'plot_bgcolor': 'rgb(229, 229, 229)',
     'title': 'title', 'xaxis': {'gridcolor': 'rgb(255,255,255)',
     'tickangle': 30, 'title': 'xlab',
     'zerolinecolor': 'rgb(255,255,255)'},
     'yaxis': {'gridcolor': 'rgb(255,255,255)', 'title': 'ylab',
     'zerolinecolor': 'rgb(255,255,255)'}}
    ```

    alt_y : bool, used to place aux_traces on alternate axis.
    """
    # Avoid the mutable-default-argument pitfall: the original declared
    # `annotations=[]` and `aux_traces=[]`, which are shared across calls.
    if annotations is None:
        annotations = []
    if aux_traces is None:
        aux_traces = []

    # use default colors if none are passed, otherwise use passed dataframe
    if isinstance(colors, str):
        colors = helpers.default_colors(in_data.columns)

    # setup names if nothing is passed
    if isinstance(names, str):
        names = dict(zip(in_data.columns, in_data.columns))

    # create list of traces
    # (removed dead code: the original set an unused local `yaxis = 'y1'`
    # when alt_y was True; the secondary axis is configured further down)
    data = list()

    # create and append traces
    for col in in_data.columns:
        data.append(create_trace(in_data, colors, col, hoverinfo, names,
                                 errors, error_barwidth))

    # if aux traces were given, add them, ordering per aux_first
    if len(aux_traces) > 0:
        if aux_first:
            data = aux_traces + data
        else:
            data = data + aux_traces

    # create layout; if no layout is passed, use default layout from helpers
    if layout == '':
        from copy import deepcopy
        # deep-copy so repeated calls do not mutate the shared
        # helpers.layout default (the title/axis edits below would leak
        # into every subsequent call otherwise).
        layout = deepcopy(helpers.layout)

    layout['title'] = title
    layout['xaxis']['title'] = xlab
    layout['yaxis']['title'] = ylab
    layout['annotations'] = annotations
    layout = go.Layout(layout)

    if alt_y:
        # Secondary y-axis mirrors the primary styling, drawn on the right.
        y = copy(layout['yaxis'])
        y['title'] = y2lab
        y['side'] = 'right'
        y['overlaying'] = 'y'
        layout['yaxis2'] = y

    # create figure
    fig = go.Figure(data=data, layout=layout)

    # output graph; imagesize is used only for pngs
    if not imagesize:
        output_graph(filepath=filepath, fig=fig, figonly=figonly)
    else:
        width, height = imagesize
        output_graph(filepath=filepath, fig=fig, figonly=figonly,
                     width=width, height=height)

    return fig
| 3.03125 | 3 |
BuildingDepot-v3.2.8/buildingdepot/CentralService/app/auth/forms.py | Entromorgan/GIoTTo | 0 | 12768859 | """
CentalService.auth.views
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Contains all the forms for the CentralService authorization functions.
The two forms that are used are for login and for creating a new user.
@copyright: (c) 2016 SynergyLabs
@license: UCSD License. See License file for details.
"""
from flask_wtf import Form
from wtforms import StringField, PasswordField, BooleanField, SubmitField
from wtforms.validators import DataRequired, Email, Length, EqualTo
from wtforms import ValidationError
from ..models.cs_models import User
class LoginForm(Form):
    """Credentials form for the CentralService login page."""
    email = StringField('Email', validators=[DataRequired(), Email()])
    password = PasswordField('Password', validators=[DataRequired()])
    remember_me = BooleanField('Keep me logged in')
    submit = SubmitField('Log In')
class RegistrationForm(Form):
    """New-user registration form with password confirmation."""
    password = PasswordField('Password',
                             validators=[DataRequired(), EqualTo('password2', message='Passwords must match')])
    # NOTE(review): the '<PASSWORD>' label below is a dataset redaction
    # placeholder, not the intended UI label (likely 'Confirm password')
    # -- confirm against the original source.
    password2 = PasswordField('<PASSWORD>', validators=[DataRequired()])
    submit = SubmitField('Register')

    def validate_email(self, field):
        """WTForms inline validator hook: reject already-registered emails.

        NOTE(review): this form defines no `email` field in this copy, so
        the hook only fires if one is added elsewhere -- confirm.
        """
        if User.objects(email=field.data).first() is not None:
            raise ValidationError('Email already registered.')
| 2.6875 | 3 |
movies/migrations/0006_movies_ph_credit.py | Saifur43/Movie-Success-Prediction | 7 | 12768860 | # Generated by Django 2.1.7 on 2019-04-10 01:21
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated (Django 2.1.7): adds nullable Movies.ph_credit.
    Do not edit operations by hand."""

    dependencies = [
        ('movies', '0005_auto_20190410_0659'),
    ]

    operations = [
        migrations.AddField(
            model_name='movies',
            name='ph_credit',
            field=models.IntegerField(blank=True, null=True),
        ),
    ]
| 1.539063 | 2 |
tests/unit/modules/grains_test.py | jkur/salt | 2 | 12768861 | <filename>tests/unit/modules/grains_test.py
# -*- coding: utf-8 -*-
# Import python libs
from __future__ import absolute_import
import copy
# Import Salt Testing libs
from salttesting import TestCase
from salttesting.helpers import ensure_in_syspath
ensure_in_syspath('../../')
# Import Salt libs
from salt.exceptions import SaltException
from salt.modules import grains as grainsmod
from salt.utils import dictupdate
# Inject mocked grains so the module under test resolves os_family to
# 'MockedOS' (and '1'/'2' for the numbered-grain look-up tests below).
grainsmod.__grains__ = {
    'os_family': 'MockedOS',
    '1': '1',
    '2': '2',
}
class GrainsModuleTestCase(TestCase):
    """Unit tests for salt.modules.grains.filter_by: grain look-up,
    default keys, merge dictionaries, and the `base` argument."""

    def test_filter_by(self):
        """Exercise filter_by across missing/matching grains, defaults,
        merges (which mutate their inputs -- hence the 'reestablish'
        resets), error cases, and base-dict composition."""
        dict1 = {'A': 'B', 'C': {'D': {'E': 'F', 'G': 'H'}}}
        dict2 = {
            'default': {
                'A': 'B',
                'C': {
                    'D': 'E'
                },
            },
            '1': {
                'A': 'X',
            },
            '2': {
                'C': {
                    'D': 'H',
                },
            },
            'MockedOS': {
                'A': 'Z',
            },
        }
        mdict1 = {'D': {'E': 'I'}, 'J': 'K'}
        mdict2 = {'A': 'Z'}
        mdict3 = {'C': {'D': 'J'}}

        # test None result with non existent grain and no default
        res = grainsmod.filter_by(dict1, grain='xxx')
        self.assertIs(res, None)

        # test None result with os_family grain and no matching result
        res = grainsmod.filter_by(dict1)
        self.assertIs(res, None)

        # test with non existent grain, and a given default key
        res = grainsmod.filter_by(dict1, grain='xxx', default='C')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})

        # add a merge dictionary, F disappears
        res = grainsmod.filter_by(dict1, grain='xxx', merge=mdict1, default='C')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'C': {'D': {'E': 'F', 'G': 'H'}}}

        # default is not present in dict1, check we only have merge in result
        res = grainsmod.filter_by(dict1, grain='xxx', merge=mdict1, default='Z')
        self.assertEqual(res, mdict1)

        # default is not present in dict1, and no merge, should get None
        res = grainsmod.filter_by(dict1, grain='xxx', default='Z')
        self.assertIs(res, None)

        # test giving a list as merge argument raise exception
        self.assertRaises(
            SaltException,
            grainsmod.filter_by,
            dict1,
            'xxx',
            ['foo'],
            'C'
        )

        # Now, re-test with an existing grain (os_family), but with no match.
        res = grainsmod.filter_by(dict1)
        self.assertIs(res, None)
        res = grainsmod.filter_by(dict1, default='C')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})
        res = grainsmod.filter_by(dict1, merge=mdict1, default='C')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'C': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1, merge=mdict1, default='Z')
        self.assertEqual(res, mdict1)
        res = grainsmod.filter_by(dict1, default='Z')
        self.assertIs(res, None)

        # this one is in fact a traceback in updatedict, merging a string with a dictionary
        self.assertRaises(
            TypeError,
            grainsmod.filter_by,
            dict1,
            merge=mdict1,
            default='A'
        )

        # Now, re-test with a matching grain.
        dict1 = {'A': 'B', 'MockedOS': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1)
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})
        res = grainsmod.filter_by(dict1, default='A')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})
        res = grainsmod.filter_by(dict1, merge=mdict1, default='A')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'MockedOS': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1, merge=mdict1, default='Z')
        self.assertEqual(res, {'D': {'E': 'I', 'G': 'H'}, 'J': 'K'})
        # dict1 was altered, reestablish
        dict1 = {'A': 'B', 'MockedOS': {'D': {'E': 'F', 'G': 'H'}}}
        res = grainsmod.filter_by(dict1, default='Z')
        self.assertEqual(res, {'D': {'E': 'F', 'G': 'H'}})

        # Base tests
        # NOTE: these may fail to detect errors if dictupdate.update() is broken
        # but then the unit test for dictupdate.update() should fail and expose
        # that. The purpose of these tests is it validate the logic of how
        # in filter_by() processes its arguments.

        # Test with just the base
        res = grainsmod.filter_by(dict2, grain='xxx', default='xxx', base='default')
        self.assertEqual(res, dict2['default'])

        # Test the base with the OS grain look-up
        res = grainsmod.filter_by(dict2, default='xxx', base='default')
        self.assertEqual(
            res,
            dictupdate.update(copy.deepcopy(dict2['default']), dict2['MockedOS'])
        )

        # Test the base with default
        res = grainsmod.filter_by(dict2, grain='xxx', base='default')
        self.assertEqual(res, dict2['default'])

        res = grainsmod.filter_by(dict2, grain='1', base='default')
        self.assertEqual(
            res,
            dictupdate.update(copy.deepcopy(dict2['default']), dict2['1'])
        )

        res = grainsmod.filter_by(dict2, base='default', merge=mdict2)
        self.assertEqual(
            res,
            dictupdate.update(
                dictupdate.update(
                    copy.deepcopy(dict2['default']),
                    dict2['MockedOS']),
                mdict2
            )
        )

        res = grainsmod.filter_by(dict2, base='default', merge=mdict3)
        self.assertEqual(
            res,
            dictupdate.update(
                dictupdate.update(
                    copy.deepcopy(dict2['default']),
                    dict2['MockedOS']),
                mdict3
            )
        )
if __name__ == '__main__':
    # Run via Salt's integration test harness when executed directly.
    from integration import run_tests
    run_tests(GrainsModuleTestCase, needs_daemon=False)
| 2.3125 | 2 |
nexuscasc/api/endpoint/__init__.py | vjda/nexus3-casc-cli | 7 | 12768862 | from .script import Script
from .status_check import CheckStatus
| 1.046875 | 1 |
supervisor_jcmd/tests/test_controllerplugin.py | astronouth7303/supervisor-jcmd | 84 | 12768863 | import sys
import unittest
# Py2/Py3 compatibility: StringIO moved into the io module in Python 3.
try:
    from StringIO import StringIO
except ImportError:  # was a bare `except:`, which would mask unrelated errors
    from io import StringIO
class TestControllerPlugin(unittest.TestCase):
    """Tests for CacheControllerPlugin's supervisorctl commands
    (cache_clear/count/delete/fetch/keys/store plus their help text).
    Each test drives the plugin through a DummyController that captures
    output in a StringIO."""

    # Factory

    def test_make_cache_controllerplugin_factory(self):
        from supervisor_cache import controllerplugin
        controller = DummyController()
        plugin = controllerplugin.make_cache_controllerplugin(controller)
        self.assertEqual(controller, plugin.ctl)

    # Constructor

    def test_ctor_assigns_controller(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        self.assertEqual(controller, plugin.ctl)

    # cache_clear

    def test_do_cache_clear(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = dict(foo='bar')
        plugin.do_cache_clear('')
        self.assertEqual({}, cache_interface.cache)

    def test_do_cache_clear_accepts_no_args(self):
        # Passing any argument should print the usage string instead.
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_clear('arg')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_clear'))

    def test_help_cache_clear(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.help_cache_clear()
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_clear'))

    # cache_count

    def test_do_cache_count(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = dict(foo='bar', baz='qux')
        plugin.do_cache_count('')
        output = controller.sio.getvalue()
        self.assertEqual('2', output)

    def test_do_cache_count_accepts_no_args(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_count('arg')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_count'))

    def test_help_cache_count(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.help_cache_count()
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_count'))

    # cache_delete

    def test_do_cache_delete(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = dict(foo='bar', baz='qux')
        plugin.do_cache_delete('foo')
        self.assertTrue('foo' not in cache_interface.cache.keys())
        self.assertEqual('qux', cache_interface.cache['baz'])

    def test_do_cache_delete_accepts_a_quoted_arg(self):
        # Keys containing spaces must be accepted when shell-quoted.
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = {'f o o': 'bar', 'baz': 'qux'}
        plugin.do_cache_delete('"f o o"')
        self.assertTrue('f o o' not in cache_interface.cache.keys())

    def test_do_cache_delete_accepts_only_one_arg(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_delete('first second')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_delete'))

    def test_help_cache_delete(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.help_cache_delete()
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_delete <key>'))

    # cache_fetch

    def test_do_cache_fetch(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = dict(foo='bar')
        plugin.do_cache_fetch('foo')
        out = controller.sio.getvalue()
        self.assertEqual("'bar'", out)

    def test_do_cache_fetch_accepts_a_quoted_arg(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = {'f o o': 'bar'}
        plugin.do_cache_fetch('"f o o"')
        out = controller.sio.getvalue()
        self.assertEqual("'bar'", out)

    def test_do_cache_fetch_accepts_only_one_arg(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_fetch('first second')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_fetch'))

    def test_help_cache_fetch(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.help_cache_fetch()
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_fetch <key>'))

    # cache_keys

    def test_do_cache_keys(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = dict(foo='bar', baz='qux')
        plugin.do_cache_keys('')
        output = controller.sio.getvalue()
        self.assertTrue('foo' in output)
        self.assertTrue('baz' in output)

    def test_do_cache_keys_accepts_no_args(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_keys('arg')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_keys'))

    def test_help_cache_keys(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.help_cache_keys()
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_keys'))

    # cache_store

    def test_do_cache_store(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = {}
        plugin.do_cache_store('foo bar')
        self.assertEqual('bar', cache_interface.cache['foo'])

    def test_do_cache_store_accepts_a_quoted_key(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = {}
        plugin.do_cache_store('"foo bar" baz')
        self.assertEqual('baz', cache_interface.cache['foo bar'])

    def test_do_cache_store_accepts_a_quoted_value(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        cache_interface = plugin.cache
        cache_interface.cache = {}
        plugin.do_cache_store('foo "bar baz"')
        self.assertEqual('bar baz', cache_interface.cache['foo'])

    def test_do_cache_store_accepts_no_less_than_two_args(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_store('first')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_store'))

    def test_do_cache_store_accepts_no_more_than_two_args(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.do_cache_store('first second third')
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_store'))

    def test_help_cache_store(self):
        controller = DummyController()
        plugin = self.makeOne(controller)
        plugin.help_cache_store()
        out = controller.sio.getvalue()
        self.assertTrue(out.startswith('cache_store <key> <value>'))

    # Test Helpers

    def makeOne(self, *arg, **kw):
        # Instantiate the class under test with the given args.
        return self.getTargetClass()(*arg, **kw)

    def getTargetClass(self):
        from supervisor_cache.controllerplugin import CacheControllerPlugin
        return CacheControllerPlugin
class DummyController:
    """Minimal stand-in for supervisorctl's Controller used by the tests."""

    def __init__(self):
        # Captures everything the plugin writes via output().
        self.sio = StringIO()

    def output(self, out):
        assert(isinstance(out, str))
        self.sio.write(out)

    def get_server_proxy(self, namespace=None):
        # Returns a real cache RPC interface for the 'cache' namespace;
        # any other namespace falls through and implicitly returns None.
        if namespace == 'cache':
            from supervisor.tests.base import DummySupervisor
            supervisor = DummySupervisor()
            from supervisor_cache.rpcinterface import CacheNamespaceRPCInterface
            cache = CacheNamespaceRPCInterface(supervisor)
            return cache
def test_suite():
    """Collect every TestCase defined in this module for the runner."""
    current_module = sys.modules[__name__]
    return unittest.findTestCases(current_module)
if __name__ == '__main__':
    # Allow running this test file directly with the unittest runner.
    unittest.main(defaultTest='test_suite')
| 2.59375 | 3 |
Estruturas de Dados/arvore_binaria_de_busca.py | da-ferreira/algorithms_and_data_structures | 0 | 12768864 |
# Author: <NAME>
# Implementation of a Binary Search Tree
#
# Example of a Binary Search Tree used throughout this code.
# Elements on the left are smaller than the parent's value, and
# elements on the right are greater than or equal to the parent's value.
#
#            61
#            /\
#           /  \
#          /    \
#        43      89
#       /  \     /
#     16    51  66
#    /  \    \   \
#  11    32   55  79
#                 / \
#               77   82
#
# 61, 43, 16, 11, 32, 51, 55, 89, 66, 79, 77, 82
# Sentinel default meaning "start at the tree root" (distinct from None,
# which already means "empty branch" in this code).
ROOT = 'root'
# A node links the sparse elements of the tree together.
class Node:
    def __init__(self, element):
        # Stored value plus left/right child links (None when absent).
        self.data = element
        self.left = None
        self.right = None

    def __str__(self):
        return f'{self.data}'
class BinarySearchTree:
    def __init__(self, element=None, node=None):
        """Create a tree rooted at ``node``, at a new Node(element), or empty."""
        # Start the tree root from an already-created node.
        if node is not None:
            self.root = node
        # Otherwise create a node and assign it as the tree root.
        elif element is not None:
            self.root = Node(element)
        else:
            self.root = None
    def insert(self, element):
        """Insert ``element``; duplicates (>=) descend into the right subtree."""
        father = None  # parent of the element being inserted
        point = self.root

        while point is not None:
            father = point
            # If the element is smaller keep descending left,
            # otherwise keep descending right.
            if element < point.data:
                point = point.left
            else:
                point = point.right

        # If the tree is empty, the incoming element becomes the root.
        if father is None:
            self.root = Node(element)
        # Attach the new element under its parent.
        elif element < father.data:
            father.left = Node(element)
        else:
            father.right = Node(element)
    def search(self, element, node=0):
        """Return a BinarySearchTree rooted at ``element``, or None if absent.

        ``node=0`` is a sentinel meaning "start at the root" (0 is used
        because None already means "empty branch").
        """
        # Start the search at the root.
        if node == 0:
            node = self.root

        # The item is not in the tree: return None.
        if node is None:
            return node
        # Found: return a sub-tree rooted at the match.
        elif node.data == element:
            return BinarySearchTree(node=node)
        elif element < node.data:
            return self.search(element, node.left)  # smaller values live on the left
        return self.search(element, node.right)  # larger-or-equal values live on the right
    # Removing an element has three possible cases:
    # 1. The removed element is a leaf.
    # 2. The removed element has exactly one child (left or right).
    # 3. The removed element has both children (left and right).
    # In the 3rd case we remove the element and replace it with the
    # smallest element of its right subtree.
    def remove(self, element, node=ROOT):
        """Remove ``element`` from the subtree at ``node``; return the new subtree root."""
        if node == ROOT:
            node = self.root

        # Descended into a null branch, i.e. the element is not in the tree.
        if node is None:
            return node

        # If the element is smaller, descend to the left.
        if element < node.data:
            node.left = self.remove(element, node.left)
        # If the element is larger, descend to the right.
        elif element > node.data:
            node.right = self.remove(element, node.right)
        # Element found (equal).
        else:
            # Case 1: the removed element is a leaf -- returning None to
            # the parent's left/right link removes it.
            # Case 2: the element has a single child (left or right) --
            # return the non-null child to take its place under the parent.
            if node.left is None:
                return node.right
            elif node.right is None:
                return node.left
            # Case 3: the element has two children.
            else:
                # The substitute is the smallest element of the right
                # subtree; it replaces the removed element.
                substitute = self.min(node.right)
                # Swap the removed value for its substitute.
                node.data = substitute
                # Remove the substitute value from the right subtree.
                node.right = self.remove(substitute, node.right)

        # Return the node itself for branches where no reattachment happened.
        return node
    # The smallest element of the tree is the leftmost node,
    # i.e. the one with no left child.
    def min(self, node=ROOT):
        """Return the smallest value in the subtree rooted at ``node``."""
        if node == ROOT:
            node = self.root

        # NOTE(review): an empty tree (node is None) raises AttributeError here.
        while node.left is not None:
            node = node.left

        return node.data
# O maior elemento da árvore está o máximo à direita,
# e que não possua um filho à direita.
def max(self, node=ROOT):
if node == ROOT:
node = self.root
while node.right is not None:
node = node.right
return node.data
# Percurso em pré ordem.
# O percurso mostra a raíz, depois o elemento à esquerda e o elemento à dereita.
# A ordem visitada no exemplo visto acima sera:
# 61, 43, 16, 11, 32, 51, 55, 89, 66, 79, 77, 82
def pre_order_route(self, node=ROOT):
if node == ROOT:
node = self.root
if node is not None:
print(node, end=' ')
self.pre_order_route(node.left)
self.pre_order_route(node.right)
# Percurso em ordem simétrica/em ordem.
# O percurso mostra primeiro o elemento da esquerda, depois a raíz, e
# o elemento da direita, recursivamente. Na BST os elementos ficam em ordem crescente,
# quando é usado esse percurso.
# A ordem visitada no exemplo visto acima sera:
# 11, 16, 32, 43, 51, 55, 61, 66, 77, 79, 82, 89
def inorder_route(self, node=None):
if node is None:
node = self.root
# Chamando recursivamente e mostrando os elementos
if node.left is not None:
self.inorder_route(node.left)
print(node, end=' ') # mostrando o elemento do nó
if node.right is not None:
self.inorder_route(node.right)
# Percurso em pós ordem: exibi os filhos da esquerda e direita antes de mostrar a sí mesmo,
# visitando recursivamente sua sub-árvore da esquerda, e depois sua sub-árvore da direta.
# A ordem visitada no exemplo visto acima sera:
# 11, 32, 16, 55, 51, 43, 77, 82, 79, 66, 89, 61
def post_order_route(self, node=None):
if node is None: # Começa o percurso pela raíz da árvore
node = self.root
if node.left is not None:
self.post_order_route(node.left)
if node.right is not None:
self.post_order_route(node.right)
print(node, end=' ') # mostrando o elemento do nó
# Percurso em Nível
# O elementos visitados são os de cada nivel da árvore, da esquerda para a direita.
# Para obter esse percurso é necessário usar uma Fila, seguindo a regra: primeiro a entrar,
# primeiro a sair.
# A ordem visitada no exemplo visto acima sera:
# 61, 43, 89, 16, 51, 66, 11, 32, 55, 79, 77, 82
def route_at_level(self, node=ROOT):
if node == ROOT:
node = self.root
queue = []
queue.append(node) # insere no final, e remove do inicio
while len(queue) > 0:
node = queue.pop(0)
if node.left is not None:
queue.append(node.left)
if node.right is not None:
queue.append(node.right)
print(node, end=' ')
# Altura da árvore: raíz ate sua folha mais profunda.
# O objetivo é olhar a altura das sub-arvores da esquerda e direita,
# utilizando o percurso em pós ordem, e pegar a maior altura e incrementar 1.
def height(self, node=None):
if node is None:
node = self.root # altura da arvore completa
height_left = 0 # altura da sub-árvore esquerda
height_right = 0 # altura da sub-árvore direita
if node.left is not None:
height_left = self.height(node.left)
if node.right is not None:
height_right = self.height(node.right)
if height_left > height_right:
return height_left + 1
return height_right + 1
if __name__ == '__main__':
    # Demo: build the example tree, show every traversal, then
    # exercise min/max/height and removal.
    bst = BinarySearchTree()
    # values of the worked example
    valores = [61, 43, 89, 16, 51, 66, 11, 32, 55, 79, 77, 82]
    for valor in valores:
        bst.insert(valor)
    print('Percurso em Pré Ordem:')
    bst.pre_order_route()
    print('\n\nPercurso em Ordem Simétrica/Em ordem:')
    bst.inorder_route()
    print('\n\nPercurso em Pós Ordem:')
    bst.post_order_route()
    print('\n\nPercurso em Nível:')
    bst.route_at_level()
    print('\n')
    print(f'Menor elemento da árvore: {bst.min()}')  # -> 11
    print(f'Maior elemento da árvore: {bst.max()}')  # -> 89
    print(f'Altura da árvore: {bst.height()}')  # -> 5
    print(bst.remove(66))  # removing element 66
    print('\nPercurso em Ordem Simétrica/Em ordem:')
    bst.inorder_route()
    print('\n\nPercurso em Nível:')
    bst.route_at_level()
python/bits/190.reverse-bits.py | Nobodylesszb/LeetCode | 0 | 12768865 | <reponame>Nobodylesszb/LeetCode<filename>python/bits/190.reverse-bits.py
class Solution:
    """LeetCode 190: reverse the bits of a 32-bit integer."""

    def reverseBits(self, n):
        """Return `n` with its 32 low bits emitted in reversed order."""
        result = 0
        for _ in range(32):
            # Shift the accumulator left and append n's lowest bit.
            result = (result << 1) | (n & 1)
            n >>= 1  # drop the bit just consumed
        return result
# Quick manual check of the solution (note: the argument is the
# decimal literal 101010111, not a binary literal).
solver = Solution()
reversed_bits = solver.reverseBits(101010111)
print(reversed_bits)
| 3.296875 | 3 |
haas_lib_bundles/python/docs/examples/home_intrusion_alarm/esp32/code/buzzer.py | wstong999/AliOS-Things | 0 | 12768866 | <gh_stars>0
from driver import PWM
class BUZZER(object):
    """Thin wrapper around a PWM channel used to drive a buzzer."""

    def __init__(self, pwmObj, data=None):
        """Bind the buzzer to a PWM object; optionally apply settings right away."""
        self.pwmObj = None
        if not isinstance(pwmObj, PWM):
            raise ValueError("parameter is not an PWM object")
        self.pwmObj = pwmObj
        if data is not None:
            self.setOptionDuty(data)

    def setOptionDuty(self, data):
        """Forward `data` to the PWM channel's setOption.

        The shape of `data` is defined by the PWM driver — presumably
        frequency/duty settings; confirm against the driver module.
        """
        if self.pwmObj is None:
            raise ValueError("invalid PWM object")
        self.pwmObj.setOption(data)

    def start(self, data):
        """Apply `data` to the PWM channel (caller supplies 'on' settings)."""
        if self.pwmObj is None:
            raise ValueError("invalid PWM object")
        self.setOptionDuty(data)

    def close(self, data):
        """Apply `data` to the PWM channel (caller supplies 'off' settings)."""
        if self.pwmObj is None:
            raise ValueError("invalid PWM object")
        self.setOptionDuty(data)
| 2.90625 | 3 |
test/test_vasicek.py | slpenn13/pythoninterestrates | 0 | 12768867 | ''' Pytest corresponding to calc_norm_v '''
# import numpy as np
import shortrate_model_vasicek as vas
import interest_rate_capfloor_convenience as intconv
# Shared Vasicek short-rate model fixture used by every test below.
# NOTE(review): parameter meanings presumed from standard Vasicek usage
# (kappa=mean-reversion speed, theta=long-run mean, sigma=volatility,
# r0=initial short rate) — confirm against shortrate_model_vasicek.
mdl = vas.short_rate_vasicek(kappa=0.86, theta=0.08, sigma=0.01, r0=0.06,
                             norm_method=intconv.calc_v_norm_1d, dbg=True)
def test_norm():
    """Norm of v over [0.25, 0.5] matches the reference value."""
    result = mdl.calc_norm_v(t=0.0, t0=0.25, t1=0.5, dbg=True)
    assert round(1e6 * result, 3) == 1.028
def test_pt_05():
    """Price of the half-year zero-coupon bond matches the reference value."""
    half_year_price = mdl.price_zero(0.5)
    assert round(half_year_price, 4) == 0.9686
def test_put():
    """European put on the 0.25->0.5 zero, struck at the forward price ratio."""
    price_short = mdl.price_zero(0.25)
    price_long = mdl.price_zero(0.5)
    strike = price_long / price_short
    put_value = mdl.calc_european_put(strike=strike, t0=0.25, t1=0.5)
    assert round(1e4 * put_value, 3) == 3.918
| 2.71875 | 3 |
wikipedia_parser/wikipedia_api/urls.py | ojones/wikipedia_parser | 9 | 12768868 | ENDPOINT = "https://en.wikipedia.org/w/api.php"
def make_url(id, is_expanded=False, is_html=False):
    """Build a Wikipedia API URL, dispatching on whether `id` looks numeric."""
    # TODO: isdigit is not robust enough, title could be number instead of string
    if str(id).isdigit():
        return make_id_url(id, is_expanded, is_html)
    return make_title_url(id, is_expanded, is_html)
def make_title_url(id, is_expanded=False, is_html=False):
    """Build a MediaWiki API URL that looks the page up by title."""
    title = url_safe_spaces(str(id))
    if is_html:
        # The parsed-HTML variant replaces the whole URL, so the
        # expand flag is ignored when is_html is set (as before).
        return ENDPOINT + "?format=json&rvprop=content&prop=revisions&rvparse=&titles=" + title + "&rvlimit=1&action=query"
    url = ENDPOINT + "?action=query&titles=" + title + "&prop=revisions|pageprops&rvprop=content&format=json"
    if is_expanded:
        url += "&rvexpandtemplates"
    return url
def make_id_url(id, is_expanded=False, is_html=False):
    """Build a MediaWiki API URL that looks the page up by numeric page id."""
    page_id = url_safe_spaces(str(id))
    if is_html:
        # The parsed-HTML variant replaces the whole URL, so the
        # expand flag is ignored when is_html is set (as before).
        return ENDPOINT + "?format=json&rvprop=content&prop=revisions&rvparse=&pageids=" + page_id + "&rvlimit=1&action=query"
    url = ENDPOINT + "?action=query&pageids=" + page_id + "&prop=revisions|pageprops&rvprop=content&format=json"
    if is_expanded:
        url += "&rvexpandtemplates"
    return url
def url_safe_spaces(text):
    """Percent-encode spaces so the string can be embedded in a URL."""
    return "%20".join(text.split(" "))
test/e2e/htdocs/cgi/echo.py | notroj/mod_h2 | 0 | 12768869 | <reponame>notroj/mod_h2
#!/usr/bin/env python
import sys, cgi, os
# NOTE(review): Python 2 syntax (print statements) — runs only under a
# python2 CGI setup; porting to python3 would change the shebang target.
status = '200 Ok'  # set but never used: the literal "200" below is what is emitted
content = ''
# Accumulate the whole request body from stdin.
for line in sys.stdin:
    content += line
# Just echo what we get
print "Status: 200"
print """Content-Type: application/data\n"""
print content,
| 2.1875 | 2 |
sphinx/source/docs/user_guide/source_examples/plotting_arrow.py | quasiben/bokeh | 1 | 12768870 | from bokeh.plotting import figure, output_file, show
from bokeh.models import Arrow, OpenHead, NormalHead, VeeHead
# Three circles joined by arrows demonstrating the available
# arrow-head styles (open, normal, vee).
output_file("arrow.html", title="arrow.py example")

plot = figure(plot_width=600, plot_height=600, x_range=(-0.1, 1.1), y_range=(-0.1, 0.8))
plot.circle(x=[0, 1, 0.5], y=[0, 0, 0.7], radius=0.1,
            color=["navy", "yellow", "red"], fill_alpha=0.1)

arrows = [
    Arrow(end=OpenHead(), x_start=0, y_start=0, x_end=1, y_end=0),
    Arrow(end=NormalHead(), x_start=1, y_start=0, x_end=0.5, y_end=0.7),
    Arrow(end=VeeHead(), x_start=0.5, y_start=0.7, x_end=0, y_end=0),
]
for arrow in arrows:
    plot.add_layout(arrow)

show(plot)
| 2.84375 | 3 |
utils/auth_utils.py | darth-dodo/hustlers-den | 3 | 12768871 | from django.core.exceptions import PermissionDenied
from django.utils.crypto import get_random_string
from rest_framework_jwt.settings import api_settings
from utils.constants import AUTO_GENERATED_PASSWORD_LENGTH
# binding.pry equivalent
# import code; code.interact(local=locals())
def get_hustler_data(hustler_object):
    """Serialize a Hustler object into a JSON-ready dict.

    :param hustler_object: Hustler object
    :return: dict produced by HustlerSerializer
    """
    # Local import (presumably to avoid a circular import with the
    # serializers module — confirm before moving to the top of the file).
    from hustlers.api.serializers import HustlerSerializer

    return HustlerSerializer(hustler_object).data
def jwt_response_payload_handler(token=None, user=None, request=None):
    """Custom JWT response payload creator.

    /auth/login/ redirects here; it wraps the vanilla auth/login flow
    and attaches serialized hustler data to the response.

    :param token: JWT token; generated from `user` when not supplied
    :param user: User object
    :param request: Request object (unused here)
    :return: dict with 'auth_token' and 'hustler_data'
    :raises PermissionDenied: when the user has no hustler or is inactive
    """
    # Guard clauses: same checks as before, flattened for readability.
    if not hasattr(user, "hustler"):
        raise PermissionDenied("Hustler does not exist!")
    if user.is_active is False:
        raise PermissionDenied("Hustler is inactive")

    hustler_data = get_hustler_data(user.hustler)

    if token is None:
        # Build the token from the user via the configured JWT handlers.
        payload = api_settings.JWT_PAYLOAD_HANDLER(user)
        token = api_settings.JWT_ENCODE_HANDLER(payload)

    return {"auth_token": token, "hustler_data": hustler_data}
def generate_hustler_password(length_of_password=AUTO_GENERATED_PASSWORD_LENGTH):
    """Return an auto-generated random password.

    :param length_of_password: number of characters to generate
    :return: random string produced by Django's get_random_string
    """
    return get_random_string(length_of_password)
| 2.28125 | 2 |
dataservice/api/common/id_service.py | ConnorBarnhill/kf-api-dataservice | 6 | 12768872 | <reponame>ConnorBarnhill/kf-api-dataservice
import uuid
import random
import base32_crockford as b32
def uuid_generator():
    """Return a freshly generated UUID4 as a 36-character string."""
    return '{}'.format(uuid.uuid4())
def kf_id_generator(prefix):
    """Return a generator function for prefixed Crockford base-32 ids.

    The produced ids are a random number in [0, 32**8) encoded with
    (Crockford)[http://www.crockford.com/wrmg/base32.html] base 32,
    left padded with '0' to 8 characters, prefixed by a two-character
    entity-type code and delimited by an underscore.

    Ex:
        'PT_0004PEDE'
        'SA_D167JSHP'
        'DM_ZZZZZZZZ'
        'ST_00000000'
    """
    assert len(prefix) == 2, 'Prefix must be two characters'
    prefix = prefix.upper()

    def generator():
        encoded = b32.encode(random.randint(0, 32 ** 8 - 1))
        return '{0}_{1:0>8}'.format(prefix, encoded)

    return generator
| 3.078125 | 3 |
app/app/cal.py | bigdave2058/recipe-app-api | 0 | 12768873 | <filename>app/app/cal.py
def addNumber(x, y):
    """Add two numbers and return their sum."""
    # Fixes: missing ':' after the signature and malformed docstring quotes.
    return x + y
def subtract(x, y):
    """Subtract y from x and return the difference."""
    # Fixes: missing ':' after the signature, and the body returned a
    # hard-coded 0 despite the docstring promising a subtraction.
    return x - y
def multiply(x, y):
    """Multiply two numbers and return their product."""
    # Fixes: missing ':' after the signature, and the body returned the
    # tuple (x, y) instead of the product the docstring promises.
    return x * y
| 2.5 | 2 |
src/vtd_interface/viRDBIcd.py | StanfordASL/vtd_interface | 20 | 12768874 | <reponame>StanfordASL/vtd_interface
# Automatically generated from the C message definitions
import cstruct
RDB_DEFAULT_PORT = 48190
RDB_FEEDBACK_PORT = 48191
RDB_IMAGE_PORT = 48192
RDB_MAGIC_NO = 35712
RDB_VERSION = 0x011D
RDB_SIZE_OBJECT_NAME = 32
RDB_SIZE_SCP_NAME = 64
RDB_SIZE_FILENAME = 1024
RDB_SIZE_TRLIGHT_PHASES = 8
RDB_PKG_ID_START_OF_FRAME = 1
RDB_PKG_ID_END_OF_FRAME = 2
RDB_PKG_ID_COORD_SYSTEM = 3
RDB_PKG_ID_COORD = 4
RDB_PKG_ID_ROAD_POS = 5
RDB_PKG_ID_LANE_INFO = 6
RDB_PKG_ID_ROADMARK = 7
RDB_PKG_ID_OBJECT_CFG = 8
RDB_PKG_ID_OBJECT_STATE = 9
RDB_PKG_ID_VEHICLE_SYSTEMS = 10
RDB_PKG_ID_VEHICLE_SETUP = 11
RDB_PKG_ID_ENGINE = 12
RDB_PKG_ID_DRIVETRAIN = 13
RDB_PKG_ID_WHEEL = 14
RDB_PKG_ID_PED_ANIMATION = 15
RDB_PKG_ID_SENSOR_STATE = 16
RDB_PKG_ID_SENSOR_OBJECT = 17
RDB_PKG_ID_CAMERA = 18
RDB_PKG_ID_CONTACT_POINT = 19
RDB_PKG_ID_TRAFFIC_SIGN = 20
RDB_PKG_ID_ROAD_STATE = 21
RDB_PKG_ID_IMAGE = 22
RDB_PKG_ID_LIGHT_SOURCE = 23
RDB_PKG_ID_ENVIRONMENT = 24
RDB_PKG_ID_TRIGGER = 25
RDB_PKG_ID_DRIVER_CTRL = 26
RDB_PKG_ID_TRAFFIC_LIGHT = 27
RDB_PKG_ID_SYNC = 28
RDB_PKG_ID_DRIVER_PERCEPTION = 29
RDB_PKG_ID_LIGHT_MAP = 30
RDB_PKG_ID_TONE_MAPPING = 31
RDB_PKG_ID_ROAD_QUERY = 32
RDB_PKG_ID_SCP = 33
RDB_PKG_ID_TRAJECTORY = 34
RDB_PKG_ID_DYN_2_STEER = 35
RDB_PKG_ID_STEER_2_DYN = 36
RDB_PKG_ID_PROXY = 37
RDB_PKG_ID_MOTION_SYSTEM = 38
RDB_PKG_ID_OCCLUSION_MATRIX = 39
RDB_PKG_ID_FREESPACE = 40
RDB_PKG_ID_DYN_EL_SWITCH = 41
RDB_PKG_ID_DYN_EL_DOF = 42
RDB_PKG_ID_IG_FRAME = 43
RDB_PKG_ID_RAY = 44
RDB_PKG_ID_RT_PERFORMANCE = 45
RDB_PKG_ID_CUSTOM_SCORING = 10000
RDB_PKG_ID_CUSTOM_OBJECT_CTRL_TRACK = 10001
RDB_PKG_ID_CUSTOM_LIGHT_B = 10002
RDB_PKG_ID_CUSTOM_LIGHT_A = 10003
RDB_PKG_ID_CUSTOM_LIGHT_GROUP_B = 10004
RDB_PKG_ID_CUSTOM_AUDI_FORUM = 12000
RDB_PKG_ID_CUSTOM_OPTIX_START = 12100
RDB_PKG_ID_OPTIX_BUFFER = 12101
RDB_PKG_ID_CUSTOM_OPTIX_END = 12149
RDB_PKG_ID_CUSTOM_USER_A_START = 12150
RDB_PKG_ID_CUSTOM_USER_A_END = 12174
RDB_PKG_ID_CUSTOM_USER_B_START = 12175
RDB_PKG_ID_CUSTOM_USER_B_END = 12200
RDB_OBJECT_CATEGORY_NONE = 0
RDB_OBJECT_CATEGORY_PLAYER = 1
RDB_OBJECT_CATEGORY_SENSOR = 2
RDB_OBJECT_CATEGORY_CAMERA = 3
RDB_OBJECT_CATEGORY_LIGHT_POINT = 4
RDB_OBJECT_CATEGORY_COMMON = 5
RDB_OBJECT_CATEGORY_OPENDRIVE = 6
RDB_OBJECT_TYPE_NONE = 0
RDB_OBJECT_TYPE_PLAYER_NONE = 0
RDB_OBJECT_TYPE_PLAYER_CAR = 1
RDB_OBJECT_TYPE_PLAYER_TRUCK = 2
RDB_OBJECT_TYPE_PLAYER_VAN = 3
RDB_OBJECT_TYPE_PLAYER_BIKE = 4
RDB_OBJECT_TYPE_PLAYER_PEDESTRIAN = 5
RDB_OBJECT_TYPE_PLAYER_PED_GROUP = 6
RDB_OBJECT_TYPE_POLE = 7
RDB_OBJECT_TYPE_TREE = 8
RDB_OBJECT_TYPE_BARRIER = 9
RDB_OBJECT_TYPE_OPT1 = 10
RDB_OBJECT_TYPE_OPT2 = 11
RDB_OBJECT_TYPE_OPT3 = 12
RDB_OBJECT_TYPE_PLAYER_MOTORBIKE = 13
RDB_OBJECT_TYPE_PLAYER_BUS = 14
RDB_OBJECT_TYPE_STREET_LAMP = 15
RDB_OBJECT_TYPE_TRAFFIC_SIGN = 16
RDB_OBJECT_TYPE_HEADLIGHT = 17
RDB_OBJECT_TYPE_PLAYER_TRAILER = 18
RDB_OBJECT_TYPE_BUILDING = 19
RDB_OBJECT_TYPE_PARKING_SPACE = 20
RDB_OBJECT_TYPE_ROAD_WORKS = 21
RDB_OBJECT_TYPE_ROAD_MISC = 22
RDB_OBJECT_TYPE_TUNNEL = 23
RDB_OBJECT_TYPE_LEGACY = 24
RDB_OBJECT_TYPE_VEGETATION = 25
RDB_OBJECT_TYPE_MISC_MOTORWAY = 26
RDB_OBJECT_TYPE_MISC_TOWN = 27
RDB_OBJECT_TYPE_PATCH = 28
RDB_OBJECT_TYPE_OTHER = 29
RDB_OBJECT_PLAYER_SEMI_TRAILER = 30
RDB_OBJECT_PLAYER_RAILCAR = 31
RDB_OBJECT_PLAYER_RAILCAR_SEMI_HEAD = 32
RDB_OBJECT_PLAYER_RAILCAR_SEMI_BACK = 33
RDB_OBJECT_TYPE_VEH_LIGHT_FRONT_LEFT = 34
RDB_OBJECT_TYPE_VEH_LIGHT_FRONT_RIGHT = 35
RDB_OBJECT_TYPE_VEH_LIGHT_REAR_LEFT = 36
RDB_OBJECT_TYPE_VEH_LIGHT_REAR_RIGHT = 37
RDB_OBJECT_TYPE_VEH_CABIN = 38
RDB_LANE_BORDER_UNKNOWN = 0
RDB_LANE_BORDER_NONE = 1
RDB_LANE_BORDER_POLE = 2
RDB_LANE_BORDER_BARRIER = 3
RDB_LANE_BORDER_SOFT_SHOULDER = 4
RDB_LANE_BORDER_HARD_SHOULDER = 5
RDB_LANE_BORDER_CURB = 6
RDB_ROADMARK_TYPE_NONE = 0
RDB_ROADMARK_TYPE_SOLID = 1
RDB_ROADMARK_TYPE_BROKEN = 2
RDB_ROADMARK_TYPE_CURB = 3
RDB_ROADMARK_TYPE_GRASS = 4
RDB_ROADMARK_TYPE_BOTDOT = 5
RDB_ROADMARK_TYPE_OTHER = 6
RDB_ROADMARK_COLOR_NONE = 0
RDB_ROADMARK_COLOR_WHITE = 1
RDB_ROADMARK_COLOR_RED = 2
RDB_ROADMARK_COLOR_YELLOW = 3
RDB_ROADMARK_COLOR_OTHER = 4
RDB_ROADMARK_COLOR_BLUE = 5
RDB_ROADMARK_COLOR_GREEN = 6
RDB_WHEEL_ID_FRONT_LEFT = 0
RDB_GEAR_BOX_TYPE_AUTOMATIC = 0
RDB_GEAR_BOX_TYPE_MANUAL = 1
RDB_GEAR_BOX_POS_AUTO = 0
RDB_GEAR_BOX_POS_P = 1
RDB_GEAR_BOX_POS_R = 2
RDB_GEAR_BOX_POS_N = 3
RDB_GEAR_BOX_POS_D = 4
RDB_GEAR_BOX_POS_1 = 5
RDB_GEAR_BOX_POS_2 = 6
RDB_GEAR_BOX_POS_3 = 7
RDB_GEAR_BOX_POS_4 = 8
RDB_GEAR_BOX_POS_5 = 9
RDB_GEAR_BOX_POS_6 = 10
RDB_GEAR_BOX_POS_7 = 11
RDB_GEAR_BOX_POS_8 = 12
RDB_GEAR_BOX_POS_9 = 13
RDB_GEAR_BOX_POS_10 = 14
RDB_GEAR_BOX_POS_11 = 15
RDB_GEAR_BOX_POS_12 = 16
RDB_GEAR_BOX_POS_13 = 17
RDB_GEAR_BOX_POS_14 = 18
RDB_GEAR_BOX_POS_15 = 19
RDB_GEAR_BOX_POS_16 = 20
RDB_GEAR_BOX_POS_R1 = 21
RDB_GEAR_BOX_POS_R2 = 22
RDB_GEAR_BOX_POS_R3 = 23
RDB_GEAR_BOX_POS_M = 24
RDB_GEAR_BOX_POS_M_UP = 25
RDB_GEAR_BOX_POS_M_DOWN = 26
RDB_GEAR_BOX_POS_C = 27
RDB_GEAR_BOX_POS_MS = 28
RDB_GEAR_BOX_POS_CS = 29
RDB_GEAR_BOX_POS_PS = 30
RDB_GEAR_BOX_POS_RS = 31
RDB_GEAR_BOX_POS_NS = 32
RDB_GEAR_BOX_POS_DS = 33
RDB_DRIVETRAIN_TYPE_FRONT = 0
RDB_DRIVETRAIN_TYPE_REAR = 1
RDB_DRIVETRAIN_TYPE_AWD = 2
RDB_PIX_FORMAT_RGB = 0
RDB_PIX_FORMAT_RGB_16 = 1
RDB_PIX_FORMAT_RGB_24 = 2
RDB_PIX_FORMAT_RGBA = 3
RDB_PIX_FORMAT_RGBA_16 = 4
RDB_PIX_FORMAT_RGBA_24 = 5
RDB_PIX_FORMAT_BW_8 = 6
RDB_PIX_FORMAT_BW_16 = 7
RDB_PIX_FORMAT_BW_24 = 8
RDB_PIX_FORMAT_DEPTH_8 = 9
RDB_PIX_FORMAT_DEPTH_16 = 10
RDB_PIX_FORMAT_DEPTH_24 = 11
RDB_PIX_FORMAT_RGB_32_F = 12
RDB_PIX_FORMAT_RGBA_32_F = 13
RDB_PIX_FORMAT_LUM_32_F = 14
RDB_PIX_FORMAT_LUMA_32_F = 15
RDB_PIX_FORMAT_RGB_16_F = 16
RDB_PIX_FORMAT_RGBA_16_F = 17
RDB_PIX_FORMAT_LUM_16_F = 18
RDB_PIX_FORMAT_LUMA_16_F = 19
RDB_PIX_FORMAT_DEPTH_32 = 20
RDB_PIX_FORMAT_BW_32 = 21
RDB_PIX_FORMAT_RGB_32 = 22
RDB_PIX_FORMAT_RGBA_32 = 23
RDB_PIX_FORMAT_R3_G2_B2 = 24
RDB_PIX_FORMAT_R3_G2_B2_A8 = 25
RDB_PIX_FORMAT_R5_G6_B5 = 26
RDB_PIX_FORMAT_R5_G6_B5_A16 = 27
RDB_PIX_FORMAT_RED8 = 28
RDB_PIX_FORMAT_RED16 = 29
RDB_PIX_FORMAT_RED16F = 30
RDB_PIX_FORMAT_RED24 = 31
RDB_PIX_FORMAT_RED32 = 32
RDB_PIX_FORMAT_RED32F = 33
RDB_PIX_FORMAT_RG8 = 34
RDB_PIX_FORMAT_RG16 = 35
RDB_PIX_FORMAT_RG16F = 36
RDB_PIX_FORMAT_RG32 = 37
RDB_PIX_FORMAT_RG32F = 38
RDB_PIX_FORMAT_RGB8 = 39
RDB_PIX_FORMAT_RGBA8 = 40
RDB_PIX_FORMAT_RGB8_A24 = 41
RDB_PIX_FORMAT_RGB16 = 42
RDB_PIX_FORMAT_RGB16F = 43
RDB_PIX_FORMAT_RGBA16 = 44
RDB_PIX_FORMAT_RGBA16F = 45
RDB_PIX_FORMAT_RGB32 = 46
RDB_PIX_FORMAT_RGB32F = 47
RDB_PIX_FORMAT_RGBA32 = 48
RDB_PIX_FORMAT_RGBA32F = 49
RDB_PIX_FORMAT_DEPTH8 = 50
RDB_PIX_FORMAT_DEPTH16 = 51
RDB_PIX_FORMAT_DEPTH24 = 52
RDB_PIX_FORMAT_DEPTH32 = 53
RDB_PIX_FORMAT_CUSTOM_01 = 151
RDB_PIX_FORMAT_CUSTOM_02 = 152
RDB_PIX_FORMAT_CUSTOM_03 = 153
RDB_SENSOR_TYPE_NONE = 0
RDB_SENSOR_TYPE_RADAR = 1
RDB_SENSOR_TYPE_VIDEO = 2
RDB_TRLIGHT_PHASE_OFF = 0
RDB_TRLIGHT_PHASE_STOP = 1
RDB_TRLIGHT_PHASE_STOP_ATTN = 2
RDB_TRLIGHT_PHASE_GO = 3
RDB_TRLIGHT_PHASE_GO_EXCL = 4
RDB_TRLIGHT_PHASE_ATTN = 5
RDB_TRLIGHT_PHASE_BLINK = 6
RDB_TRLIGHT_PHASE_UNKNOWN = 7
RDB_COORD_TYPE_INERTIAL = 0
RDB_COORD_TYPE_RESERVED_1 = 1
RDB_COORD_TYPE_PLAYER = 2
RDB_COORD_TYPE_SENSOR = 3
RDB_COORD_TYPE_USK = 4
RDB_COORD_TYPE_USER = 5
RDB_COORD_TYPE_WINDOW = 6
RDB_COORD_TYPE_TEXTURE = 7
RDB_COORD_TYPE_RELATIVE_START = 8
RDB_COORD_TYPE_GEO = 9
RDB_COORD_TYPE_TRACK = 10
RDB_ENV_CLOUD_STATE_OFF = 0
RDB_ENV_CLOUD_STATE_0_8 = 1
RDB_ENV_CLOUD_STATE_4_8 = 2
RDB_ENV_CLOUD_STATE_6_8 = 3
RDB_ENV_CLOUD_STATE_8_8 = 4
RDB_FUNCTION_TYPE_NONE = 0
RDB_FUNCTION_TYPE_TONE_MAPPING = 1
RDB_ROAD_TYPE_UNKNOWN = 0
RDB_ROAD_TYPE_RURAL = 1
RDB_ROAD_TYPE_MOTORWAY = 2
RDB_ROAD_TYPE_TOWN = 3
RDB_ROAD_TYPE_LOW_SPEED = 4
RDB_ROAD_TYPE_PEDESTRIAN = 5
RDB_DRIVER_SOURCE_UNKNOWN = 0
RDB_DRIVER_SOURCE_GHOSTDRIVER = 1
RDB_SHM_SIZE_TC = 5242880
RDB_FREESPACE_STATE_OBJECT_NONE = 0
RDB_FREESPACE_STATE_OBJECT_SAME_DIR = 1
RDB_FREESPACE_STATE_OBJECT_ONCOMING = 2
RDB_DYN_EL_SCOPE_UNKNOWN = 0
RDB_DYN_EL_SCOPE_STATIC_DB = 1
RDB_DYN_EL_SCOPE_DYN_OBJECT = 2
RDB_DYN_EL_SCOPE_ANY = 3
RDB_DYN_EL_SCOPE_FIRST = 4
RDB_DYN_EL_SCOPE_STATIC_DB_SIGNAL = 5
RDB_DYN_EL_SCOPE_STATIC_DB_SWITCH = 6
RDB_RAY_TYPE_UNKNOWN = 0
RDB_RAY_TYPE_EMIT = 1
RDB_RAY_TYPE_HIT = 2
RDB_PKG_FLAG_NONE = 0x0000
RDB_PKG_FLAG_EXTENDED = 0x0001
RDB_PKG_FLAG_HIDDEN = 0x0002
RDB_OBJECT_VIS_FLAG_ALL = 0xffff
RDB_OBJECT_VIS_FLAG_NONE = 0x0000
RDB_OBJECT_VIS_FLAG_GFX = 0x0001
RDB_OBJECT_VIS_FLAG_TRAFFIC = 0x0002
RDB_OBJECT_VIS_FLAG_RECORDER = 0x0004
RDB_VEHICLE_LIGHT_OFF = 0x00000000
RDB_VEHICLE_LIGHT_PARK = 0x00000001
RDB_VEHICLE_LIGHT_LOW_BEAM = 0x00000002
RDB_VEHICLE_LIGHT_HIGH_BEAM = 0x00000004
RDB_VEHICLE_LIGHT_REAR_BRAKE = 0x00000008
RDB_VEHICLE_LIGHT_REAR_DRIVE = 0x00000010
RDB_VEHICLE_LIGHT_INDICATOR_L = 0x00000020
RDB_VEHICLE_LIGHT_INDICATOR_R = 0x00000040
RDB_VEHICLE_LIGHT_FLASH = 0x00000080
RDB_VEHICLE_LIGHT_FRONT_FOG = 0x00000100
RDB_VEHICLE_LIGHT_REAR_FOG = 0x00000200
RDB_VEHICLE_LIGHT_VIRES1 = 0x00000400
RDB_VEHICLE_LIGHT_DRL = 0x00000800
RDB_VEHICLE_LIGHT_DRL_LEFT_LOW = 0x00001000
RDB_VEHICLE_LIGHT_DRL_RIGHT_LOW = 0x00002000
RDB_VEHICLE_LIGHT_EMERGENCY = 0x00004000
RDB_VEHICLE_LIGHT_INDICATOR_LAMP_ON = 0x00008000
RDB_VEHICLE_LIGHT_FORCE = 0x00010000
RDB_VEHICLE_ACC_FLAG_OFF = 0x00
RDB_VEHICLE_ACC_FLAG_DIST_1 = 0x01
RDB_VEHICLE_ACC_FLAG_DIST_2 = 0x02
RDB_VEHICLE_ACC_FLAG_DIST_3 = 0x03
RDB_VEHICLE_ACC_FLAG_TARGET = 0x04
RDB_VEHICLE_ACC_FLAG_SPEED = 0x08
RDB_VEHICLE_DISPLAY_LIGHT_OFF = 0x0000
RDB_VEHICLE_DISPLAY_LIGHT_01 = 0x0001
RDB_VEHICLE_DISPLAY_LIGHT_02 = 0x0002
RDB_VEHICLE_DISPLAY_LIGHT_03 = 0x0004
RDB_VEHICLE_DISPLAY_LIGHT_04 = 0x0008
RDB_VEHICLE_DISPLAY_LIGHT_05 = 0x0010
RDB_VEHICLE_DISPLAY_LIGHT_06 = 0x0020
RDB_VEHICLE_DISPLAY_LIGHT_07 = 0x0040
RDB_VEHICLE_DISPLAY_LIGHT_08 = 0x0080
RDB_VEHICLE_DISPLAY_LIGHT_09 = 0x0100
RDB_VEHICLE_DISPLAY_LIGHT_10 = 0x0200
RDB_VEHICLE_DISPLAY_LIGHT_11 = 0x0400
RDB_VEHICLE_DISPLAY_LIGHT_12 = 0x0800
RDB_VEHICLE_DISPLAY_LIGHT_13 = 0x1000
RDB_VEHICLE_DISPLAY_LIGHT_14 = 0x2000
RDB_VEHICLE_DISPLAY_LIGHT_15 = 0x4000
RDB_VEHICLE_DISPLAY_LIGHT_16 = 0x8000
RDB_LANE_EXISTS_OWN = 0x01
RDB_LANE_EXISTS_LEFT = 0x02
RDB_LANE_EXISTS_RIGHT = 0x04
RDB_LANE_STATUS_NONE = 0x0000
RDB_LANE_STATUS_ROADWORKS = 0x0001
RDB_LANE_STATUS_EXIT = 0x0002
RDB_LANE_STATUS_ENTRY = 0x0004
RDB_LANE_STATUS_LINKED = 0x0008
RDB_LANE_STATUS_WET = 0x0010
RDB_LANE_STATUS_SNOW = 0x0020
RDB_DRIVER_FLAG_NONE = 0x00000000
RDB_DRIVER_FLAG_INDICATOR_L = 0x00000001
RDB_DRIVER_FLAG_INDICATOR_R = 0x00000002
RDB_DRIVER_FLAG_PARKING_BRAKE = 0x00000004
RDB_DRIVER_FLAG_LIGHT_LOW_BEAM = 0x00000008
RDB_DRIVER_FLAG_LIGHT_HIGH_BEAM = 0x00000010
RDB_DRIVER_FLAG_LIGHT_FOG_FRONT = 0x00000020
RDB_DRIVER_FLAG_LIGHT_FOG_REAR = 0x00000040
RDB_DRIVER_FLAG_LIGHT_EMERGENCY = 0x00000080
RDB_DRIVER_FLAG_LIGHT_PRIORITY = 0x00000100
RDB_DRIVER_FLAG_COLLISION = 0x00000200
RDB_MOCKUP_INPUT0_MFL_PLUS = 0x00000001
RDB_MOCKUP_INPUT0_MFL_MINUS = 0x00000002
RDB_MOCKUP_INPUT0_MFL_PHONE = 0x00000004
RDB_MOCKUP_INPUT0_MFL_VOICE = 0x00000008
RDB_MOCKUP_INPUT0_MFL_UP = 0x00000010
RDB_MOCKUP_INPUT0_MFL_DOWN = 0x00000020
RDB_MOCKUP_INPUT0_MFL_DIAMOND = 0x00000040
RDB_MOCKUP_INPUT0_MFL_STAR = 0x00000080
RDB_MOCKUP_INPUT0_TURN_UP = 0x00000100
RDB_MOCKUP_INPUT0_TURN_UP_2 = 0x00000200
RDB_MOCKUP_INPUT0_TURN_DOWN = 0x00000400
RDB_MOCKUP_INPUT0_TURN_DOWN_2 = 0x00000800
RDB_MOCKUP_INPUT0_TURN_FLASHER = 0x00001000
RDB_MOCKUP_INPUT0_TURN_HIGHBEAM = 0x00002000
RDB_MOCKUP_INPUT0_TURN_CHECK = 0x00004000
RDB_MOCKUP_INPUT0_TURN_BC = 0x00008000
RDB_MOCKUP_INPUT0_ACC_BACK = 0x00010000
RDB_MOCKUP_INPUT0_ACC_BACK_2 = 0x00020000
RDB_MOCKUP_INPUT0_ACC_FWD = 0x00040000
RDB_MOCKUP_INPUT0_ACC_FWD_2 = 0x00080000
RDB_MOCKUP_INPUT0_ACC_UP = 0x00100000
RDB_MOCKUP_INPUT0_ACC_DOWN = 0x00200000
RDB_MOCKUP_INPUT0_ACC_SET = 0x00400000
RDB_MOCKUP_INPUT0_HORN = 0x00800000
RDB_MOCKUP_INPUT0_WIPER_INTERVAL = 0x03000000
RDB_MOCKUP_INPUT0_WIPER_INTERVAL_1 = 0x01000000
RDB_MOCKUP_INPUT0_WIPER_INTERVAL_2 = 0x02000000
RDB_MOCKUP_INPUT0_WIPER_INTERVAL_3 = 0x03000000
RDB_MOCKUP_INPUT0_WIPER_AUTO = 0x04000000
RDB_MOCKUP_INPUT0_WIPER_BACK = 0x08000000
RDB_MOCKUP_INPUT0_WIPER_UP = 0x10000000
RDB_MOCKUP_INPUT0_WIPER_UP_2 = 0x20000000
RDB_MOCKUP_INPUT0_WIPER_DOWN = 0x40000000
RDB_MOCKUP_INPUT1_ZBE_COUNTER = 0x0000FFFF
RDB_MOCKUP_INPUT1_ZBE_FWD = 0x00010000
RDB_MOCKUP_INPUT1_ZBE_BACK = 0x00020000
RDB_MOCKUP_INPUT1_ZBE_LEFT = 0x00040000
RDB_MOCKUP_INPUT1_ZBE_RIGHT = 0x00080000
RDB_MOCKUP_INPUT1_ZBE_PRESS = 0x00100000
RDB_MOCKUP_INPUT1_ZBE_MENU = 0x00200000
RDB_MOCKUP_INPUT1_GWS_P = 0x00400000
RDB_MOCKUP_INPUT1_GWS_LOCK = 0x00800000
RDB_MOCKUP_INPUT1_GWS_FWD = 0x01000000
RDB_MOCKUP_INPUT1_GWS_FWD_2 = 0x02000000
RDB_MOCKUP_INPUT1_GWS_BACK = 0x04000000
RDB_MOCKUP_INPUT1_GWS_BACK_2 = 0x08000000
RDB_MOCKUP_INPUT1_GWS_AUTO_N = 0x10000000
RDB_MOCKUP_INPUT1_GWS_MAN_N = 0x20000000
RDB_MOCKUP_INPUT1_GWS_MAN_PLUS = 0x40000000
RDB_MOCKUP_INPUT1_GWS_MAN_MINUS = 0x80000000
RDB_MOCKUP_INPUT2_LSZ_POTI = 0x000000FF
RDB_MOCKUP_INPUT2_LSZ_PARKING = 0x00000100
RDB_MOCKUP_INPUT2_LSZ_DRIVING = 0x00000200
RDB_MOCKUP_INPUT2_LSZ_AUTO = 0x00000300
RDB_MOCKUP_INPUT2_LSZ_FOG_FRONT = 0x00000400
RDB_MOCKUP_INPUT2_LSZ_FOG_REAR = 0x00000800
RDB_MOCKUP_INPUT2_DB_DTC = 0x00001000
RDB_MOCKUP_INPUT2_DB_PDC = 0x00002000
RDB_MOCKUP_INPUT2_DB_SEAT_HEAT_L = 0x00004000
RDB_MOCKUP_INPUT2_DB_SEAT_HEAT_R = 0x00008000
RDB_MOCKUP_INPUT2_DB_STARTER = 0x00010000
RDB_MOCKUP_INPUT2_DB_HAZARD_LIGHTS = 0x00020000
RDB_MOCKUP_INPUT2_DB_LOCK = 0x00040000
RDB_MOCKUP_INPUT2_DB_STEER_ADJUST_FWD = 0x00100000
RDB_MOCKUP_INPUT2_DB_STEER_ADJUST_BACK = 0x00200000
RDB_MOCKUP_INPUT2_DB_STEER_ADJUST_UP = 0x00400000
RDB_MOCKUP_INPUT2_DB_STEER_ADJUST_DOWN = 0x00800000
RDB_DRIVER_PERCEPTION_FLAG_NONE = 0x00000000
RDB_DRIVER_PERCEPTION_FLAG_TURN_L = 0x00000001
RDB_DRIVER_PERCEPTION_FLAG_TURN_R = 0x00000002
RDB_DRIVER_INPUT_VALIDITY_NONE = 0x00000000
RDB_DRIVER_INPUT_VALIDITY_STEERING_WHEEL = 0x00000001
RDB_DRIVER_INPUT_VALIDITY_STEERING_SPEED = 0x00000002
RDB_DRIVER_INPUT_VALIDITY_THROTTLE = 0x00000004
RDB_DRIVER_INPUT_VALIDITY_BRAKE = 0x00000008
RDB_DRIVER_INPUT_VALIDITY_CLUTCH = 0x00000010
RDB_DRIVER_INPUT_VALIDITY_TGT_ACCEL = 0x00000020
RDB_DRIVER_INPUT_VALIDITY_TGT_STEERING = 0x00000040
RDB_DRIVER_INPUT_VALIDITY_GEAR = 0x00000080
RDB_DRIVER_INPUT_VALIDITY_CURVATURE = 0x00000100
RDB_DRIVER_INPUT_VALIDITY_STEERING_TORQUE = 0x00000200
RDB_DRIVER_INPUT_VALIDITY_ENGINE_TORQUE = 0x00000400
RDB_DRIVER_INPUT_VALIDITY_TGT_SPEED = 0x00000800
RDB_DRIVER_INPUT_VALIDITY_INFO_ONLY = 0x00001000
RDB_DRIVER_INPUT_VALIDITY_ADD_ON = 0x00002000
RDB_DRIVER_INPUT_VALIDITY_FLAGS = 0x00004000
RDB_DRIVER_INPUT_VALIDITY_MOCKUP_INPUT0 = 0x00008000
RDB_DRIVER_INPUT_VALIDITY_MOCKUP_INPUT1 = 0x00010000
RDB_DRIVER_INPUT_VALIDITY_MOCKUP_INPUT2 = 0x00020000
RDB_DRIVER_INPUT_VALIDITY_STEERING_TPOS = 0x00040000
RDB_DRIVER_INPUT_VALIDITY_MODIFIED = 0x00080000
RDB_SCORING_FLAG_NONE = 0x00000000
RDB_SCORING_FLAG_COLLISION = 0x00000001
RDB_SCORING_FLAG_OFF_ROAD = 0x00000002
RDB_COORD_FLAG_NONE = 0x00
RDB_COORD_FLAG_POINT_VALID = 0x01
RDB_COORD_FLAG_ANGLES_VALID = 0x02
RDB_LIGHT_SOURCE_FLAG_NONE = 0x0000
RDB_LIGHT_SOURCE_FLAG_USE_FRUSTUM = 0x0001
RDB_LIGHT_SOURCE_FLAG_PERSISTENT = 0x0002
RDB_LIGHT_SOURCE_FLAG_STENCIL = 0x0004
RDB_SENSOR_OBJECT_FLAG_NONE = 0x0000
RDB_SENSOR_OBJECT_FLAG_CRITICALITY_LOW = 0x0001
RDB_SENSOR_OBJECT_FLAG_CRITICALITY_MEDIUM = 0x0002
RDB_SENSOR_OBJECT_FLAG_CRITICALITY_HIGH = 0x0003
RDB_ROAD_EVENT_NONE = 0x00000000
RDB_ROAD_EVENT_POTHOLE = 0x00000001
RDB_ENV_FLAG_NONE = 0x0000
RDB_ENV_FLAG_PRECIPITATION_SNOW = 0x0001
RDB_ENV_FLAG_PRECIPITATION_HAIL = 0x0002
RDB_ENV_FLAG_ROAD_SURFACE_WET = 0x0004
RDB_ENV_FLAG_STREET_LAMPS = 0x0008
RDB_SHM_ID_IMG_GENERATOR_OUT = 0x0816a
RDB_SHM_ID_IMG_GENERATOR_IN = 0x0817a
RDB_SHM_ID_CONTROL_GENERATOR_IN = 0x0817b
RDB_SHM_ID_CUSTOM_01 = 0x0818a
RDB_SHM_ID_TC_IN = 0x08200
RDB_SHM_ID_TC_OUT = 0x08201
RDB_SHM_ID_DYN_2_STEER = 0x08210
RDB_SHM_ID_STEER_2_DYN = 0x08211
RDB_SHM_BUFFER_FLAG_NONE = 0x00000000
RDB_SHM_BUFFER_FLAG_LOCK = 0x00000001
RDB_SHM_BUFFER_FLAG_TC = 0x00000002
RDB_SHM_BUFFER_FLAG_IG = 0x00000004
RDB_OBJECT_CFG_FLAG_NONE = 0x0000
RDB_OBJECT_CFG_FLAG_CTRL_EXTERN = 0x0001
RDB_OBJECT_CFG_FLAG_MODEL_ID_VALID = 0x0002
RDB_ROAD_POS_FLAG_NONE = 0x00
RDB_ROAD_POS_FLAG_DIR_FWD = 0x01
RDB_ROAD_POS_FLAG_DIR_REAR = 0x02
RDB_ROAD_POS_FLAG_OFFROAD = 0x04
RDB_CONTACT_POINT_FLAG_NONE = 0x0000
RDB_CONTACT_POINT_FLAG_PLAYER_VALID = 0x0001
RDB_SYNC_CMD_RENDER_CONTINUOUS = 0x00000080
RDB_SYNC_CMD_RENDER_PAUSE = 0x00000100
RDB_SYNC_CMD_RENDER_SINGLE_FRAME = 0x00000200
RDB_TRAJECTORY_FLAG_NONE = 0x0000
RDB_TRAJECTORY_FLAG_TIME_DOMAIN = 0x0001
RDB_DYN_2_STEER_STATE_NONE = 0x0000
RDB_DYN_2_STEER_STATE_PAUSE = 0x0001
RDB_DYN_2_STEER_STATE_RUN = 0x0002
RDB_DYN_2_STEER_STATE_ERROR = 0x0004
RDB_DYN_2_STEER_CMD_NONE = 0x0000
RDB_DYN_2_STEER_CMD_RESET = 0x0001
RDB_DYN_2_STEER_EFFECT_NONE = 0x00000000
RDB_DYN_2_STEER_EFFECT_TIRE_MODEL = 0x00000001
RDB_DYN_2_STEER_EFFECT_VIBRATION_10HZ = 0x00000002
RDB_STEER_2_DYN_STATE_OFF = 0x00000000
RDB_STEER_2_DYN_STATE_INIT = 0x00000001
RDB_STEER_2_DYN_STATE_FAIL = 0x00000002
RDB_STEER_2_DYN_STATE_RUN = 0x00000004
RDB_STEER_2_DYN_STATE_OVER_LIMITS = 0x00000008
RDB_WHEEL_FLAG_NONE = 0x0000
RDB_WHEEL_FLAG_ON_ROADMARK = 0x0001
RDB_MOTION_SYSTEM_FLAG_NONE = 0x0000
RDB_MOTION_SYSTEM_FLAG_ACTIVE = 0x0001
RDB_MOTION_SYSTEM_FLAG_ERROR = 0x0002
RDB_CUSTOM_TRACK_CTRL_FLAG_VIS_SENSOR_A = 0x0001
RDB_CUSTOM_TRACK_CTRL_FLAG_VIS_SENSOR_B = 0x0002
RDB_CUSTOM_TRACK_CTRL_FLAG_VIS_GFX = 0x0004
RDB_CUSTOM_TRACK_CTRL_FLAG_VIS_SENSOR_C = 0x0008
RDB_CUSTOM_TRACK_CTRL_FLAG_VIS_SENSOR_D = 0x0010
RDB_CUSTOM_TRACK_CTRL_FLAG_NAME_BY_ID = 0x0100
RDB_CUSTOM_TRACK_CTRL_FLAG_PLAYER_ACTIVE = 0x0200
RDB_CUSTOM_TRACK_CTRL_VALIDITY_DEFAULT = 0x00000000
RDB_CUSTOM_TRACK_CTRL_VALIDITY_TGT_ACCEL = 0x00000001
RDB_CUSTOM_TRACK_CTRL_VALIDITY_STEERING_TPOS = 0x00000002
RDB_ROAD_QUERY_FLAG_NONE = 0x0000
RDB_ROAD_QUERY_FLAG_RELATIVE_POS = 0x0001
class RDB_POINT_t(cstruct.CStruct):
    """RDB wire struct: a 3-D point tagged with flags, type and
    coordinate-system id.

    NOTE(review): auto-generated from the C message definitions (see
    file header) — the field layout must stay byte-compatible with the
    C side; do not reorder or retype fields here.
    """
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
    double x;
    double y;
    double z;
    unsigned char flags;
    unsigned char type;
    unsigned short system;
    """
class RDB_COORD_t(cstruct.CStruct):
    """RDB wire struct: a position (x/y/z) plus three float angles
    h/p/r and flag/type/system tags.

    NOTE(review): h/p/r presumably mean heading/pitch/roll (common RDB
    naming) — confirm against the C message definitions.  Layout is
    auto-generated; keep byte-compatible with the C side.
    """
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
    double x;
    double y;
    double z;
    float h;
    float p;
    float r;
    unsigned char flags;
    unsigned char type;
    unsigned short system;
    """
class RDB_COORD_SYSTEM_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short id;
unsigned short spare;
struct RDB_COORD_t pos;
"""
class RDB_ROAD_POS_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned short roadId;
signed char laneId;
unsigned char flags;
float roadS;
float roadT;
float laneOffset;
float hdgRel;
float pitchRel;
float rollRel;
unsigned char roadType;
unsigned char spare1;
unsigned short spare2;
float pathS;
"""
class RDB_ROADMARK_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
char id;
char prevId;
char nextId;
char laneId;
float lateralDist;
float yawRel;
double curvHor;
double curvHorDot;
float startDx;
float previewDx;
float width;
float height;
double curvVert;
double curvVertDot;
unsigned char type;
unsigned char color;
unsigned short noDataPoints;
unsigned int roadId;
unsigned int spare1;
"""
class RDB_LANE_INFO_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short roadId;
char id;
unsigned char neighborMask;
char leftLaneId;
char rightLaneId;
unsigned char borderType;
unsigned char material;
unsigned short status;
unsigned short type;
float width;
double curvVert;
double curvVertDot;
double curvHor;
double curvHorDot;
unsigned int playerId;
unsigned int spare1;
"""
class RDB_OBJECT_CFG_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned char category;
unsigned char type;
short modelId;
char name[32];
char modelName[32];
char fileName[1024];
unsigned short flags;
unsigned short spare0;
unsigned int spare1;
"""
class RDB_GEOMETRY_t(cstruct.CStruct):
    """RDB wire struct: three dimensions (dimX/Y/Z) and three offsets
    (offX/Y/Z).

    NOTE(review): presumably an object's bounding-box size and the
    offset of that box from the object reference point — confirm
    against the C message definitions.  Auto-generated layout; keep
    byte-compatible with the C side.
    """
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
    float dimX;
    float dimY;
    float dimZ;
    float offX;
    float offY;
    float offZ;
    """
class RDB_OBJECT_STATE_BASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned char category;
unsigned char type;
unsigned short visMask;
char name[32];
struct RDB_GEOMETRY_t geo;
struct RDB_COORD_t pos;
unsigned int parent;
unsigned short cfgFlags;
short cfgModelId;
"""
class RDB_OBJECT_STATE_EXT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct RDB_COORD_t speed;
struct RDB_COORD_t accel;
float traveledDist;
unsigned int spare[3];
"""
class RDB_OBJECT_STATE_t(cstruct.CStruct):
    """RDB wire struct: full object state, the concatenation of the
    base part (id, geometry, position) and the extended part (speed,
    acceleration, traveled distance).

    NOTE(review): auto-generated layout; keep byte-compatible with the
    C side.
    """
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
    struct RDB_OBJECT_STATE_BASE_t base;
    struct RDB_OBJECT_STATE_EXT_t ext;
    """
class RDB_ENGINE_BASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
float rps;
float load;
unsigned int spare1[2];
"""
class RDB_ENGINE_EXT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float rpsStart;
float torque;
float torqueInner;
float torqueMax;
float torqueFriction;
float fuelCurrent;
float fuelAverage;
float oilTemperature;
float temperature;
"""
class RDB_ENGINE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct RDB_ENGINE_BASE_t base;
struct RDB_ENGINE_EXT_t ext;
"""
class RDB_DRIVETRAIN_BASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned char gearBoxType;
unsigned char driveTrainType;
unsigned char gear;
unsigned char spare0;
unsigned int spare1[2];
"""
class RDB_DRIVETRAIN_EXT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float torqueGearBoxIn;
float torqueCenterDiffOut;
float torqueShaft;
unsigned int spare1[2];
"""
class RDB_DRIVETRAIN_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct RDB_DRIVETRAIN_BASE_t base;
struct RDB_DRIVETRAIN_EXT_t ext;
"""
class RDB_WHEEL_BASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned char id;
unsigned char flags;
unsigned char spare0[2];
float radiusStatic;
float springCompression;
float rotAngle;
float slip;
float steeringAngle;
unsigned int spare1[4];
"""
class RDB_WHEEL_EXT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float vAngular;
float forceZ;
float forceLat;
float forceLong;
float forceTireWheelXYZ[3];
float radiusDynamic;
float brakePressure;
float torqueDriveShaft;
float damperSpeed;
unsigned int spare2[4];
"""
class RDB_WHEEL_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct RDB_WHEEL_BASE_t base;
struct RDB_WHEEL_EXT_t ext;
"""
class RDB_VEHICLE_SYSTEMS_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned int lightMask;
float steering;
float steeringWheelTorque;
unsigned char accMask;
unsigned char accSpeed;
unsigned char batteryState;
char batteryRate;
unsigned short displayLightMask;
unsigned short fuelGauge;
unsigned int spare[5];
"""
class RDB_VEHICLE_SETUP_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
float mass;
float wheelBase;
int spare[4];
"""
class RDB_IMAGE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned short width;
unsigned short height;
unsigned char pixelSize;
unsigned char pixelFormat;
unsigned short cameraId;
unsigned int imgSize;
unsigned char color[4];
unsigned int spare1[3];
"""
class RDB_CUSTOM_LIGHT_B_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short lightElementId;
unsigned short width;
unsigned short height;
unsigned short spare0;
unsigned int dataSize;
unsigned int spare1[3];
"""
class RDB_CUSTOM_LIGHT_GROUP_B_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short lightElementId;
unsigned short groupId;
float intensity;
float hOffset;
float pOffset;
unsigned int spare[4];
"""
class RDB_FUNCTION_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned char type;
unsigned char dimension;
unsigned short spare;
unsigned int dataSize;
unsigned int spare1[4];
"""
class RDB_SENSOR_STATE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned char type;
unsigned char hostCategory;
unsigned short spare0;
unsigned int hostId;
char name[32];
float fovHV[2];
float clipNF[2];
struct RDB_COORD_t pos;
struct RDB_COORD_t originCoordSys;
float fovOffHV[2];
int spare[2];
"""
class RDB_SENSOR_OBJECT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned char category;
unsigned char type;
unsigned short flags;
unsigned int id;
unsigned int sensorId;
double dist;
struct RDB_COORD_t sensorPos;
char occlusion;
unsigned char spare0[3];
unsigned int spare[3];
"""
class RDB_CAMERA_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short id;
unsigned short width;
unsigned short height;
unsigned short spare0;
float clipNear;
float clipFar;
float focalX;
float focalY;
float principalX;
float principalY;
struct RDB_COORD_t pos;
unsigned int spare1[4];
"""
class RDB_LIGHT_SOURCE_BASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short id;
char templateId;
unsigned char state;
int playerId;
struct RDB_COORD_t pos;
unsigned short flags;
unsigned short spare0;
int spare1[2];
"""
class RDB_LIGHT_SOURCE_EXT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float nearFar[2];
float frustumLRBT[4];
float intensity[3];
float atten[3];
int spare1[3];
"""
class RDB_LIGHT_SOURCE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct RDB_LIGHT_SOURCE_BASE_t base;
struct RDB_LIGHT_SOURCE_EXT_t ext;
"""
class RDB_CONTACT_POINT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short id;
unsigned short flags;
struct RDB_COORD_t roadDataIn;
float friction;
int playerId;
int spare1;
"""
class RDB_TRAFFIC_SIGN_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned int playerId;
float roadDist;
struct RDB_COORD_t pos;
int type;
int subType;
float value;
unsigned int state;
char readability;
char occlusion;
unsigned short spare0;
unsigned int addOnId;
char minLane;
char maxLane;
unsigned short spare;
"""
class RDB_ROAD_STATE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
char wheelId;
unsigned char spare0;
unsigned short spare1;
unsigned int roadId;
float defaultSpeed;
float waterLevel;
unsigned int eventMask;
int spare2[12];
"""
class RDB_ENVIRONMENT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float visibility;
unsigned int timeOfDay;
float brightness;
unsigned char precipitation;
unsigned char cloudState;
unsigned short flags;
float temperature;
unsigned int spare1[7];
"""
class RDB_PED_ANIMATION_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
struct RDB_COORD_t pos;
unsigned int spare[4];
unsigned int noCoords;
unsigned int dataSize;
"""
class RDB_CUSTOM_SCORING_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
float pathS;
float roadS;
float fuelCurrent;
float fuelAverage;
unsigned int stateFlags;
float slip;
unsigned int spare[4];
"""
class RDB_TRIGGER_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float deltaT;
unsigned int frameNo;
unsigned short features;
short spare0;
"""
class RDB_IG_FRAME_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float deltaT;
unsigned int frameNo;
unsigned int spare[2];
"""
class RDB_DRIVER_CTRL_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
float steeringWheel;
float steeringSpeed;
float throttlePedal;
float brakePedal;
float clutchPedal;
float accelTgt;
float steeringTgt;
double curvatureTgt;
float steeringTorque;
float engineTorqueTgt;
float speedTgt;
unsigned char gear;
unsigned char sourceId;
unsigned char spare0[2];
unsigned int validityFlags;
unsigned int flags;
unsigned int mockupInput0;
unsigned int mockupInput1;
unsigned int mockupInput2;
unsigned int spare;
"""
class RDB_DRIVER_PERCEPTION_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
float speedFromRules;
float distToSpeed;
float spare0[4];
unsigned int flags;
unsigned int spare[4];
"""
class RDB_TRAFFIC_LIGHT_BASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
int id;
float state;
unsigned int stateMask;
"""
class RDB_TRAFFIC_LIGHT_PHASE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
float duration;
unsigned char type;
unsigned char spare[3];
"""
class RDB_TRAFFIC_LIGHT_EXT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
int ctrlId;
float cycleTime;
unsigned short noPhases;
unsigned int dataSize;
"""
class RDB_TRAFFIC_LIGHT_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
struct RDB_TRAFFIC_LIGHT_BASE_t base;
struct RDB_TRAFFIC_LIGHT_EXT_t ext;
"""
class RDB_SYNC_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int mask;
unsigned int cmdMask;
double systemTime;
"""
class RDB_ROAD_QUERY_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short id;
unsigned short flags;
unsigned short spare[2];
double x;
double y;
"""
class RDB_SCP_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short version;
unsigned short spare;
char sender[64];
char receiver[64];
unsigned int dataSize;
"""
class RDB_PROXY_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned short protocol;
unsigned short pkgId;
unsigned int spare[6];
unsigned int dataSize;
"""
class RDB_TRAJECTORY_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
double spacing;
unsigned short flags;
unsigned short noDataPoints;
unsigned int spare[4];
"""
class RDB_MOTION_SYSTEM_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned int flags;
struct RDB_COORD_t pos;
struct RDB_COORD_t speed;
unsigned int spare[6];
"""
class RDB_CUSTOM_OBJECT_CTRL_TRACK_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned short flags;
unsigned char posType;
char dir;
unsigned int roadId;
double initialRoadDeltaS;
float targetRoadT;
float speedTgtS;
float minAccelS;
float maxAccelS;
float accelTgt;
unsigned int validityFlags;
unsigned int spare[4];
"""
class RDB_FREESPACE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
float distance;
float angleLeft;
float angleRight;
float distanceLeft;
float distanceRight;
unsigned char stateLeft;
unsigned char stateRight;
unsigned char stateDistance;
unsigned char noVisibleObjects;
unsigned int spare1[3];
"""
class RDB_DYN_EL_SWITCH_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int objectId;
unsigned int elementId;
unsigned char scope;
unsigned char spare0[3];
unsigned int state;
unsigned int spare1[2];
"""
class RDB_DYN_EL_DOF_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int objectId;
unsigned int elementId;
unsigned char scope;
unsigned char validity;
unsigned char nValues;
unsigned char spare0;
unsigned int spare1[3];
"""
class RDB_END_OF_FRAME_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
"""
class RDB_START_OF_FRAME_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
"""
class RDB_STEER_2_DYN_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned int state;
float angle;
float rev;
float torque;
unsigned int spare[8];
"""
class RDB_DYN_2_STEER_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int playerId;
unsigned short state;
unsigned short cmd;
unsigned int effects;
float torque;
float friction;
float damping;
float stiffness;
float velocity;
float angle;
float neutralPos;
float dampingMaxTorque;
unsigned int spare[6];
"""
class RDB_RAY_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int id;
unsigned int emitterId;
unsigned char type;
unsigned char spare0;
unsigned short spare2;
float length;
unsigned int spare1[3];
struct RDB_COORD_t ray;
"""
class RDB_RT_PERFORMANCE_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int noOverruns;
unsigned int noUnderruns;
unsigned int noMeasurements;
float tolerance;
float nominalFrameTime;
float actualFrameTime;
unsigned int spare1[6];
"""
class RDB_MSG_HDR_t(cstruct.CStruct):
    """Header preceding every RDB message.

    Carries the framing magic number and protocol version, the sizes of the
    header and of the payload that follows, plus the frame counter and
    simulation time stamp.
    """
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
        unsigned short magicNo;
        unsigned short version;
        unsigned int headerSize;
        unsigned int dataSize;
        unsigned int frameNo;
        double simTime;
    """
class RDB_MSG_ENTRY_HDR_t(cstruct.CStruct):
    """Header of a single entry inside an RDB message.

    ``pkgId`` selects the payload layout (see ``type_dict`` below);
    ``elementSize`` is the size of one element so that
    ``dataSize / elementSize`` gives the element count.
    """
    __byte_order__ = cstruct.LITTLE_ENDIAN
    __struct__ = """
        unsigned int headerSize;
        unsigned int dataSize;
        unsigned int elementSize;
        unsigned short pkgId;
        unsigned short flags;
    """
# class RDB_MSG_t(cstruct.CStruct):
# __byte_order__ = cstruct.LITTLE_ENDIAN
# __struct__ = """
# struct RDB_MSG_HDR_t hdr;
# struct RDB_MSG_ENTRY_HDR_t entryHdr;
# struct RDB_MSG_UNION_t u;
# """
class RDB_SHM_BUFFER_INFO_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int thisSize;
unsigned int bufferSize;
unsigned short id;
unsigned short spare0;
unsigned int flags;
unsigned int offset;
unsigned int spare1[4];
"""
class RDB_SHM_HDR_t(cstruct.CStruct):
__byte_order__ = cstruct.LITTLE_ENDIAN
__struct__ = """
unsigned int headerSize;
unsigned int dataSize;
unsigned char noBuffers;
"""
# Maps numeric RDB package id -> symbolic RDB_PKG_ID_* name (useful for
# logging / introspection of incoming message entries).
lookup_dict = {1: 'RDB_PKG_ID_START_OF_FRAME',
               2: 'RDB_PKG_ID_END_OF_FRAME',
               3: 'RDB_PKG_ID_COORD_SYSTEM',
               4: 'RDB_PKG_ID_COORD',
               5: 'RDB_PKG_ID_ROAD_POS',
               6: 'RDB_PKG_ID_LANE_INFO',
               7: 'RDB_PKG_ID_ROADMARK',
               8: 'RDB_PKG_ID_OBJECT_CFG',
               9: 'RDB_PKG_ID_OBJECT_STATE',
               10: 'RDB_PKG_ID_VEHICLE_SYSTEMS',
               11: 'RDB_PKG_ID_VEHICLE_SETUP',
               12: 'RDB_PKG_ID_ENGINE',
               13: 'RDB_PKG_ID_DRIVETRAIN',
               14: 'RDB_PKG_ID_WHEEL',
               15: 'RDB_PKG_ID_PED_ANIMATION',
               16: 'RDB_PKG_ID_SENSOR_STATE',
               17: 'RDB_PKG_ID_SENSOR_OBJECT',
               18: 'RDB_PKG_ID_CAMERA',
               19: 'RDB_PKG_ID_CONTACT_POINT',
               20: 'RDB_PKG_ID_TRAFFIC_SIGN',
               21: 'RDB_PKG_ID_ROAD_STATE',
               22: 'RDB_PKG_ID_IMAGE',
               23: 'RDB_PKG_ID_LIGHT_SOURCE',
               24: 'RDB_PKG_ID_ENVIRONMENT',
               25: 'RDB_PKG_ID_TRIGGER',
               26: 'RDB_PKG_ID_DRIVER_CTRL',
               27: 'RDB_PKG_ID_TRAFFIC_LIGHT',
               28: 'RDB_PKG_ID_SYNC',
               29: 'RDB_PKG_ID_DRIVER_PERCEPTION',
               30: 'RDB_PKG_ID_LIGHT_MAP',
               31: 'RDB_PKG_ID_TONE_MAPPING',
               32: 'RDB_PKG_ID_ROAD_QUERY',
               33: 'RDB_PKG_ID_SCP',
               34: 'RDB_PKG_ID_TRAJECTORY',
               35: 'RDB_PKG_ID_DYN_2_STEER',
               36: 'RDB_PKG_ID_STEER_2_DYN',
               37: 'RDB_PKG_ID_PROXY',
               38: 'RDB_PKG_ID_MOTION_SYSTEM',
               39: 'RDB_PKG_ID_OCCLUSION_MATRIX',
               40: 'RDB_PKG_ID_FREESPACE',
               41: 'RDB_PKG_ID_DYN_EL_SWITCH',
               42: 'RDB_PKG_ID_DYN_EL_DOF',
               43: 'RDB_PKG_ID_IG_FRAME',
               44: 'RDB_PKG_ID_RAY',
               45: 'RDB_PKG_ID_RT_PERFORMANCE',
               # Vendor/custom package id ranges start at 10000.
               10000: 'RDB_PKG_ID_CUSTOM_SCORING',
               10001: 'RDB_PKG_ID_CUSTOM_OBJECT_CTRL_TRACK',
               10002: 'RDB_PKG_ID_CUSTOM_LIGHT_B',
               10003: 'RDB_PKG_ID_CUSTOM_LIGHT_A',
               10004: 'RDB_PKG_ID_CUSTOM_LIGHT_GROUP_B',
               12000: 'RDB_PKG_ID_CUSTOM_AUDI_FORUM',
               12100: 'RDB_PKG_ID_CUSTOM_OPTIX_START',
               12101: 'RDB_PKG_ID_OPTIX_BUFFER',
               12149: 'RDB_PKG_ID_CUSTOM_OPTIX_END',
               12150: 'RDB_PKG_ID_CUSTOM_USER_A_START',
               12174: 'RDB_PKG_ID_CUSTOM_USER_A_END',
               12175: 'RDB_PKG_ID_CUSTOM_USER_B_START',
               12200: 'RDB_PKG_ID_CUSTOM_USER_B_END'}
# Maps numeric RDB package id -> cstruct class used to decode that package's
# payload. Ordered numerically for readability; ids whose struct definition
# is not (yet) declared above remain commented out.
type_dict = {
    1: RDB_START_OF_FRAME_t,
    2: RDB_END_OF_FRAME_t,
    3: RDB_COORD_SYSTEM_t,
    4: RDB_COORD_t,
    5: RDB_ROAD_POS_t,
    6: RDB_LANE_INFO_t,
    7: RDB_ROADMARK_t,
    8: RDB_OBJECT_CFG_t,
    9: RDB_OBJECT_STATE_t,
    10: RDB_VEHICLE_SYSTEMS_t,
    11: RDB_VEHICLE_SETUP_t,
    12: RDB_ENGINE_t,
    13: RDB_DRIVETRAIN_t,
    14: RDB_WHEEL_t,
    15: RDB_PED_ANIMATION_t,
    16: RDB_SENSOR_STATE_t,
    17: RDB_SENSOR_OBJECT_t,
    18: RDB_CAMERA_t,
    19: RDB_CONTACT_POINT_t,
    20: RDB_TRAFFIC_SIGN_t,
    21: RDB_ROAD_STATE_t,
    22: RDB_IMAGE_t,
    23: RDB_LIGHT_SOURCE_t,
    24: RDB_ENVIRONMENT_t,
    25: RDB_TRIGGER_t,
    26: RDB_DRIVER_CTRL_t,
    27: RDB_TRAFFIC_LIGHT_t,
    28: RDB_SYNC_t,
    29: RDB_DRIVER_PERCEPTION_t,
    # 30: RDB_LIGHT_MAP_t,
    # 31: RDB_TONE_MAPPING_t,
    32: RDB_ROAD_QUERY_t,
    33: RDB_SCP_t,
    34: RDB_TRAJECTORY_t,
    35: RDB_DYN_2_STEER_t,
    36: RDB_STEER_2_DYN_t,
    37: RDB_PROXY_t,
    38: RDB_MOTION_SYSTEM_t,
    # 39: RDB_OCCLUSION_MATRIX_t,
    40: RDB_FREESPACE_t,
    41: RDB_DYN_EL_SWITCH_t,
    42: RDB_DYN_EL_DOF_t,
    43: RDB_IG_FRAME_t,
    44: RDB_RAY_t,
    45: RDB_RT_PERFORMANCE_t,
    10000: RDB_CUSTOM_SCORING_t,
    10001: RDB_CUSTOM_OBJECT_CTRL_TRACK_t,
    10002: RDB_CUSTOM_LIGHT_B_t,
    # 10003: RDB_CUSTOM_LIGHT_A_t,
    10004: RDB_CUSTOM_LIGHT_GROUP_B_t,
    # 12000: RDB_CUSTOM_AUDI_FORUM_t,
    # 12100: RDB_CUSTOM_OPTIX_START_t,
    # 12101: RDB_OPTIX_BUFFER_t,
    # 12149: RDB_CUSTOM_OPTIX_END_t,
    # 12150: RDB_CUSTOM_USER_A_START_t,
    # 12174: RDB_CUSTOM_USER_A_END_t,
    # 12175: RDB_CUSTOM_USER_B_START_t,
    # 12200: RDB_CUSTOM_USER_B_END_t,
}
| 1.359375 | 1 |
ais-kml/descriptionService.py | ctwardy/jacobs-vault | 1 | 12768875 |
from datetime import datetime
from flask import Flask
from flask import request
from flask import send_file
try:
# restplus is dead: https://github.com/noirbizarre/flask-restplus/issues/770
from flask_restx import Resource, Api
from flask_restx import reqparse
except ImportError:
try:
from flask_restplus import Resource, Api
except ImportError:
import werkzeug
werkzeug.cached_property = werkzeug.utils.cached_property
from flask_restplus import Resource, Api
from flask_restplus import reqparse
from markupsafe import escape
import json
app = Flask(__name__)
api = Api(app)
# e.g.: http://127.0.0.1:5000/test?ts=1467244800&lat=45.0&lon=-176.0
@api.route('/test')
class TestService(Resource):
    """Smoke-test endpoint: parses ``ts``/``lat``/``lon`` query parameters
    and returns a fixed JSON payload."""

    def get(self):
        parser = reqparse.RequestParser()
        parser.add_argument('ts', type=int, help='unix epoch seconds')
        # Fixed: the two help texts below were copy-pasted from 'ts' and
        # wrongly described lat/lon as "datetime in unix time format".
        parser.add_argument('lat', type=float, help='latitude in decimal degrees')
        parser.add_argument('lon', type=float, help='longitude in decimal degrees')
        args = parser.parse_args()
        # Parse the timestamp; currently only validates that 'ts' is a
        # plausible epoch value -- the result is not used in the response.
        dt = datetime.fromtimestamp(args["ts"])
        result = {"response": "HelloWorld"}
        return result
@api.route('/get_image')
class ImageService(Resource):
    """Serves a static PNG icon as ``image/png``."""

    def get(self):
        # Path is relative to the process working directory -- assumes the
        # service is started from the project root; TODO confirm.
        icon1 = "icons/circle-xl.png"
        return send_file(icon1, mimetype='image/png')
| 2.6875 | 3 |
uninstall.py | slmjy/oci-ansible-modules | 106 | 12768876 | #!/usr/bin/env python
# Copyright (c) 2018, 2019 Oracle and/or its affiliates.
# This software is made available to you under the terms of the GPL 3.0 license or the Apache 2.0 license.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Apache License v2.0
# See LICENSE.TXT for details.
"""
Oracle Cloud Infrastructure(OCI) Ansible Modules Uninstaller Script
===================================================================
This script deletes OCI Ansible modules, Oracle docs fragments and Oracle Ansible utility file from the ansible path.
To uninstall OCI Ansible modules, execute:
$ ./uninstall.py
To execute the script with debug messages, execute:
$ ./uninstall.py --debug
author: "<NAME> (@rohitChaware)"
"""
from __future__ import print_function
import argparse
import os.path
import shutil
import sys
try:
import ansible
ANSIBLE_IS_INSTALLED = True
except ImportError:
ANSIBLE_IS_INSTALLED = False
debug = False
def parse_cli_args():
    """Build and run the uninstaller's command-line parser.

    Returns the parsed argparse namespace; the only option is ``--debug``,
    which enables diagnostic output on STDERR.
    """
    cli = argparse.ArgumentParser(description="Script to uninstall oci-ansible-role")
    cli.add_argument(
        "--debug",
        action="store_true",
        default=False,
        help="Send debug messages to STDERR",
    )
    return cli.parse_args()
def log(*args, **kwargs):
    """Print *args to STDERR, but only when the module-level ``debug``
    flag has been enabled (via --debug)."""
    if debug:
        print(*args, file=sys.stderr, **kwargs)
def main():
    """Locate the installed ansible package and remove every OCI artifact:
    module_utils/oracle, oracle* documentation fragments and the
    modules/cloud/oracle directory.

    Exits with status 1 when ansible could not be imported.
    """
    if not ANSIBLE_IS_INSTALLED:
        print("Could not load ansible module.")
        sys.exit(1)
    global debug
    args = parse_cli_args()
    if args.debug:
        debug = True
    # Resolve the on-disk location of the ansible package; every path below
    # is derived relative to it.
    ansible_path = os.path.dirname(os.path.abspath(os.path.realpath(ansible.__file__)))
    log("Ansible path: {}".format(ansible_path))
    module_utils_path = os.path.join(ansible_path, "module_utils", "oracle")
    log("Module utilities path: {}".format(module_utils_path))
    # Ansible moved doc fragments from utils/module_docs_fragments to
    # plugins/doc_fragments; prefer the new location when it exists.
    document_fragments_path_old = os.path.join(
        ansible_path, "utils", "module_docs_fragments"
    )
    document_fragments_path_new = os.path.join(ansible_path, "plugins", "doc_fragments")
    if os.path.exists(document_fragments_path_new):
        document_fragments_path = document_fragments_path_new
    else:
        document_fragments_path = document_fragments_path_old
    log("Documentation fragments path: {}".format(document_fragments_path))
    delete(module_utils_path)
    # Only doc fragments whose filename starts with "oracle" belong to the
    # OCI modules; leave everything else untouched.
    oci_docs_fragments = []
    for filename in os.listdir(document_fragments_path):
        if filename.startswith("oracle"):
            oci_docs_fragments.append(os.path.join(document_fragments_path, filename))
    delete(oci_docs_fragments)
    oracle_module_dir_path = os.path.join(ansible_path, "modules", "cloud", "oracle")
    delete(oracle_module_dir_path)
    print("Uninstalled OCI Ansible modules successfully.")
def delete(paths):
    """Delete the given path or list of paths.

    Directories are removed recursively; regular files are unlinked; paths
    that do not exist are silently skipped.

    :param paths: a single path string or a list of path strings
    """
    # Accept a single path for convenience; normalize to a list.
    # isinstance (rather than `type(...) is not list`) also accepts
    # list subclasses.
    if not isinstance(paths, list):
        paths = [paths]
    for path in paths:
        if os.path.isdir(path):
            print("Deleting directory {}".format(path))
            shutil.rmtree(path)
        elif os.path.isfile(path):
            print("Deleting {}".format(path))
            os.remove(path)
if __name__ == "__main__":
main()
| 2.171875 | 2 |
setup.py | 51pricing/toplines | 0 | 12768877 | <reponame>51pricing/toplines<filename>setup.py
#!/usr/bin/env python
# coding=utf-8
from setuptools import setup, find_packages
setup(
    # Distribution metadata for the "toplines" package.
    name = 'toplines',
    version = '0.1',
    description = 'Get top lines from a large file',
    author = 'haierol',
    author_email = '<EMAIL>',
    maintainer = '51pricing',
    maintainer_email = '<EMAIL>',
    # NOTE(review): the license field says MIT but the classifier below
    # declares "BSD License" -- confirm which one is intended.
    license = 'MIT License',
    packages = find_packages(),
    platforms = ["all"],
    url = 'https://github.com/51pricing/toplines',
    classifiers = [
        'Development Status :: 4 - Beta',
        'Operating System :: OS Independent',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: Implementation',
        'Programming Language :: Python :: 2',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Topic :: Software Development :: Libraries'
    ],
    # Runtime dependency used by the CLI entry point.
    install_requires = [
        'click',
    ],
    # Installs a "toplines" console command bound to toplines.main:cmd.
    entry_points = {
        'console_scripts': [
            'toplines = toplines.main:cmd'
        ]
    },
)
accounts/admin.py | mugagambi/retail-system | 1 | 12768878 | <reponame>mugagambi/retail-system
from django.contrib import admin
from accounts import models
class IncomeAccountAdmin(admin.ModelAdmin):
    """Admin list view for IncomeAccount: shows name/amount/created_at
    columns and adds date drill-down navigation on created_at."""
    list_display = ('name', 'amount', 'created_at')
    date_hierarchy = 'created_at'
class ExpenditureAccountAdmin(admin.ModelAdmin):
    """Admin list view for ExpenditureAccount: shows name/amount/created_at
    columns and adds date drill-down navigation on created_at."""
    list_display = ('name', 'amount', 'created_at')
    date_hierarchy = 'created_at'
# Register your models here.
admin.site.register(models.IncomeAccount, IncomeAccountAdmin)
admin.site.register(models.ExpenditureAccount, ExpenditureAccountAdmin)
| 2.171875 | 2 |
blockchain_adapter/__init__.py | bountyful/bountyfulcoins | 0 | 12768879 | <reponame>bountyful/bountyfulcoins
import logging
import unittest
import requests
logger = logging.getLogger('blockchain_adapter')
URL_BASE = 'https://blockchain.info'
URL_BALANCE = '{base}/q/addressbalance/{addr}'
CONFIRMATIONS_MIN = 6 # min recommended by blockchain docs
class BlockChainAdapter(object):
    """
    A simple wrapper around requests to query blockchain.info API.
    Currently supports getting a specified address balance
    """
    def __init__(self, *args, **kwargs):
        # Endpoints can be overridden via kwargs (handy for tests/mirrors);
        # defaults come from the module-level constants.
        self.url_base = kwargs.get('URL_BASE', URL_BASE)
        self.url_balance = kwargs.get('URL_BALANCE', URL_BALANCE)

    def get_balance_url(self, addr):
        # Build the full /q/addressbalance/<addr> URL for the given address.
        return self.url_balance.format(base=self.url_base, addr=addr)

    def get_balance(self, address):
        """Return the balance for *address* as a float (requiring at least
        CONFIRMATIONS_MIN confirmations), or None on a non-OK HTTP response.

        NOTE(review): blockchain.info reports the balance in satoshi --
        confirm callers do not expect BTC.
        """
        logger.debug('Entering get_balance')
        res = requests.get(self.get_balance_url(address), params={
            'confirmations': CONFIRMATIONS_MIN})
        if not res.ok:
            logger.error('Could not get balance, server returned: %s - %s',
                         res.status_code, res.content)
            return None
        return float(res.content)
blockchain = BlockChainAdapter()
class TestBlockChainAdapter(unittest.TestCase):
    """Integration tests for BlockChainAdapter.

    NOTE(review): these hit the live blockchain.info API over the network,
    so they can fail when offline or when the service is unavailable.
    """
    good_address = '1F1tAaz5x1HUXrCNLbtMDqcw6o5GNn4xqX'
    bad_address = 'NOT_REAL'

    def test_bad_address(self):
        # An invalid address yields a non-OK response, mapped to None.
        res = blockchain.get_balance(self.bad_address)
        self.assertIsNone(res, "get_balance should have returned 'None'"
                          " for this address")

    def test_good_address(self):
        # A valid address yields a numeric balance.
        res = blockchain.get_balance(self.good_address)
        self.assertIsInstance(res, float, "get_balance should have returned"
                              " a float number fot this address")
if __name__ == '__main__':
unittest.main()
| 3.109375 | 3 |
wisc/wisc-coords-local.py | beninato8/pokemon-go | 0 | 12768880 | from geopy.distance import geodesic
from pprint import pprint
from itertools import permutations
from tqdm import tqdm
import gmplot
from math import cos, sin, atan2, sqrt
import time
import re
home = (43.077589, -89.414075)
with open('wisc-coords-local.txt', 'r') as f:
text = f.read()
def dist_between(coords, home=home):
    """Geodesic (great-circle) distance in meters between ``coords`` and
    ``home`` (both (lat, lon) tuples); the default ``home`` is bound to the
    module-level constant at definition time."""
    return geodesic(coords, home).meters
# Parse each input line into (lat, lon, name), keeping only stops within
# ~4.7 km of home.
coords = dict()
for i, x in enumerate(text.split('\n')):
    if x == '':
        continue
    # Fields are split on "0000," -- assumes every coordinate is printed
    # with at least four trailing zeros; TODO confirm the input format
    # guarantees this.
    tmp = re.split(r'0{4},', x)
    lat = float(tmp[0])
    lon = float(tmp[1])
    dist = dist_between((lat, lon))
    if dist > 4700:
        continue
    name = tmp[2].lower()
    coords[name] = dict()
    coords[name]['coords'] = (lat, lon)
    coords[name]['distance'] = dist
# print(len(coords))
# exit()
# pprint(coords.items())
# Print stops nearest-first in "lat,lon,name" form, then two fixed extras.
for k,v in sorted(coords.items(), key = lambda k: k[1]['distance']):
    print(f"{v['coords'][0]},{v['coords'][1]},{k}")
# exit()
print("43.074757,-89.380006,afk 4 stop")
print("43.076735,-89.413106,afk 2 stop 2 gym")
exit()  # NOTE(review): script terminates here; everything below is dead code.
def center_geolocation(coords):
    """Return the (lat, lon) arithmetic mean over every entry's 'coords' pair."""
    points = [entry['coords'] for entry in coords.values()]
    avg_lat = sum(p[0] for p in points) / len(points)
    avg_lon = sum(p[1] for p in points) / len(points)
    return (avg_lat, avg_lon)
def fac(n):
    """Iterative factorial; returns 1 for any n < 2 (matching the original
    recursive base case, including negative inputs)."""
    result = 1
    while n >= 2:
        result *= n
        n -= 1
    return result
# Dead code: the unconditional exit() above stops the script before this
# point. Kept for reference -- it rendered the stops on a Google map via
# gmplot.
lats = [v['coords'][0] for v in coords.values()]
lons = [v['coords'][1] for v in coords.values()]
center = center_geolocation(coords)
gmap3 = gmplot.GoogleMapPlotter(center[0], center[1], 13)
gmap3.scatter(lats, lons, '# FF0000',
              size = 40, marker = False )
# gmap3.plot(lats, lons,
#            'cornflowerblue', edge_width = 2.5)
gmap3.draw("/Users/Nicholas/Github/pokemon-go/wisc/map.html")
tests/test_cmd.py | KrishanBhasin/giraffez | 122 | 12768881 | # -*- coding: utf-8 -*-
import pytest
from giraffez._teradata import RequestEnded, StatementEnded, StatementInfoEnded
import giraffez
from giraffez.constants import *
from giraffez.errors import *
from giraffez.types import *
class ResultsHelper:
    """
    Emulates the exception-driven fetch protocol of the CLIv2 so that the
    control flow is adequately represented: the first call raises
    StatementInfoEnded, each following call returns the next row, and
    RequestEnded is raised once all rows have been served.
    """

    def __init__(self, rows):
        self._info_sent = False
        self._pending = list(rows)

    def get(self):
        if not self._info_sent:
            self._info_sent = True
            raise StatementInfoEnded
        if not self._pending:
            raise RequestEnded
        return self._pending.pop(0)

    def __call__(self):
        return self.get()
@pytest.mark.usefixtures('config', 'context')
class TestCmd(object):
    """Tests for giraffez.Cmd: decoding fetched rows into dicts and
    surfacing credential errors."""

    def test_results(self, mocker):
        # Stub out the real Teradata connection and column discovery so
        # only the row-iteration / dict-building path is exercised.
        connect_mock = mocker.patch('giraffez.cmd.TeradataCmd._connect')
        mock_columns = mocker.patch("giraffez.cmd.Cursor._columns")
        cmd = giraffez.Cmd()
        query = "select * from db1.info"
        columns = Columns([
            ("col1", VARCHAR_NN, 50, 0, 0),
            ("col2", VARCHAR_N, 50, 0, 0),
            ("col3", VARCHAR_N, 50, 0, 0),
        ])
        mock_columns.return_value = columns
        rows = [
            ["value1", "value2", "value3"],
            ["value1", "value2", "value3"],
            ["value1", "value2", "value3"],
        ]
        expected_rows = [
            {"col1": "value1", "col2": "value2", "col3": "value3"},
            {"col1": "value1", "col2": "value2", "col3": "value3"},
            {"col1": "value1", "col2": "value2", "col3": "value3"},
        ]
        cmd.cmd = mocker.MagicMock()
        # ResultsHelper reproduces the CLIv2 exception-driven fetch protocol.
        cmd.cmd.fetchone.side_effect = ResultsHelper(rows)
        result = list(cmd.execute(query))
        assert [x.items() for x in result] == expected_rows
        cmd._close()
        # This ensures that the config was proper mocked
        connect_mock.assert_called_with('db1', 'user123', '<PASSWORD>', None, None)

    def test_invalid_credentials(self, mocker):
        # With protect=True, an InvalidCredentialsError raised during
        # connect must propagate to the caller.
        connect_mock = mocker.patch('giraffez.cmd.TeradataCmd._connect')
        connect_mock.side_effect = InvalidCredentialsError("test")
        with pytest.raises(InvalidCredentialsError):
            cmd = giraffez.Cmd(protect=True)
            cmd._close()
@pytest.mark.usefixtures('config', 'context', 'tmpfiles')
class TestInsert(object):
def test_insert_from_file(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(100):
rows.append("|".join(["value1", "value2", "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
assert result.get('count') == 100
def test_insert_from_file_quoted(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(99):
rows.append("|".join(["value1", "value2", "value3"]))
rows.append("|".join(["value1",'"value2|withpipe"', "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
assert result.get('count') == 100
def test_insert_from_file_single_quoted(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(99):
rows.append("|".join(["value1", "value2", "value3"]))
rows.append("|".join(["value1","'value2|withpipe'", "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|", quotechar="'")
assert result.get('count') == 100
def test_insert_from_file_nonstandard_quote(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
rows = []
for i in range(99):
rows.append("|".join(["value1", "value2", "value3"]))
rows.append("|".join(['va"lue1','$value2|withpipe"and"quote$', "value3"]))
f.write("\n".join(rows))
with giraffez.Cmd() as cmd:
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|", quotechar="$")
assert result.get('count') == 100
def test_insert_from_file_error(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3", "value4"]))
f.write("\n")
with giraffez.Cmd() as cmd:
cmd.panic = False
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
def test_insert_from_file_error_panic(self, mocker, tmpfiles):
mock_connect = mocker.patch("giraffez.cmd.TeradataCmd._connect")
mock_execute = mocker.patch("giraffez.cmd.TeradataCmd.execute")
columns = Columns([
("col1", VARCHAR_NN, 50, 0, 0),
("col2", VARCHAR_N, 50, 0, 0),
("col3", VARCHAR_N, 50, 0, 0),
])
mock_columns = mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns")
mock_columns.return_value = columns
with open(tmpfiles.load_file, 'w') as f:
f.write("|".join(["col1", "col2", "col3"]))
f.write("\n")
f.write("|".join(["value1", "value2", "value3", "value4"]))
f.write("\n")
with giraffez.Cmd() as cmd:
with pytest.raises(GiraffeEncodeError):
result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
print(result)
def test_insert_from_file_invalid_header(self, mocker, tmpfiles):
    """Header rows that do not match the table's columns are rejected."""
    mocker.patch("giraffez.cmd.TeradataCmd._connect")
    mocker.patch("giraffez.cmd.TeradataCmd.execute")
    table_columns = Columns([
        ("col1", VARCHAR_NN, 50, 0, 0),
        ("col2", VARCHAR_N, 50, 0, 0),
        ("col3", VARCHAR_N, 50, 0, 0),
    ])
    mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns").return_value = table_columns
    # Each case: (header fields, exception expected from insert).
    cases = [
        # Invalid column (blank string)
        (["col1", "col2", "col3", "", ""], GiraffeError),
        # Invalid column (wrong name)
        (["col1", "col2", "col4"], GiraffeError),
        # Too many columns (duplicate name)
        (["col1", "col2", "col3", "col3"], GiraffeEncodeError),
    ]
    for header_fields, expected_error in cases:
        with open(tmpfiles.load_file, 'w') as handle:
            handle.write("|".join(header_fields))
            handle.write("\n")
            handle.write("|".join(["value1", "value2", "value3"]))
            handle.write("\n")
        with giraffez.Cmd() as cmd:
            with pytest.raises(expected_error):
                result = cmd.insert("db1.test", tmpfiles.load_file, delimiter="|")
                print(result)
def test_insert_insert_no_specify_fields(self, mocker):
    """Rows narrower than the table raise GiraffeEncodeError when fields are omitted."""
    mocker.patch("giraffez.cmd.TeradataCmd._connect")
    mocker.patch("giraffez.cmd.TeradataCmd.execute")
    table_columns = Columns([
        ("col1", VARCHAR_NN, 50, 0, 0),
        ("col2", VARCHAR_N, 50, 0, 0),
        ("col3", VARCHAR_N, 50, 0, 0),
    ])
    mocker.patch("giraffez.cmd.TeradataCmd.fetch_columns").return_value = table_columns
    # Three identical two-field rows against a three-column table.
    short_rows = [("value1", "value3")] * 3
    with giraffez.Cmd() as cmd:
        with pytest.raises(GiraffeEncodeError):
            cmd.insert("db1.test", short_rows)
| 2.28125 | 2 |
setup.py | mrexmelle/setuptools-example-hello | 0 | 12768882 | <reponame>mrexmelle/setuptools-example-hello
#!/bin/python
from setuptools import setup, find_packages

# Minimal example package exposing one console script (`setuptools-hello`).
setup(
    name="setuptools-example-hello",
    url="https://github.com/mrexmelle/setuptools-example-hello",
    version="1.0",
    packages=find_packages(),
    entry_points={
        'console_scripts': [
            'setuptools-hello = hello.main:main',
        ],
    },
)
| 1.414063 | 1 |
tasks/data.py | jrnold/acw_battle_data | 15 | 12768883 | #!/usr/bin/env python3
"""Command line tasks to build and deploy the ACW Battle Data."""
import os
import shutil
from os import path
import logging
from invoke import task
LOGGER = logging.getLogger(__name__)
@task
def setup(ctx):
    """Ensure the destination directory exists (idempotent)."""
    target = ctx.dst
    os.makedirs(target, exist_ok=True)
@task(setup)
def unit_sizes(ctx):
    """Copy the raw unit-size tables into the destination directory."""
    raw_dir = path.join(ctx.src, 'rawdata', 'unit_sizes')
    for filename in ('unit_sizes.csv', 'eicher_units_table.csv'):
        shutil.copy(path.join(raw_dir, filename), ctx.dst)
@task(setup)
def aad(ctx):
    """Build the AAD CWSAC initial data."""
    command = f"{ctx.python} -m acwbattledata.aad {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def cwsac(ctx):
    """Build the CWSAC Report I data."""
    command = f"{ctx.python} -m acwbattledata.cwsac {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def cws2(ctx):
    """Build the CWSAC Report II data."""
    command = f"{ctx.python} -m acwbattledata.cws2 {ctx.src} {ctx.dst}"
    ctx.run(command)
@task
def download_cwss(ctx):
    """Fetch the raw CWSS files from S3, skipping any already present locally."""
    remote_paths = (
        'old/battle.xml',
        'old/persons.xml',
        'old/battleunitlink.xml',
        'new/tsv/Regiments_Unitz.tsv',
        'new/tsv/State_Name.tsv',
        'new/tsv/Unititle.tsv',
        'new/tsv/Contitle.tsv',
        'new/tsv/Category.tsv',
    )
    for remote in remote_paths:
        local = path.join(ctx.cwss.data_dir, path.basename(remote))
        if os.path.exists(local):
            print(f"{local} exists")
            continue
        ctx.run(f"aws s3 cp --region {ctx.cwss.s3.region} "
                f" s3://{ctx.cwss.s3.bucket}/{remote} "
                f" {local} ")
@task(pre=[setup, download_cwss])
def cwss(ctx):
    """Build the CWSS data."""
    command = (f"{ctx.python} -m acwbattledata.cwss "
               f" {ctx.src} {ctx.cwss.data_dir} {ctx.dst}")
    ctx.run(command)


@task(pre=[setup, aad, cwsac, cws2, cwss, unit_sizes])
def nps(ctx):
    """Build the NPS Combined Data."""
    command = f"{ctx.Rscript} bin/build_nps_combined.R {ctx.src} {ctx.dst}"
    ctx.run(command)
def _build_acw_module(ctx, module):
    """Run ``{python} -m acwbattledata.<module> SRC DST`` via the task context.

    Shared by the single-command build tasks below, which previously each
    repeated the same f-string inline.
    """
    ctx.run(f"{ctx.python} -m acwbattledata.{module} {ctx.src} {ctx.dst}")


@task(setup)
def bodart(ctx):
    """Build data from Bodart."""
    _build_acw_module(ctx, 'bodart')


@task(setup)
def dyer(ctx):
    """Build data from Dyer (1908)."""
    _build_acw_module(ctx, 'dyer')


@task(setup)
def fox(ctx):
    """Build data from Fox."""
    _build_acw_module(ctx, 'fox')


@task(setup)
def greer(ctx):
    """Build weekly casualty data from Greer."""
    _build_acw_module(ctx, 'greer')


@task(setup)
def kennedy(ctx):
    """Build casualty data from Kennedy."""
    _build_acw_module(ctx, 'kennedy')


@task(setup)
def livermore(ctx):
    """Build casualty data from Livermore and map it to CWSAC ids."""
    # Fixed: docstring previously said "Kennedy" (copy-paste error).
    ctx.run(f"{ctx.Rscript} bin/build_livermore.R "
            f"{ctx.src} {ctx.dst}")
    _build_acw_module(ctx, 'livermore_to_cwsac')


@task(setup)
def thorpe(ctx):
    """Build Thorpe data."""
    _build_acw_module(ctx, 'thorpe')
@task(setup)
def nyt(ctx):
    """Build New York Times chronology data."""
    source_file = path.join(ctx.src, "rawdata", "nytimes_civil_war_chronology",
                            "nytimes_civil_war_chronology.json")
    shutil.copy(source_file, ctx.dst)


@task(setup)
def phisterer(ctx):
    """Build phisterer data."""
    command = f"{ctx.python} -m acwbattledata.phisterer {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def shenandoah(ctx):
    """Build the NPS Shenandoah Report Data."""
    command = f"{ctx.python} -m acwbattledata.shenandoah {ctx.src} {ctx.dst}"
    ctx.run(command)
@task(pre=[setup, unit_sizes])
def clodfelter(ctx):
    """Build the Clodfelter data."""
    build_cmd = f"{ctx.python} -m acwbattledata.clodfelter {ctx.src} {ctx.dst}"
    ctx.run(build_cmd)
    update_cmd = (f"{ctx.Rscript} bin/update_clodfelter_forces.R "
                  f"{ctx.src} {ctx.dst}")
    ctx.run(update_cmd)


@task(setup)
def cdb90(ctx):
    """Build the CDB90 data."""
    command = f"{ctx.python} -m acwbattledata.cdb90 {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def civilwarorg(ctx):
    """Build the civilwar.org data."""
    command = f"{ctx.python} -m acwbattledata.civilwarorg {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def misc(ctx):
    """Build some miscellaneous datasets."""
    command = f"{ctx.python} -m acwbattledata.misc {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def battlemisc(ctx):
    """Build miscellaneous battle data."""
    command = f"{ctx.python} -m acwbattledata.battlemisc {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def ships(ctx):
    """Build the dataset on ships."""
    command = f"{ctx.python} -m acwbattledata.ships {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def wikipedia(ctx):
    """Build wikipedia data."""
    command = f"{ctx.python} -m acwbattledata.wikipedia {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def eicher(ctx):
    """Build Eicher datasets."""
    command = f"{ctx.python} -m acwbattledata.eicher {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(setup)
def download_wikipedia(ctx):
    """Download wikipedia data."""
    outdir = path.join(ctx.src, 'wikipedia')
    command = f"{ctx.python} bin/download_wikipedia.py {ctx.src} {outdir}"
    ctx.run(command)
# Ordered collection of dataset tasks the aggregate ``build`` task depends on.
DATA_TASKS = [
    unit_sizes, aad, shenandoah, cwsac, cws2, cwss, nps, bodart, dyer, fox,
    greer, kennedy, livermore, thorpe, nyt, phisterer, clodfelter, cdb90,
    ships, civilwarorg, wikipedia, eicher, misc, battlemisc]
"""Tasks to run before build."""
@task(setup)
def datapackage(ctx):
    """Build datapackage.json"""
    command = f"{ctx.python} -m acwbattledata.datapackage {ctx.src} {ctx.dst}"
    ctx.run(command)


@task(pre=[*DATA_TASKS, datapackage])
def build(ctx):
    """Build all datasets."""
    # All work is done by the prerequisite tasks; nothing to do here.
    pass
| 2.40625 | 2 |
2017/spring/s1/samples/example3/main.py | Rouzbeh-School/Python101 | 1 | 12768884 | #!/usr/bin/python3
def main(src='obama.png', dst='obama-copy.png', buffersize=50000):
    """Copy *src* to *dst* in fixed-size chunks, printing one dot per chunk.

    The file names and chunk size were previously hard-coded; they are now
    parameters defaulting to the original values, so the ``__main__`` guard
    (and any existing caller) behaves exactly as before.

    :param src: path of the file to read
    :param dst: path of the file to write (overwritten if it exists)
    :param buffersize: bytes read per chunk
    """
    # Context managers close both files even on error; the original
    # leaked the handles by never calling close().
    with open(src, 'rb') as infile, open(dst, 'wb') as outfile:
        buffer = infile.read(buffersize)
        while len(buffer):
            outfile.write(buffer)
            print('.', end='')
            buffer = infile.read(buffersize)
    print()
    print('Done.')

if __name__ == "__main__": main()
| 3.0625 | 3 |
# Read three side lengths from stdin and classify the triangle they form.
sides = sorted(map(float, input().split()))
a, b, c = sides
# With a <= b <= c both tests reduce to the triangle inequality a + b > c.
if (a + b) > c and (c - a) < b:
    print('It is a triangle.')
    if a ** 2 + b ** 2 == c ** 2:
        print('-> also a Right triangle.')
        if a == b:
            print("-> Wow it's a Isosceles right triangle!")
else:
    print('not a triangle.')
MiniMaxAgent.py | youssef-sherif/riskgame | 0 | 12768886 | from Agent import Agent
from Color import Color
from State import State
class MiniMaxAgent(Agent):
    """Agent that chooses moves with minimax search and alpha-beta pruning.

    The four public search methods previously contained four copies of the
    same alpha-beta loop (and the two minimize variants carried the wrong
    "# Update alpha" comment on their beta updates).  They are now thin
    wrappers over the shared private :meth:`_search`; their signatures and
    return values are unchanged.
    """

    def make_decision(self, board):
        """Run one decision cycle: place armies, then attack, committing the result."""
        self.receive_armies(board)
        state = State(board, self.available_armies_count, 1)
        place_armies_result, _ = self.maximize_place_armies(state, -999999, 999999)
        # NOTE(review): place_armies_result can be None when no placement child
        # improves the initial bounds; .parent would then raise. Behavior kept
        # as in the original — confirm with the game rules whether this occurs.
        attack_result, _ = self.maximize_attack(place_armies_result.parent, -999999, 999999)
        if attack_result is not None:
            board.bulk_update(attack_result.parent.board)

    # --- public search entry points (interface unchanged) -------------------

    def maximize_place_armies(self, state, alpha, beta):
        """Return (best child node, heuristic) for the placing player (MAX)."""
        return self._search(state, alpha, beta,
                            self.get_place_armies_children, maximizing=True)

    def minimize_place_armies(self, state, alpha, beta):
        """Return (best child node, heuristic) for the opponent (MIN) placement."""
        return self._search(state, alpha, beta,
                            self.get_place_armies_children, maximizing=False)

    def maximize_attack(self, state, alpha, beta):
        """Return (best child node, heuristic) for the attacking player (MAX)."""
        return self._search(state, alpha, beta,
                            self.get_attacking_children, maximizing=True)

    def minimize_attack(self, state, alpha, beta):
        """Return (best child node, heuristic) for the opponent (MIN) attack."""
        return self._search(state, alpha, beta,
                            self.get_attacking_children, maximizing=False)

    # --- shared implementation ----------------------------------------------

    def _search(self, state, alpha, beta, children_of, maximizing):
        """Alpha-beta search shared by the four public entry points.

        :param state: current game state node
        :param alpha: best value MAX can guarantee so far
        :param beta: best value MIN can guarantee so far
        :param children_of: callable producing the child nodes of a state
        :param maximizing: True for the MAX player, False for MIN
        :return: (best child node or None, heuristic value)
        """
        best_child, best_heuristic = (None, -999999 if maximizing else 999999)
        if state.is_terminal(self.get_opponent_color()):
            return best_child, self.evaluate_heuristic(state.board)
        for child_node in children_of(state):
            self.receive_armies(child_node.state.board)
            _, heuristic = self._search(child_node.state, alpha, beta,
                                        children_of, not maximizing)
            if maximizing:
                if heuristic > best_heuristic:
                    best_child, best_heuristic = child_node, heuristic
                if heuristic > alpha:   # raise MAX's lower bound
                    alpha = heuristic
            else:
                if heuristic < best_heuristic:
                    best_child, best_heuristic = child_node, heuristic
                if heuristic < beta:    # lower MIN's upper bound
                    beta = heuristic
            # Prune: no outcome in this subtree can change the final choice.
            if alpha >= beta:
                break
        return best_child, best_heuristic
| 2.96875 | 3 |
otree_manager/otree_manager/om/views/legal.py | chkgk/otree_manager | 2 | 12768887 | from django.shortcuts import render
from django.http import HttpResponseRedirect
from django.urls import reverse
from django.contrib.flatpages.forms import FlatpageForm
from django.contrib.flatpages.models import FlatPage
from django.contrib.auth.decorators import login_required
"""Views for editing legal pages."""
# While there is some code duplication here, this is intentional
# to keep flexibility for future changes to these parts of the app
# consider this temporary.
@login_required
def imprint_edit(request):
    """Let a superuser edit the imprint/contact flatpage."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse('index'))
    page = FlatPage.objects.get(url='/legal/imprint/')
    if request.method != 'POST':
        # Plain GET: show the unbound form.
        return render(request, 'om/legal/edit_imprint.html',
                      {'form': FlatpageForm(instance=page)})
    form = FlatpageForm(request.POST or None, instance=page)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('imprint'))
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'om/legal/edit_imprint.html', {'form': form})
@login_required
def privacy_edit(request):
    """Let a superuser edit the privacy-statement flatpage."""
    if not request.user.is_superuser:
        return HttpResponseRedirect(reverse('index'))
    page = FlatPage.objects.get(url='/legal/privacy/')
    if request.method != 'POST':
        # Plain GET: show the unbound form.
        return render(request, 'om/legal/edit_privacy.html',
                      {'form': FlatpageForm(instance=page)})
    form = FlatpageForm(request.POST or None, instance=page)
    if form.is_valid():
        form.save()
        return HttpResponseRedirect(reverse('privacy'))
    # Invalid submission: re-render with the bound form and its errors.
    return render(request, 'om/legal/edit_privacy.html', {'form': form})
airdrop/testcases/functional_testcases/test_airdrop_handler.py | rajeevbbqq/airdrop-services | 0 | 12768888 | <filename>airdrop/testcases/functional_testcases/test_airdrop_handler.py
import unittest
import json
from unittest.mock import patch
from http import HTTPStatus
from datetime import datetime, timedelta
from airdrop.infrastructure.repositories.airdrop_repository import AirdropRepository
from airdrop.application.handlers.airdrop_handlers import get_airdrop_schedules, user_eligibility, user_registration, airdrop_window_claims, airdrop_window_claim_status, user_notifications
from airdrop.infrastructure.models import UserRegistration,Airdrop,AirdropWindow,UserReward,ClaimHistory,UserNotifications
class TestAirdropHandler(unittest.TestCase):
    # NOTE(review): the tests read the module-level globals assigned in
    # setUp (via ``global airdrop_id``); these class attributes appear
    # unused — confirm before removing.
    airdrop_id = None
    airdrop_window_id = None
def setUp(self):
    """Create a fresh airdrop plus an 'Occam' airdrop, each with one window."""
    self.tearDown()  # start every test from empty tables
    org_name = 'SINGNET'
    token_name = 'AGIX'
    token_type = 'CONTRACT'
    portal_link = 'https://ropsten-airdrop.singularitynet.io/'
    documentation_link = 'https://ropsten-airdrop.singularitynet.io/'
    description = 'This is a test airdrop'
    github_link = 'https://github.com/singnet/airdrop-services'
    # Registration and claim periods are both currently open.
    registration_start_date = datetime.utcnow() - timedelta(days=2)
    registration_end_date = datetime.utcnow() + timedelta(days=30)
    claim_start_date = datetime.utcnow() - timedelta(days=2)
    claim_end_date = datetime.utcnow() + timedelta(days=30)
    now = datetime.utcnow()
    contract_address = '0x5e94577b949a56279637ff74dfcff2c28408f049'
    token_address = '<KEY>'
    user_address = '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8'
    stakable_token_name = 'AGIX'
    occam_contract_address = '0x6e94577b949a56279637ff74dfcff2c28408f049'
    occam_token_address = '<KEY>'
    occam_user_address = '0xEA6741dDe714fd979de3EdF0F56AA9716B898ec8'
    occam_stakable_token_name = 'AGIX'
    airdrop_repository = AirdropRepository()
    airdrop = airdrop_repository.register_airdrop(
        token_address, org_name, token_name, token_type, contract_address, portal_link, documentation_link, description, github_link, stakable_token_name)
    # Expose the created ids to the tests via module-level globals.
    global airdrop_id
    airdrop_id = airdrop.id
    airdrop_windows = airdrop_repository.register_airdrop_window(airdrop_id=airdrop.id, airdrop_window_name='Airdrop Window 1', description='Long description', registration_required=True,
                                                                 registration_start_period=registration_start_date, registration_end_period=registration_end_date, snapshot_required=True, claim_start_period=claim_start_date, claim_end_period=claim_end_date, total_airdrop_tokens=1000000)
    global airdrop_window_id
    airdrop_window_id = airdrop_windows.id
    # A second, unrelated airdrop so queries must filter by airdrop id.
    nunet_occam_airdrop = airdrop_repository.register_airdrop(
        occam_token_address, org_name, token_name, token_type, contract_address, portal_link, documentation_link, description, github_link, occam_stakable_token_name)
    airdrop_repository.register_airdrop_window(airdrop_id=nunet_occam_airdrop.id, airdrop_window_name='Occam Window 1', description='Long description', registration_required=True,
                                               registration_start_period=registration_start_date, registration_end_period=registration_end_date, snapshot_required=True, claim_start_period=claim_start_date, claim_end_period=claim_end_date, total_airdrop_tokens=1000000)
@patch("common.utils.Utils.report_slack")
def test_get_airdrop_schedules(self, mock_report_slack):
event = {
"pathParameters": {
"token_address": "<KEY>"
}
}
result = get_airdrop_schedules(event, None)
airdrop_schedules = result['body']
self.assertIsNotNone(airdrop_schedules)
@patch("common.utils.Utils.report_slack")
def test_get_airdrop_window_eligibility(self, mock_report_slack):
address = '0x5e94577b949a56279637ff74dfcff2c28408f049'
event = {
"body": json.dumps({
"address": address,
"airdrop_id": airdrop_id,
"airdrop_window_id": airdrop_window_id
})
}
result = user_eligibility(event, None)
result = json.loads(result['body'])
user_eligibility_object = result['data']
self.assertIn(user_eligibility_object['is_eligible'], [True, False])
self.assertIn(
user_eligibility_object['is_already_registered'], [True, False])
self.assertIn(
user_eligibility_object['is_airdrop_window_claimed'], [True, False])
self.assertEqual(user_eligibility_object['user_address'], address)
self.assertEqual(user_eligibility_object['airdrop_id'], airdrop_id)
self.assertEqual(
user_eligibility_object['airdrop_window_id'], airdrop_window_id)
@patch("common.utils.Utils.report_slack")
@patch('common.utils.recover_address')
def test_get_airdrop_window_user_registration(self, mock_recover_address, mock_report_slack):
address = '0x5e94577b949a56279637ff74dfcff2c28408f049'
mock_recover_address.return_value = address
event = {
"body": json.dumps({
"address": address,
"airdrop_id": airdrop_id,
"airdrop_window_id": airdrop_window_id,
"signature": "9e05e94577b949a56279637ff74dfcff2c28408f049"
})
}
result = user_registration(event, None)
result = json.loads(result['body'])
self.assertEqual(result['status'], HTTPStatus.OK.value)
@patch("common.utils.Utils.report_slack")
@patch('common.utils.recover_address')
@patch('airdrop.infrastructure.repositories.user_repository.UserRepository.check_rewards_awarded')
@patch('airdrop.application.services.airdrop_services.AirdropServices.get_signature_for_airdrop_window_id')
@patch('airdrop.infrastructure.repositories.airdrop_repository.AirdropRepository.get_airdrop_window_claimable_info')
@patch('airdrop.infrastructure.repositories.airdrop_repository.AirdropRepository.is_claimed_airdrop_window')
def test_airdrop_window_claim(self, mock_is_claimed_airdrop_window, mock_get_airdrop_window_claimable_info, mock_get_signature_for_airdrop_window_id, mock_check_rewards_awarded, mock_recover_address, mock_report_slack):
address = '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8'
airdrop_claim_signature = '958449C28930970989dB5fFFbEdd9F44989d33a958B5fF989dB5f33a958F'
contract_address = '0x5e94577b949a56279637ff74dfcff2c28408f049'
token_address = '<KEY>'
staking_contract_address = '0x5e94577b949a56279637ff74dfcff2c28408f049'
mock_recover_address.return_value = address
mock_is_claimed_airdrop_window.return_value = {}
mock_check_rewards_awarded.return_value = True, 1000
mock_get_signature_for_airdrop_window_id.return_value = airdrop_claim_signature
mock_get_airdrop_window_claimable_info.return_value = 100, address, contract_address, token_address, staking_contract_address
mock_recover_address.return_value = address
mock_check_rewards_awarded.value = True, 1000
event = {
"body": json.dumps({
"address": address,
"airdrop_id": str(airdrop_id),
"airdrop_window_id": str(airdrop_window_id),
"signature": "9e05e94577b949a56279637ff74dfcff2c28408f049",
"token_address": token_address,
"contract_address": contract_address,
"staking_contract_address": staking_contract_address
})
}
result = airdrop_window_claims(event, None)
result = json.loads(result['body'])
claim_signature_object = result['data']
self.assertEqual(result['status'], HTTPStatus.OK.value)
self.assertEqual(claim_signature_object['user_address'], address)
@patch("common.utils.Utils.report_slack")
def test_airdrop_window_claim_update_txn(self, mock_report_slack):
address = '0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8'
airdrop_claim_signature = '958449C28930970989dB5fFFbEdd9F44989d33a958B5fF989dB5f33a958F'
event = {
"body": json.dumps({
"address": address,
"airdrop_id": airdrop_id,
"airdrop_window_id": airdrop_window_id,
"amount": "100",
"txn_hash": "9e05e94577b949a56279637ff74dfcff2c28408f049"
})
}
result = airdrop_window_claim_status(event, None)
result = json.loads(result['body'])
self.assertIsNotNone(result)
@patch("common.utils.Utils.report_slack")
def test_user_notifications(self, mock_report_slack):
event = {
"body": json.dumps({
"email": "<EMAIL>"
})
}
result = user_notifications(event, None)
result = json.loads(result['body'])
self.assertIsNotNone(result)
def test_fetch_total_eligibility_amount(self):
    """Eligibility only counts registered windows whose claim period has started."""
    # Clear every table so this scenario starts from scratch.
    AirdropRepository().session.query(ClaimHistory).delete()
    AirdropRepository().session.query(UserRegistration).delete()
    AirdropRepository().session.query(UserReward).delete()
    AirdropRepository().session.query(AirdropWindow).delete()
    AirdropRepository().session.query(Airdrop).delete()
    airdrop_repository = AirdropRepository()
    airdrop = airdrop_repository.register_airdrop(
        "0xEA674fdDe714fd979de3EdF0F56AA9716B898ec8", "TEST", "TEST", "token_type",
        "0x2fc8ae60108765056ff63a07843a5b7ec9ff89ef", "portal_link",
        "documentation_link",
        "description", "github_link", "stakable_token_name")
    registration_start_date = datetime.utcnow() - timedelta(days=2)
    registration_end_date = datetime.utcnow() + timedelta(days=30)
    claim_start_date = datetime.utcnow() - timedelta(days=5)
    claim_end_date = datetime.utcnow() + timedelta(days=30)
    # Window 1: claim period already open (started 5 days ago).
    airdrop_window1 = airdrop_repository.register_airdrop_window(airdrop_id=airdrop.id,
                                                                 airdrop_window_name='Airdrop Window 1',
                                                                 description='Long description',
                                                                 registration_required=True,
                                                                 registration_start_period=registration_start_date,
                                                                 registration_end_period=registration_end_date,
                                                                 snapshot_required=True,
                                                                 claim_start_period=claim_start_date,
                                                                 claim_end_period=claim_end_date,
                                                                 total_airdrop_tokens=1000000)
    # Window 2: claim period open (started 2 days ago).
    airdrop_window2 = airdrop_repository.register_airdrop_window(airdrop_id=airdrop.id,
                                                                 airdrop_window_name='Airdrop Window 2',
                                                                 description='Long description',
                                                                 registration_required=True,
                                                                 registration_start_period=registration_start_date,
                                                                 registration_end_period=registration_end_date,
                                                                 snapshot_required=True,
                                                                 claim_start_period=datetime.utcnow() - timedelta(days=2),
                                                                 claim_end_period=claim_end_date,
                                                                 total_airdrop_tokens=1000000)
    # Window 3: claim period entirely in the future (must never count).
    airdrop_window3 = airdrop_repository.register_airdrop_window(airdrop_id=airdrop.id,
                                                                 airdrop_window_name='Airdrop Window 3',
                                                                 description='Long description',
                                                                 registration_required=True,
                                                                 registration_start_period=registration_start_date,
                                                                 registration_end_period=registration_end_date,
                                                                 snapshot_required=True,
                                                                 claim_start_period=datetime.utcnow() + timedelta(
                                                                     days=20),
                                                                 claim_end_period=datetime.utcnow() + timedelta(
                                                                     days=25),
                                                                 total_airdrop_tokens=1000000)
    # The user has a 100-token reward in each of the three windows.
    airdrop_repository.register_user_rewards(airdrop.id,airdrop_window1.id,100,
                                             '0xCc3cD60FF9936B7C9272a649b24f290ADa562469',1,1)
    airdrop_repository.register_user_rewards(airdrop.id, airdrop_window2.id, 100,
                                             '0xCc3cD60FF9936B7C9272a649b24f290ADa562469',1,1)
    airdrop_repository.register_user_rewards(airdrop.id, airdrop_window3.id, 100,
                                             '0xCc3cD60FF9936B7C9272a649b24f290ADa562469',1,1)
    # User has not registered for any window -> nothing is eligible.
    result = airdrop_repository.fetch_total_eligibility_amount(airdrop.id,
                                                               '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    self.assertEqual(result,0)
    # User registers for the first window.
    airdrop_repository.register_user_registration(airdrop_window1.id,'0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    result = airdrop_repository.fetch_total_eligibility_amount(airdrop.id,
                                                               '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    # NOTE(review): ``result`` is not asserted here — an
    # ``assertEqual(result, 100)`` appears to have been intended.
    # Registering for the third window must not change the total: its claim
    # has not opened yet, and eligibility only covers past/active claim windows.
    airdrop_repository.register_user_registration(airdrop_window3.id, '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    result = airdrop_repository.fetch_total_eligibility_amount(airdrop.id,
                                                               '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    self.assertEqual(result, 100)
    # Registering for the active second window adds its 100 tokens.
    airdrop_repository.register_user_registration(airdrop_window2.id, '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    result = airdrop_repository.fetch_total_eligibility_amount(airdrop.id,
                                                               '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    self.assertEqual(result, 200)
    # No claim has happened yet, so the claimable total equals 200.
    rewards_for_claim_raw = airdrop_repository.fetch_total_rewards_amount(airdrop.id,
                                                                          '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    self.assertEqual(200, rewards_for_claim_raw[0]['total_rewards'])
    # Record a claim covering the full 200: nothing further is claimable now,
    # even though total eligibility remains 200.
    airdrop_repository.register_claim_history(airdrop.id,airdrop_window2.id,
                                              '0xCc3cD60FF9936B7C9272a649b24f290ADa562469',200,0,'PENDING',
                                              'transaction_hash')
    rewards_for_claim_raw = airdrop_repository.fetch_total_rewards_amount(airdrop.id,
                                                                          '0xCc3cD60FF9936B7C9272a649b24f290ADa562469')
    self.assertEqual(None, rewards_for_claim_raw[0]['total_rewards'])
def tearDown(self):
    """Delete all rows created by the tests, children before parents.

    Fix vs. original: the meaningless ``self.assertEqual(100, 100)``
    assertion has been removed — it could never fail and served no purpose.
    """
    # Delete in FK-dependency order so parent rows can be removed last.
    AirdropRepository().session.query(ClaimHistory).delete()
    AirdropRepository().session.query(UserRegistration).delete()
    AirdropRepository().session.query(UserReward).delete()
    AirdropRepository().session.query(AirdropWindow).delete()
    AirdropRepository().session.query(Airdrop).delete()
# Allow running this module directly with the stdlib test runner.
if __name__ == '__main__':
    unittest.main()
| 2.125 | 2 |
sapsan/core/abstrctions/experiment.py | MilesCranmer/Sapsan | 11 | 12768889 | from abc import ABC, abstractmethod
from typing import List
from sapsan.core.abstrctions.algorithm import Parameter, Metric, Artifact
from sapsan.core.abstrctions.tracking import TrackingBackend
from sapsan.core.tracking.logger import LoggingBackend
class Experiment(ABC):
    """
    Base experiment class.

    Subclasses implement :meth:`run` / :meth:`test` and expose the
    parameters, metrics and artifacts to record; :meth:`execute` runs the
    experiment and logs all three through the configured tracking backend.
    """
    def __init__(self,
                 tracking_backend: TrackingBackend):
        """
        @param tracking_backend: backend used to persist parameters,
            metrics and artifacts after each run
        """
        self.tracking_backend = tracking_backend

    def execute(self, *args, **kwargs):
        """Run the experiment, log its outputs, and return the run result."""
        # Run first so the parameter/metric/artifact properties reflect the
        # completed run; then log in a fixed order.
        result = self.run(*args, **kwargs)
        self.tracking_backend.log_parameters(parameters=self.parameters)
        self.tracking_backend.log_metrics(metrics=self.metrics)
        self.tracking_backend.log_artifacts(artifacts=self.artifacts)
        return result

    @abstractmethod
    def run(self, *args, **kwargs):
        """
        Single pass of the experiment; its return value is forwarded
        unchanged by :meth:`execute`.
        @return: experiment result (implementation-defined)
        """
        pass

    @abstractmethod
    def test(self,
             parameters: Parameter):
        """
        Test/evaluation pass of the experiment.
        @param parameters: parameters for the test
        @return: implementation-defined evaluation result
        """
        pass

    @property
    @abstractmethod
    def parameters(self) -> List[Parameter]:
        """
        Parameters of the algorithm, logged after each :meth:`execute`.
        @return: list of parameters for the algorithm
        """
        pass

    @property
    @abstractmethod
    def metrics(self) -> List[Metric]:
        """
        Metrics of the algorithm, logged after each :meth:`execute`.
        @return: list of metrics
        """
        pass

    @property
    @abstractmethod
    def artifacts(self) -> List[Artifact]:
        """
        Artifacts produced by the algorithm, logged after each :meth:`execute`.
        @return: list of artifacts
        """
        pass
setup.py | ulysse06/vtam | 1 | 12768890 | <gh_stars>1-10
# -*- coding: UTF-8 -*-
__author__ = "<NAME>, <NAME>, <NAME>, <NAME>"
__copyright__ = "Copyright 2018-2020, <NAME>, <NAME>"
__email__ = "<EMAIL>, <EMAIL>"
__license__ = "MIT"
import codecs
import configparser
from setuptools import setup
from setuptools import find_packages
import os
import sys
# Read authorship metadata from setup.cfg so it is defined in one place.
config = configparser.RawConfigParser()
config.read(os.path.join('.', 'setup.cfg'))
author = config['metadata']['author']
email = config['metadata']['email']
license = config['metadata']['license']
if sys.version_info < (3, 6):
    print("At least Python 3.6 is required.\n", file=sys.stderr)
    exit(1)
try:
    from setuptools import setup, find_packages
except ImportError:
    print("Please install setuptools before installing VTAM.",
          file=sys.stderr)
    exit(1)
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as fin:
    long_description = fin.read()
# NOTE(review): this preamble (version check, setuptools probe, README
# read, CLASSIFIERS) is duplicated further down with slightly different
# values (3.7 check, broader classifier list); the later definitions are
# the ones setup() actually sees. The duplication should be consolidated,
# keeping the later values.
CLASSIFIERS = """\
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3 :: Only
Topic :: Scientific/Engineering :: Bio-Informatics
Operating System :: POSIX :: Linux
Operating System :: Microsoft :: Windows :: Windows 10
"""
# Create list of package data files
# Create list of package data files
def data_files_to_list(directory):
    """Collect every file under *directory*, prefixed with '..' for setuptools."""
    collected = []
    for (dirpath, _dirnames, filenames) in os.walk(directory):
        collected.extend(os.path.join('..', dirpath, fname)
                         for fname in filenames)
    return collected


data_file_list = data_files_to_list('vtam/data')
data_example_file_list = data_files_to_list('vtam/data/example')
def read(rel_path):
    """Return the text of *rel_path*, resolved relative to this file's directory."""
    base_dir = os.path.abspath(os.path.dirname(__file__))
    with codecs.open(os.path.join(base_dir, rel_path), 'r') as handle:
        return handle.read()


def get_version(rel_path):
    """Extract the ``__version__`` string literal from the module at *rel_path*."""
    for line in read(rel_path).splitlines():
        if not line.startswith('__version__'):
            continue
        # The version is quoted with whichever quote character the line uses.
        quote = '"' if '"' in line else "'"
        return line.split(quote)[1]
    raise RuntimeError("Unable to find version string.")
# NOTE(review): everything from here to ``data_file_list`` duplicates the
# earlier preamble. These later assignments win: the effective minimum
# Python is 3.7 and the effective CLASSIFIERS list is the one below.
# Removing this duplication must keep THESE values, not the earlier ones.
if sys.version_info < (3, 7):
    print("At least Python 3.7 is required.\n", file=sys.stderr)
    exit(1)
try:
    from setuptools import setup, find_packages
except ImportError:
    print("Please install setuptools before installing VTAM.",
          file=sys.stderr)
    exit(1)
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as fin:
    long_description = fin.read()
CLASSIFIERS = """\
Development Status :: 4 - Beta
Environment :: Console
Intended Audience :: Science/Research
License :: OSI Approved :: MIT License
Programming Language :: Python
Programming Language :: Python :: 3
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3 :: Only
Topic :: Scientific/Engineering :: Bio-Informatics
Topic :: Software Development
Operating System :: POSIX :: Linux
Operating System :: Microsoft :: Windows :: Windows 10
"""
# Create list of package data files
def data_files_to_list(directory):
    paths = []
    for (path, directories, filenames) in os.walk(directory):
        for filename in filenames:
            paths.append(os.path.join('..', path, filename))
    return paths
data_file_list = data_files_to_list('vtam/data')
# data_test_list = data_files_to_list('vtam/tests')
setup(
    name='vtam',
    # Version is parsed from vtam/__init__.py so it is defined only once.
    version=get_version("vtam/__init__.py"),
    description="VTAM - Validation and Taxonomic Assignation of Metabarcoding Data "
                "is a metabarcoding pipeline. The analyses start from high throughput "
                "sequencing (HTS) data of amplicons of one or several metabarcoding "
                "markers and produce an amplicon sequence variant (ASV) "
                "table of validated variants assigned to taxonomic groups.",
    # author/email/license come from setup.cfg via the configparser preamble.
    author=author,
    author_email=email,
    url="https://vtam.readthedocs.io",
    license=license,
    long_description=long_description,
    classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f],
    packages=find_packages(),
    package_dir={'vtam': 'vtam'},
    # Ship everything collected under vtam/data as package data.
    package_data={'vtam': data_file_list},
    install_requires=['biopython', 'cutadapt', 'jinja2', 'pandas', 'progressbar', 'pyyaml', 'sqlalchemy', 'snakemake', 'termcolor', 'wopmars'],
    entry_points={
        'console_scripts': ['vtam=vtam:main']
    },
)
| 1.960938 | 2 |
tests/fullcontact_tests.py | fullcontact/fullcontact.py | 26 | 12768891 | from types import FunctionType
from fullcontact import FullContact
from nose.tools import assert_equal, assert_true
class TestFullContact(object):
    """Exercises FullContact client construction, URL building and auth failure."""

    def test_init(self):
        client = FullContact('test_key')
        assert_equal(client.api_key, 'test_key')

    def test__prepare_batch_url(self):
        client = FullContact('test_key')
        expected = 'https://api.fullcontact.com/v2/person.json?email=test%40test.<EMAIL>'
        assert_equal(
            client._prepare_batch_url(('person', {'email': '<EMAIL>'})),
            expected
        )

    def test_invalid_api_keys(self):
        # Both the single-person and batch endpoints reject a bogus key.
        client = FullContact('test_key')
        assert_equal(client.person(email='<EMAIL>').status_code, 403)
        batch_requests = [
            ('person', {'email': '<EMAIL>'}),
            ('person', {'name': '<NAME>'})
        ]
        assert_equal(client.api_batch(batch_requests).status_code, 403)
| 2.453125 | 2 |
vaultier/accounts/tests/api.py | witoon-acom/vaultier | 0 | 12768892 | from time import time
from django.core.urlresolvers import reverse
from rest_framework.test import APIClient
# todo: Backend?
# from accounts.business.authentication import Backend
from accounts.business.fields import RecoverTypeField
from vaultier.test.tools import FileAccessMixin, VaultierAPIClient
from django.utils import timezone
def auth_api_call(email=None, date=None, signature=None):
    """POST credentials to the auth endpoint and return the raw response.

    ``date`` defaults to the current time.  When no ``signature`` is
    supplied the test private key is still read, but signing is disabled
    (the Backend import above is commented out), so ``None`` is sent.
    """
    client = APIClient()
    mixin = FileAccessMixin()
    if not date:
        date = timezone.now()
    if not signature:
        privkey = mixin.read_file('vaultier.key')
        # todo: Backend?
        # signature = Backend.sign(privkey, email, date)
        signature = None
    payload = {'email': email,
               'date': date,
               'signature': signature}
    return client.post(reverse('auth-auth'), payload)
def register_api_call(*args, **kwargs):
    """Register a user via the API.

    Injects the test public key and a current UNIX timestamp into the
    payload before posting it to the user-list endpoint.
    """
    mixin = FileAccessMixin()
    kwargs['public_key'] = mixin.read_file('vaultier.pub')
    kwargs.update({'timestamp': int(time())})
    client = APIClient()
    return client.post(reverse('user-list'), kwargs)
def invite_member_api_call(token, email=None, workspace=None, send=True,
                           resend=True):
    """Invite a member to *workspace* using a token-authenticated client."""
    client = VaultierAPIClient()
    client.token(token)
    payload = {
        'email': email,
        'workspace': workspace,
        'send': send,
        'resend': resend
    }
    return client.post(reverse('member-list'), payload)
def list_members_api_call(token, workspace):
    """Fetch the member list of *workspace* with a token-authenticated client."""
    client = VaultierAPIClient()
    client.token(token)
    return client.get(reverse('member-list'), {'workspace': workspace})
def delete_member_api_call(token, member):
    """Delete the member identified by *member* with a token-authenticated client."""
    client = VaultierAPIClient()
    client.token(token)
    return client.delete(reverse('member-detail', args=(member,)))
def create_lost_keys_api_call(email, **kwargs):
    """
    Call to lost_key create view.

    :param email: user email
    :param kwargs: extra fields merged into the POST payload
    :return: Response
    """
    client = VaultierAPIClient()
    url = reverse('lost_keys-list')
    # Merge the email into the payload and send everything as POST data.
    # The previous version called ``client.post(url, data={'email': email},
    # kwargs=kwargs)``, which passed the extra fields as a literal keyword
    # named "kwargs" (absorbed by APIClient's **extra) and silently dropped
    # them from the request body.
    payload = dict(kwargs)
    payload['email'] = email
    return client.post(url, data=payload)
def update_lost_key_api_rebuild_call(lost_key_id, auth_hash=None,
                                     public_key=None):
    """
    Call to update view with parameter recover_type set to
    RecoverTypeField.REBUILD.

    :param lost_key_id: int
    :param auth_hash: str
    :param public_key: str
    :return: Response
    """
    client = VaultierAPIClient()
    detail_url = reverse('lost_keys-detail', args=(lost_key_id,))
    url = "{}?hash={}".format(detail_url, auth_hash)
    payload = {'public_key': public_key,
               'recover_type': RecoverTypeField.REBUILD}
    return client.put(url, data=payload)
def update_lost_key_api_disable_call(lost_key_id, auth_hash=None,
                                     public_key=None):
    """
    Call to update view with parameter recover_type set to
    RecoverTypeField.DISABLE.

    :param lost_key_id: int
    :param auth_hash: str
    :param public_key: str
    :return: Response
    """
    client = VaultierAPIClient()
    detail_url = reverse('lost_keys-detail', args=(lost_key_id,))
    url = "{}?hash={}".format(detail_url, auth_hash)
    payload = {'public_key': public_key,
               'recover_type': RecoverTypeField.DISABLE}
    return client.put(url, data=payload)
def retrieve_lost_key_api_call(lost_key_id, auth_hash=None):
    """
    Call to retrieve view.

    :param lost_key_id: int
    :param auth_hash: str
    :return: Response
    """
    client = VaultierAPIClient()
    detail_url = reverse('lost_keys-detail', args=(lost_key_id,))
    return client.get("{}?hash={}".format(detail_url, auth_hash))
| 2.0625 | 2 |
project/sema2/migrations/0011_answer_answered_option.py | eorygen/sema2_web | 0 | 12768893 | <filename>project/sema2/migrations/0011_answer_answered_option.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add the nullable ``answered_option`` FK from Answer to QuestionOption."""
    dependencies = [
        ('sema2', '0010_auto_20150421_0420'),
    ]
    operations = [
        migrations.AddField(
            model_name='answer',
            name='answered_option',
            # blank=True/null=True: an answer may have no selected option.
            field=models.ForeignKey(blank=True, to='sema2.QuestionOption', null=True),
        ),
    ]
| 1.539063 | 2 |
glustercli2/peer.py | aravindavk/glustercli2-python | 0 | 12768894 | from glustercli2.parsers import parsed_pool_list
class Peer:
    """One peer (node) of a Gluster trusted storage pool."""

    def __init__(self, cli, hostname):
        self.cli = cli
        self.hostname = hostname

    @classmethod
    def peer_cmd(cls, cli, cmd):
        # Every peer sub-command is routed through the gluster CLI wrapper.
        full_cmd = ["peer"] + cmd
        return cli.exec_gluster_command(full_cmd)

    @classmethod
    def list(cls, cli):
        """Return every peer in the pool, resolving the 'localhost' entry
        to the current host's real name."""
        raw_output = cli.exec_gluster_command(["pool", "list"])
        peers = parsed_pool_list(raw_output)
        for entry in peers:
            if entry.hostname == "localhost":
                entry.hostname = cli.get_current_host()
        return peers

    @classmethod
    def add(cls, cli, hostname):
        """Attach *hostname* to the pool."""
        cls.peer_cmd(cli, ["attach", hostname])

    def detach(self):
        """
        == Peer Delete/Detach

        Delete or Detach a Peer from Cluster.

        Example:

        [source,python]
        ----
        from glustercli2 import GlusterCLI

        gcli = GlusterCLI()
        gcli.peer("server2.kadalu").delete()
        ----
        """
        self.peer_cmd(self.cli, ["detach", self.hostname])
| 2.921875 | 3 |
src/sampler.py | nespinoza/tso-abc | 0 | 12768895 | <reponame>nespinoza/tso-abc<filename>src/sampler.py
import os
import numpy as np
import pickle
from scipy.stats import multivariate_normal
from ._version import __version__
def get_weights(prior, old_weights, old_thetas, old_covariance, new_thetas, N):
    """Return the normalized importance weights of the N new particles.

    Implements equation (3) of Ishida et al. (2015): each weight is the
    prior density at the particle divided by a Gaussian-kernel mixture
    centred on the previous particle system.
    """
    # Pre-assemble the old particles as parameter vectors (one per index).
    old_particles = [np.array([param[i] for param in old_thetas])
                     for i in range(N)]
    raw_weights = np.zeros(N)
    for j in range(N):
        theta = np.array([param[j] for param in new_thetas])
        # Numerator of eq. (3): prior density at the proposed particle.
        numerator = prior.evaluate(theta)
        # Denominator: weighted mixture of kernels around the old particles.
        denominator = sum(
            w_i * multivariate_normal.pdf(theta, mean=mu_i,
                                          cov=old_covariance)
            for w_i, mu_i in zip(old_weights, old_particles)
        )
        raw_weights[j] = numerator / denominator
    return raw_weights / np.sum(raw_weights)
def sample(prior, distance, simulator, M = 300, N = 30, Delta = 0.01, verbose = False, save_results = True, output_file = 'results.pkl'):
    """
    Given a dataset `data`, a function that return parameters sampled from the prior `prior_sampler`, a function
    that runs simulations given parameters `simulator` and a `distance` function that given two datasets
    in the format of `data` returns back the distance between two datasets, this function samples from the
    posterior using the PMC-ABC algorithm outlined in Ishida et al. (2015; https://arxiv.org/abs/1504.06129).

    Attributes
    ----------
    prior : object
        This is an object that has to have three possible functions. (1) A function `prior.sample(nsamples)` to sample from
        the prior; this needs to return an N-tuple where each element is an array of length `nsamples` storing the samples
        from the prior. (2) A function `prior.validate(theta)` that validates `theta` falls within the prior range; if
        `theta` is within that, this has to return `True` and `False` otherwise. (3) A function `prior.evaluate(theta)`
        which evaluates the prior at a given parameter vector `theta`.
    distance : object
        Object that calculates the distance from simulations to a(n already loaded) dataset. Has to have two functions;
        `distance.single_distance` which, given a simulation, returns the distance to that simulation and `distance.several_distances`,
        which calculates distances to `simulations.shape[0]` simulations stored in an array `simulations`.
    simulator : object
        Object that generates simulations. Must have a `simulator.single_simulation(parameter)` that generates a
        simulation given a parameter vector and a `simulator.several_simulations(parameter)` that receives a N-tuple,
        representing each of the parameters, and each tuple is an array, whose i-th element is the parameter for
        the i-th particle.
    M : int
        Number of "particles" --- i.e., draws from the prior drawn on the very first iteration of the ABC sampling scheme.
    N : int
        Number of particles on each particle system (i.e., number of particles to keep on that first iteration and in
        each of the subsequent importance sampling iterations). N must be less than M.
    Delta : float
        Convergence criterion for the ABC sampling scheme: minimum ratio between N and the number of draws necessary to
        construct a particle system, K.
    verbose : boolean
        If True, several statistics are printed out to the terminal to monitor the algorithm.
    save_results : boolean
        If `True`, results are saved to a pickle file. Default is true.
    output_file : string
        If `save_results` is `True`, results of the sampler will be saved to `output_file`. If `output_file` is found, the factor N/K will be compared
        to the desired Delta; if smaller, sampling will not be made, and results will be read from this file. If larger, sampling will resume from where
        it took off.
    """
    print('\n\t -----------------------------------------------')
    print('\t \t abeec v'+__version__+' --- an ABC sampler\n')
    print('\t Author: <NAME> (<EMAIL>)')
    print('\t -----------------------------------------------')
    resume_run = False
    # If a previous results file exists, either return it (already converged),
    # resume it (same N/M, weaker convergence), or recurse into a fresh,
    # differently-named run (different N/M).
    if save_results and os.path.exists(output_file):
        print('\n\t >> Results found on '+output_file+'. Reading them...')
        S = pickle.load(open(output_file, 'rb'))
        times_S = list(S.keys())
        # Check if Delta is larger than the latest delta in the results. If this is the case,
        # return S. If not, resume until new Delta is reached:
        input_N = S[times_S[-1]]['N']
        input_M = S[times_S[-1]]['M']
        input_Delta = S[times_S[-1]]['Current_Delta']
        if Delta > input_Delta and input_N == N and input_M == M:
            print('\t \t - Input run is the same as the results.pkl file. Reading in results.pkl...')
            return S
        else:
            if input_N == N and input_M == M:
                output_file_wo_extension = output_file.split('.')[0]
                sufix = 'continued'
                while True:
                    current_output_file = output_file_wo_extension + '_' + sufix + '.pkl'
                    if os.path.exists(current_output_file):
                        # Recurse into the already-continued run.
                        S = sample(prior, distance, simulator, M = M, N = N, Delta = Delta, verbose = verbose, save_results = save_results, output_file = current_output_file)
                        return S
                        # NOTE(review): the line below is unreachable (it follows a
                        # return), so 'continued_continued' names are never produced.
                        sufix = 'continued_continued'
                    else:
                        break
                resume_run = True
                output_file = current_output_file
                print('\t \t - Input run is the same, but with a lower convergence. Continuing run; saving to '+current_output_file+'...')
            else:
                output_file_wo_extension = output_file.split('.')[0]
                current_output_file = output_file_wo_extension + '_N' + str(N) + '_M' + str(M) + '.pkl'
                print('\t \t - Input has N = '+str(N)+' and M = '+str(M)+'. Starting a new run...')
                S = sample(prior, distance, simulator, M = M, N = N, Delta = Delta, verbose = verbose, save_results = save_results, output_file = current_output_file)
                return S
    # Check weird user inputs:
    if N > M:
        raise Exception('N ('+str(N)+', number of particles) cannot be larger than M ('+str(M)+', number of each sub particle system). ')
    ######################################################################
    # STEP 1: find the best particles on the initial draw from the prior #
    ######################################################################
    if not resume_run:
        print('\n\t >> 1. Starting ABC sampler...')
        # First, generate set of samples from the prior:
        thetas = prior.sample(nsamples = M)
        nparameters = len(thetas)  # NOTE(review): currently unused.
        # Simulate initial particle system:
        simulations = simulator.several_simulations(thetas)
        # Get distances between the dataset and the simulations; sort them out from best to worst, select best N ones:
        distances = distance.several_distances(simulations)
        idx = np.argsort(distances)[:N]
        # Save the N best ones to S0 (i.e., S at t=0):
        t = 0
        S = {}
        S[t] = {}
        S[t]['thetas'] = [parameter[idx] for parameter in thetas]
        S[t]['distances'] = distances[idx]
        # Get covariance matrix for these thetas:
        S[t]['covariance'] = np.cov(S[0]['thetas'])
        # Calculate and save inital weights (uniform at t=0):
        S[t]['weights'] = np.ones(N)/N
        print('\t \t - Initial N particles successfully generated.')
        Current_Delta = np.inf
    else:
        print('\n\t >> 1. Resuming ABC sampling...')
        t = times_S[-1]
        Current_Delta = S[t]['Current_Delta']
    ######################################################################
    # STEP 2: iterate to find sub-particle systems of draws for t > 0 #
    ######################################################################
    # Define parameters that are going to be used in the iteration:
    idx_quantile = int(N * 0.75)
    idx_N = np.arange(N)
    # Start iteration:
    print('\n\t >> 2. Going to iterative importance sampling. Target Delta: '+str(Delta)+'.')
    while Current_Delta > Delta:
        # Initialize parameters for the current iteration:
        K = 0
        Kstar = 0
        t = t + 1
        S[t] = {}
        # Calculate epsilon as the 75th quantile of distances in S[t-1]:
        epsilon = S[t-1]['distances'][idx_quantile]
        # Sample N new particles:
        counter = 0
        while True:
            Kstar += 1
            # Sample a proposed theta: pick an old particle by weight, then
            # perturb it with the previous system's covariance kernel.
            idx_0 = np.random.choice(idx_N, p = S[t-1]['weights'])
            theta_0 = np.array([parameter[idx_0] for parameter in S[t-1]['thetas']])
            current_theta = np.random.multivariate_normal(theta_0, S[t-1]['covariance'])
            # Before running the simulation, validate that the sampled theta falls
            # within the bounds of the prior:
            if prior.validate(current_theta):
                current_simulation = simulator.single_simulation(current_theta)
                current_distance = distance.single_distance(current_simulation)
            else:
                current_distance = np.inf
            # If it does, save the sampled theta:
            if current_distance < epsilon:
                if counter == 0:
                    thetas = np.copy(current_theta)
                    distances = np.array([current_distance])
                    Kstars = np.array([Kstar])  # NOTE(review): accumulated but never read.
                else:
                    thetas = np.vstack((thetas, current_theta))
                    distances = np.append(distances, current_distance)
                    Kstars = np.append(Kstars, Kstar)
                K += Kstar
                Kstar = 0
                counter += 1
            if counter == N:
                break
        # Save the N new particles in the S dictionary. First, save the ordered distances
        # (this ordering is needed on the next iteration):
        idx = np.argsort(distances)
        S[t]['distances'] = np.copy(distances[idx])
        # Save the ordered thetas:
        if verbose:
            current_thetas = np.median(thetas, axis=0)
            current_sigmas_on_thetas = np.sqrt(np.var(thetas, axis=0))
        thetas = tuple(thetas.T)
        S[t]['thetas'] = [parameter[idx] for parameter in thetas]
        # Get weights:
        S[t]['weights'] = get_weights(prior, S[t-1]['weights'], S[t-1]['thetas'], \
                                      S[t-1]['covariance'], S[t]['thetas'], N)
        # Get (weighted) covariance matrix:
        S[t]['covariance'] = np.cov(S[t]['thetas'], aweights = S[t]['weights'])
        # Calculate and save the current delta to see if we've completed the sampling:
        Current_Delta = np.double(N)/np.double(K)
        S[t]['N'] = N
        S[t]['M'] = M
        S[t]['Current_Delta'] = Current_Delta
        percent_delta = 1. - ((Current_Delta - Delta)/Current_Delta)
        if percent_delta < 1.:
            print('\n\t - At t = '+str(t)+', current Delta is',Current_Delta,' | {0:.2f}% done'.format(percent_delta*100))
        else:
            print('\n\t - At t = '+str(t)+', current Delta is',Current_Delta,' | 100% done (target Delta of '+str(Delta)+' reached)')
        if verbose:
            nparams = len(thetas)
            print('\n\t \t Current parameter statistics:')
            print('\t \t -----------------------------\n')
            for i in range(nparams):
                print('\t \t Parameter '+str(i+1)+' of '+str(nparams)+': {0:.10f} +/- {1:.10f}'.format(current_thetas[i], current_sigmas_on_thetas[i]))
    print('\n\t >> ABC samples successfully generated!\n')
    if save_results:
        print('\n\t - Saving results to '+output_file+' file...')
        pickle.dump(S, open(output_file, 'wb'))
        print('\n\t - ...done!')
    return S
| 2.6875 | 3 |
3_GenderClassification/notebooks/highlevel-tensorflow-helper.py | Shuu-Ri/aws-nlp-workshop | 54 | 12768896 | <reponame>Shuu-Ri/aws-nlp-workshop<filename>3_GenderClassification/notebooks/highlevel-tensorflow-helper.py
import numpy as np
import pandas as pd
from numpy import genfromtxt
import os
import tensorflow as tf
from tensorflow.python.estimator.export.export import build_raw_serving_input_receiver_fn
from tensorflow.python.estimator.export.export_output import PredictOutput
import json
INPUT_TENSOR_NAME = "inputs"
SIGNATURE_NAME = "serving_default"
def model_fn(features, labels, mode, params):
    """Model function for Estimator.

    Builds a stacked-LSTM binary classifier over one-hot encoded names
    (input shape (15, 26)) and returns an EstimatorSpec for the requested
    mode.  NOTE(review): relies on TF 1.x APIs (tf.contrib, tf.losses),
    so this will not run on TensorFlow 2.

    # Logic to do the following:
    # 1. Configure the model via Keras functional api
    # 2. Define the loss function for training/evaluation using Tensorflow.
    # 3. Define the training operation/optimizer using Tensorflow operation/optimizer.
    # 4. Generate predictions as Tensorflow tensors.
    # 5. Generate necessary evaluation metrics.
    # 6. Return predictions/loss/train_op/eval_metric_ops in EstimatorSpec object"""
    # 1. Configure the model via Keras functional api
    first_hidden_layer = tf.keras.layers.LSTM(512, activation='relu', return_sequences=True,input_shape=(15,26), name='first-layer')(features[INPUT_TENSOR_NAME])
    #second_hidden_layer = tf.keras.layers.LSTM(512,activation='relu',return_sequences=True,)(first_hidden_layer)
    third_hidden_layer = tf.keras.layers.LSTM(512,activation='relu')(first_hidden_layer)
    # Sigmoid output: probability of the positive class (female).
    output_layer = tf.keras.layers.Dense(1, activation='sigmoid')(third_hidden_layer)
    # Flatten (batch, 1) -> (batch,) for the prediction output.
    predictions = tf.reshape(output_layer, [-1])
    # Provide an estimator spec for `ModeKeys.PREDICT`.
    if mode == tf.estimator.ModeKeys.PREDICT:
        return tf.estimator.EstimatorSpec(
            mode=mode,
            predictions={"Gender": predictions},
            export_outputs={SIGNATURE_NAME: PredictOutput({"Gender": predictions})})
    # Keep the (batch, 1)-shaped tensor for the loss, matching the labels' shape.
    pred_2d = output_layer
    # 2. Define the loss function for training/evaluation using Tensorflow.
    loss = tf.losses.log_loss(labels, pred_2d)
    # 3. Define the training operation/optimizer using Tensorflow operation/optimizer.
    train_op = tf.contrib.layers.optimize_loss(
        loss=loss,
        global_step=tf.contrib.framework.get_global_step(),
        learning_rate=params["learning_rate"],
        optimizer="Adagrad")
    # 4. Generate predictions as Tensorflow tensors.
    predictions_dict = {"Gender": predictions}
    # 5. Generate necessary evaluation metrics.
    # Calculate root mean squared error as additional eval metric
    eval_metric_ops = {
        "accuracy": tf.metrics.accuracy(
            tf.cast(labels, tf.float32), tf.cast(predictions,tf.float32))
    }
    # Provide an estimator spec for `ModeKeys.EVAL` and `ModeKeys.TRAIN` modes.
    return tf.estimator.EstimatorSpec(
        mode=mode,
        loss=loss,
        train_op=train_op,
        eval_metric_ops=eval_metric_ops)
def serving_input_fn(params):
    """Serving-time receiver: one (1, 15, 26) float32 one-hot encoded name."""
    placeholder = tf.placeholder(tf.float32, shape=[1, 15, 26])
    receiver_fn = build_raw_serving_input_receiver_fn(
        {INPUT_TENSOR_NAME: placeholder})
    return receiver_fn()
def train_input_fn(training_dir, params):
    """Input function over the training split (train_names.csv)."""
    return _input_fn(training_dir, "train_names.csv")
def eval_input_fn(training_dir, params):
    """Input function over the evaluation split (test_names.csv)."""
    return _input_fn(training_dir, "test_names.csv")
def _input_fn(training_dir, training_filename):
    """Build a numpy input_fn from a CSV of (Name, Gender) rows.

    Each name is lowercased and one-hot encoded character-by-character into
    a fixed (max_name_length, alphabet_size) matrix.  Labels are 1.0 for
    'F' and 0.0 for 'M'.
    """
    # (Previously a redundant chained assignment: filename = filename=...)
    filename = os.path.join(training_dir, training_filename)
    df = pd.read_csv(filename, sep=',', names=["Name", "Gender"])
    max_name_length = 15
    alphabet_size = 26
    # get list of names from the 'Name' column
    names = df['Name'].values
    train_count = df.shape[0]
    # get input X: one-hot encode each name
    char_index = create_char_index()
    X = np.zeros((train_count, max_name_length, alphabet_size), dtype=np.float32)
    for i, name in enumerate(names):
        name = name.lower()
        # Truncate to the model's fixed window; the previous version raised
        # IndexError on names longer than max_name_length.  Characters
        # outside a-z (spaces, hyphens, ...) are skipped instead of raising
        # KeyError; their time step stays all-zero.
        for t, char in enumerate(name[:max_name_length]):
            col = char_index.get(char)
            if col is not None:
                X[i, t, col] = 1
    # get list of genders <M,F> from the 'Gender' column
    Y = np.ones((train_count, 1), dtype=np.float32)
    Y[df['Gender'] == 'M', 0] = 0
    return tf.estimator.inputs.numpy_input_fn(
        x={INPUT_TENSOR_NAME: X},
        y=Y,
        batch_size=64,
        num_epochs=None,
        shuffle=True)()
def input_fn(serialized_data, content_type):
    """Deserialize a JSON request {"name": ...} into a (1, 15, 26) tensor proto.

    The name is lowercased and one-hot encoded per character, mirroring the
    training-time encoding in _input_fn.
    """
    # assuming the input is in json
    obj = json.loads(serialized_data)
    name = obj['name'].lower()
    char_index = create_char_index()
    data = np.zeros((1, 15, 26), dtype=np.float32)
    # Truncate to the model's 15-character window (previously raised
    # IndexError on longer names) and skip characters outside a-z
    # (previously raised KeyError).
    for t, char in enumerate(name[:15]):
        col = char_index.get(char)
        if col is not None:
            data[0, t, col] = 1
    return tf.make_tensor_proto(values=np.asarray(data), shape=[1, 15, 26], dtype=tf.float32)
def create_char_index():
    """Map each lowercase ASCII letter to its 0-based alphabet index."""
    return {chr(ord('a') + offset): offset for offset in range(26)}
| 2.734375 | 3 |
library/word_spliter.py | guoruibiao/memokeeper | 0 | 12768897 | <filename>library/word_spliter.py
# coding: utf8
import os
import jieba
import conf.conf as app_conf
def extract_keywords(content):
    """
    Extract keywords ranked by word frequency.

    Words come from jieba full-mode segmentation; stop words and tokens
    shorter than two characters are discarded.

    :param content: something in your system clipboard
    :return: list of keywords, most frequent first
    """
    if content == "":
        return []
    # A set gives O(1) stop-word membership tests.
    stop_words = set(_load_stopwords())
    counts = {}
    for token in jieba.cut(content, cut_all=True):
        if token in stop_words:
            continue
        if len(token) < 2:
            continue
        counts[token] = counts.get(token, 0) + 1
    # (Removed a leftover debug print of the frequency table.)
    ranked = sorted(counts.items(), key=lambda item: item[1], reverse=True)
    return [token for token, _count in ranked]
def _load_stopwords():
    """
    Load the stop-word list from ``app_conf.STOPWORDS_PATH``.

    :return: list of unique, whitespace-stripped stop words (order
        preserved); empty if the file does not exist.
    """
    filepath = app_conf.STOPWORDS_PATH
    if not os.path.exists(filepath):
        return []
    # The previous version kept the trailing newline on every word (so
    # stop-word matching never hit) and deduplicated against a list that
    # was still empty, which was a no-op.  Strip and dedupe properly.
    stopwords = []
    seen = set()
    with open(filepath, "r") as f:
        for line in f:
            word = line.strip()
            if word and word not in seen:
                seen.add(word)
                stopwords.append(word)
    return stopwords
| 3.390625 | 3 |
create_model.py | shenalt/stroke_predictor | 0 | 12768898 | <gh_stars>0
# Train and persist a RandomForest stroke classifier from two public CSVs.
# Import our libraries
# Import pandas and numpy
import pandas as pd
import numpy as np
import pickle
import matplotlib.pyplot as plt
# Helper function to split our data
from sklearn.model_selection import train_test_split
# Import our Logistic Regression model
from sklearn.linear_model import LogisticRegression
# Import helper functions to evaluate our model
from sklearn.metrics import accuracy_score, precision_score, recall_score, confusion_matrix, f1_score, roc_auc_score
# Import z-score helper function
import scipy.stats as stats
from IPython.display import Image
# Import helper function for hyper-parameter tuning
from sklearn.model_selection import GridSearchCV
# Import Decision Tree
# from sklearn.tree import DecisionTreeClassifier
# Import Random Forest
from sklearn.ensemble import RandomForestClassifier
# Import metrics to score our model
from sklearn import metrics
# NOTE(review): plt, stats, Image, LogisticRegression and metrics are
# imported but never used in this script.
# LOAD IN AND CLEAN UP THE DATA BEFORE MERGING
# Load in the first stroke dataset
df = pd.read_csv('https://raw.githubusercontent.com/shenalt/tissera_yasser_DS_project/main/healthcare-dataset-stroke-data.csv')
# Drop the id column
df.drop(columns=['id'], inplace=True)
# Fill the bmi null values in df
df['bmi'] = df.bmi.fillna(df.bmi.mean())
# Remove entries with gender Other from df
df = df[df['gender'] != 'Other']
# Normalize our numerical features to ensure they have equal weight when I build my classifiers
# Create a new column for normalized age
df['age_norm']=(df['age']-df['age'].min())/(df['age'].max()-df['age'].min())
# Create a new column for normalized avg glucose level
df['avg_glucose_level_norm']=(df['avg_glucose_level']-df['avg_glucose_level'].min())/(df['avg_glucose_level'].max()-df['avg_glucose_level'].min())
# Create a new column for normalized bmi
df['bmi_norm']=(df['bmi']-df['bmi'].min())/(df['bmi'].max()-df['bmi'].min())
# Load in the second stroke dataset
df2 = pd.read_csv('https://raw.githubusercontent.com/shenalt/tissera_yasser_DS_project/main/train_strokes.csv')
# Drop the id column
df2.drop(columns=['id'], inplace=True)
# Fill the bmi null values in df2
df2['bmi'] = df2.bmi.fillna(df2.bmi.mean())
# Create a new category for the smoking null values
df2['smoking_status'] = df2['smoking_status'].fillna('not known')
# Remove entries with gender Other from df2
df2 = df2[df2['gender'] != 'Other']
# Normalize our numerical features to ensure they have equal weight when I build my classifiers
# Create a new column for normalized age
df2['age_norm']=(df2['age']-df2['age'].min())/(df2['age'].max()-df2['age'].min())
# Create a new column for normalized avg glucose level
df2['avg_glucose_level_norm']=(df2['avg_glucose_level']-df2['avg_glucose_level'].min())/(df2['avg_glucose_level'].max()-df2['avg_glucose_level'].min())
# Create a new column for normalized bmi
df2['bmi_norm']=(df2['bmi']-df2['bmi'].min())/(df2['bmi'].max()-df2['bmi'].min())
# Merge the two df's
df_master = df.merge(df2, how='outer')
# EXTRACT ALL STROKE ENTRIES AND ISOLATE 1000 RANDOM NON-STROKE ENTRIES INTO A DF
# Create a df from dataset with just the stroke entries
s_df = df_master.loc[df_master['stroke'] == 1]
# Remove age outliers from s_df
s_df = s_df.loc[s_df['age'] >= 45]
# Create a df from the dataset with the no stroke entries
n_df = df_master.sample(n=1100, random_state=30)
n_df = n_df.loc[n_df['stroke'] == 0]
# Merge them
df_final = s_df.merge(n_df, how='outer')
# FEATURE ENGINEERING TIME
# Convert certain features into numerical values
# NOTE(review): the dummy columns created here are not in selected_features
# below, so they never reach the model -- confirm whether that is intended.
df_final = pd.get_dummies(df_final, columns=['gender', 'Residence_type', 'smoking_status', 'ever_married', 'work_type'])
# Begin to train our model
selected_features = ['age', 'bmi', 'avg_glucose_level', 'hypertension', 'heart_disease']
X = df_final[selected_features]
y = df_final['stroke']
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=30)
# RANDOM FOREST CLASSIFIER
# Init our Random Forest Classifier Model
#model = RandomForestClassifier()
params = {
    'n_estimators' : [10, 50, 100],
    'criterion' : ['gini', 'entropy'],
    'max_depth': [5, 10, 100, None],
    'min_samples_split': [2, 10, 100],
    'max_features': ['auto', 'sqrt', 'log2']
}
grid_search_cv = GridSearchCV(
    estimator=RandomForestClassifier(),
    param_grid=params,
    scoring='accuracy' )
# fit all combination of trees.
grid_search_cv.fit(X_train, y_train)
# the highest accuracy-score.
model = grid_search_cv.best_estimator_
# Fit our model
# NOTE(review): GridSearchCV already refits the best estimator on the full
# training set, so this extra fit is redundant (but harmless).
model.fit(X_train, y_train)
# Save our model using pickle
pickle.dump(model, open('models/rfc.pkl', 'wb') )
| 2.890625 | 3 |
iberzetsn/__init__.py | jwg4/iberzetsn | 0 | 12768899 | <gh_stars>0
from .to_yiddish import roman_to_yiddish # noqa: F401
from .from_yiddish import yiddish_to_roman # noqa: F401
| 1.078125 | 1 |
src/animalClassifier.py | UBC-MDS/animalsgonewild | 1 | 12768900 | from pickle import TRUE
import string
def animalClassifier(text):
    """
    Classify text into an animal species based on its lexical diversity.

    The type-token ratio (unique words / total words, after stripping
    punctuation) selects one of four animals, from "Duck" (very
    repetitive text) up to "Whale" (highly varied text).

    Parameters
    ----------
    text: str
        text to be analyzed

    Returns
    -------
    str
        the corresponding animal species

    Raises
    ------
    TypeError
        if ``text`` is not a string
    ValueError
        if ``text`` contains no words after punctuation removal

    Example
    --------
    >>> animalClassifier("the the the the the")
    'Duck'
    """
    if not isinstance(text, str):
        raise TypeError("Text input should be of type 'str'")
    animal_list = ["Duck", "Monkey", "Giraffe", "Whale"]
    cleaned_text = text.translate(str.maketrans('', '', string.punctuation))
    words = cleaned_text.split()
    if not words:
        # Guard against a ZeroDivisionError on empty / punctuation-only input.
        raise ValueError("Text input must contain at least one word")
    # Type-token ratio.  The previous version called np.unique without
    # importing numpy, which raised NameError; a plain set is sufficient.
    ratio = len(set(words)) / len(words)
    if ratio <= 0.4:
        output = animal_list[0]
    elif ratio <= 0.6:
        output = animal_list[1]
    elif ratio <= 0.8:
        output = animal_list[2]
    else:
        output = animal_list[3]
    return output
twitterdedupe/tests/test_lengthen_url.py | jonohill/twitter-dedupe | 0 | 12768901 | <filename>twitterdedupe/tests/test_lengthen_url.py
import pytest
from unittest.mock import Mock
@pytest.fixture
def meth():
    """Fixture returning the function under test (lengthen_url).

    The import happens inside the fixture, so a broken import fails the
    tests that use it rather than module collection.
    """
    from twitterdedupe import lengthen_url
    return lengthen_url
def reqlib(url):
    """Build a fake ``requests``-like module whose ``get`` always returns a
    response object reporting *url* as its final URL."""
    fake_requests = Mock(name="requests")
    fake_response = Mock(name="response")
    fake_response.url = url
    fake_requests.get.return_value = fake_response
    # Sanity-check the wiring before handing the mock to a test.
    sanity = fake_requests.get(url)
    assert sanity.url == url
    return fake_requests
def test_normal_url(meth):
    # A URL with no tracking parameters should come back unchanged.
    url = "http://www.chrisheisel.com/"
    req = reqlib(url)
    result = meth(url, req)
    assert result == url
def test_sneaky_url(meth):
    # NOTE(review): the mocked response reports the *original* URL
    # (including ?wpsrc=fol_tw), so this expects lengthen_url itself to
    # strip the tracking parameter -- confirm against the implementation.
    url = "http://www.chrisheisel.com/?wpsrc=fol_tw"
    expected = "http://www.chrisheisel.com/"
    req = reqlib(url)
    result = meth(url, req)
    assert result == expected
| 2.953125 | 3 |
delivery_bots/bots/tgbot/checkout/buttons.py | savilard/delivery-bots | 0 | 12768902 | <gh_stars>0
from aiogram.types import InlineKeyboardButton
from aiogram.utils.emoji import emojize
async def create_checkout_button() -> InlineKeyboardButton:
    """Create tgbot checkout button.

    The label (Russian for "Place order") carries a currency-exchange
    emoji rendered via ``emojize``; pressing the button emits the
    ``checkout`` callback.
    """
    return InlineKeyboardButton(
        text=emojize('💱 Оформить заказ'),
        callback_data='checkout',
    )
| 2.609375 | 3 |
core/test/src/xal/model/benchmarks/ring-turns-test.py | luxiaohan/openxal-csns-luxh | 10 | 12768903 | #!/usr/bin/env jython
import sys
from jarray import *
from java.lang import *
from java.util import *
from java.io import *
from java.text import *
from javax.swing import *
from java.awt.event import *
from java.awt import *
from java.util.regex import *
from gov.sns.xal.smf import *
from gov.sns.xal.smf.data import *
from gov.sns.xal.model import *
from gov.sns.xal.model.alg import *
from gov.sns.xal.model.alg.resp import *
from gov.sns.xal.model.probe import *
from gov.sns.xal.model.probe.resp import *
from gov.sns.xal.model.probe.resp.traj import *
from gov.sns.xal.model.xml import *
from gov.sns.xal.model.mpx import *
from gov.sns.xal.model.scenario import *
from gov.sns.tools.beam import *
from gov.sns.xal.slg import *
from gov.sns.xal.smf.proxy import *
from gov.sns.tools.plot import *
# Java definitions -- aliases so the Jython code can mirror Java literals.
false = 0
true = 1
null = None
# constants -- shared number formats for printed positions/coordinates.
POSITION_FORMAT = DecimalFormat("0.000")
COORIDINATE_FORMAT = DecimalFormat("0.0E0")
# handler of the main window events
class WindowHandler(WindowAdapter):
    # Logs window closure instead of exiting; the sys.exit call is
    # deliberately disabled so closing one plot window keeps others alive.
    def windowClosed(self, event):
        #sys.exit(0)
        print "Closing window..."
# class for holding node results
class NodeResult:
    """Holds the (x, y, z) beam coordinates recorded at one lattice position."""

    def __init__(self, position, x, y, z):
        self.position = position
        self.values = [x, y, z]

    def get_value(self, field):
        """Return the coordinate selected by *field* (0=x, 1=y, 2=z)."""
        return self.values[field]
# Plotter for plotting results in a chart on a window
class Plotter:
    # Wraps a Swing JFrame containing one FunctionGraphsJPanel chart.
    def __init__( self, title, label ):
        self.frame = JFrame( title )
        # Dispose only this window on close (WindowHandler logs the event).
        self.frame.setDefaultCloseOperation(WindowConstants.DISPOSE_ON_CLOSE)
        self.frame.addWindowListener( WindowHandler() )
        self.frame.setSize( 1000, 500 )
        container = self.frame.getContentPane()
        container.setLayout( BorderLayout() )
        box = Box( BoxLayout.Y_AXIS )
        container.add( box )
        self.chart = FunctionGraphsJPanel()
        self.chart.setName( title )
        self.chart.setAxisNameX( " position (m)" );
        self.chart.setAxisNameY( label );
        box.add( self.chart )
        self.frame.show()
    # Add one data series (one point per NodeResult) to the chart.
    def plot_node_results( self, results, field, isLineGraph, color, label ):
        graph_data = BasicGraphData()
        graph_data.setDrawLinesOn( isLineGraph )
        graph_data.setGraphProperty( self.chart.getLegendKeyString(), label )
        self.chart.setLegendVisible( true );
        graph_data.setGraphColor( color )
        for result in results:
            graph_data.addPoint( result.position, result.get_value( field ) )
        self.chart.addGraphData( graph_data )
# change a corrector to kick the beam
def kickBeam( scenario, sequence, corrector, field ):
    # Override the corrector's field (Tesla) in the model scenario and log it.
    scenario.setModelInput( corrector, ElectromagnetPropertyAccessor.PROPERTY_FIELD, field )
    print "add field", field, "Tesla to corrector: ", corrector.getId(), ", position: ", sequence.getPosition(corrector), ", effective length: ", corrector.getEffLength()
# print results for the ring
def print_ring_results( trajectory ):
    # Print the ring tunes from the computed trajectory.
    print ""
    print "tunes: ", trajectory.getTunes()
def generate_xal_results( scenario, trajectory, sequence, nodes ):
node_iterator = nodes.iterator()
closed_results = []
winding_results = []
NUM_TURNS = 100
TURN_STEP = 25
print "First elements: ", trajectory.statesInPositionRange( 0.0, 0.1 )
while node_iterator.hasNext():
node = node_iterator.next()
position = sequence.getPosition( node )
state = trajectory.stateForElement( node.getId() )
phase_orbit_array = state.phaseCoordinatesTurnByTurn( NUM_TURNS )
closed_orbit = state.getFixedOrbit()
x = closed_orbit.getx() * 1000 # convert from meters to mm
y = closed_orbit.gety() * 1000 # convert from meters to mm
z = closed_orbit.getz()
print node.getId(), sequence.getPosition( node ), state.getPosition(), scenario.getPositionRelativeToStart( sequence.getPosition( node ) )
print "closed orbit: ", closed_orbit
print ""
closed_results.append( NodeResult( position, x, y, z ) )
xAvg = 0
for index in range( 0, NUM_TURNS, TURN_STEP ):
phase_orbit = phase_orbit_array[index]
phase_x = phase_orbit.getx() * 1000 # convert from meters to mm
phase_y = phase_orbit.gety() * 1000 # convert from meters to mm
phase_z = phase_orbit.getz() * 1000 # convert from meters to mm
xAvg += phase_x
winding_results.append( NodeResult( position, phase_x, phase_y, phase_z ) )
return [ closed_results, winding_results ]
# load the optics (default accelerator definition) and fetch the ring sequence
accelerator = XMLDataManager.loadDefaultAccelerator()
print "loading the accelerator..."
#accelerator = XMLDataManager.acceleratorWithPath("/Users/t6p/Projects/xal/main/xal_xmls/main_ring.xal")
sequence = accelerator.getComboSequence("Ring")
print ""
# setup the model: transfer-map probe tracked around the ring from the foil
probe = ProbeFactory.getTransferMapProbe( sequence, TransferMapTracker() )
#probe.setPhaseCoordinates( PhaseVector(0.0, 0.0, 0.0, 0.0, 0.0, 0.000266) )
probe.setPhaseCoordinates( PhaseVector(.01225, 0.00036, -0.002, -0.0009, 0.0, 0.0) )	# initial (x, x', y, y', z, dp/p) -- NOTE(review): ordering presumed, confirm
#probe.setPhaseCoordinates( PhaseVector(0.01, 0.0, -0.002, -0.0005, 0.0, 0.0) )
scenario = Scenario.newScenarioFor( sequence )
scenario.setSynchronizationMode( Scenario.SYNC_MODE_DESIGN )	# use design values, not live machine values
scenario.setProbe( probe )
#scenario.setStartElementId("Ring_Mag:DCV_A13")
scenario.setStartElementId("Ring_Inj:Foil")
#scenario.setStartElementId("Ring_Mag:DCV_C07")
print "Origin relative to start: ", scenario.getPositionRelativeToStart( 0.0 )
# pick one horizontal (index 6) and one vertical (index 5) corrector and kick the beam
horizontal_correctors = sequence.getNodesOfType( "dch" ) # fetch horizontal correctors
vertical_correctors = sequence.getNodesOfType( "dcv" ) # fetch vertical correctors
h_corr = horizontal_correctors.get(6)
v_corr = vertical_correctors.get(5)
kickBeam( scenario, sequence, h_corr, 0.009 ) # field in Tesla -- original comment claimed "1.0 mrad kick"; NOTE(review): value/comment mismatch, confirm
kickBeam( scenario, sequence, v_corr, 0.0099 ) # field in Tesla -- see note above on units
# run the online model and generate the trajectory
scenario.resync()
scenario.run()
trajectory = probe.getTrajectory()
# print results
print_ring_results( trajectory )
# generate the results at the magnets
nodes = sequence.getAllNodes()
results = generate_xal_results( scenario, trajectory, sequence, nodes )
closed_results = results[0]
winding_results = results[1]
# plot results -- NOTE(review): lowercase true/false are not Python builtins;
# presumably provided by the Jython/XAL environment or defined earlier in the file -- confirm.
plotter = Plotter( "Horizontal Motion", "Horizontal Orbit (mm)" )
plotter.plot_node_results( closed_results, 0, true, Color.blue, "Closed Orbit" )
plotter.plot_node_results( winding_results, 0, false, Color.red, "Winding Orbit" )
plotter = Plotter( "Vertical Motion", "Vertical Orbit (mm)" )
plotter.plot_node_results( closed_results, 1, true, Color.blue, "Closed Orbit" )
plotter.plot_node_results( winding_results, 1, false, Color.red, "Winding Orbit" )
plotter = Plotter( "Longitudinal Motion", "z (mm)" )
plotter.plot_node_results( closed_results, 2, true, Color.blue, "Closed Orbit" )
plotter.plot_node_results( winding_results, 2, false, Color.red, "Winding Orbit" )
tests/integration_tests/test_integration.py | VladSkripniuk/optuna | 2 | 12768904 | <gh_stars>1-10
import pytest
def test_import():
    # type: () -> None
    """Smoke-test that every optuna integration module and class is importable."""
    from optuna.integration import chainer  # NOQA
    from optuna.integration import chainermn  # NOQA
    from optuna.integration import keras  # NOQA
    from optuna.integration import lightgbm  # NOQA
    from optuna.integration import mxnet  # NOQA
    from optuna.integration import tensorflow  # NOQA
    from optuna.integration import xgboost  # NOQA
    from optuna.integration import ChainerMNStudy  # NOQA
    from optuna.integration import ChainerPruningExtension  # NOQA
    from optuna.integration import KerasPruningCallback  # NOQA
    from optuna.integration import LightGBMPruningCallback  # NOQA
    from optuna.integration import MXNetPruningCallback  # NOQA
    from optuna.integration import TensorFlowPruningHook  # NOQA
    from optuna.integration import XGBoostPruningCallback  # NOQA
    # Importing a nonexistent integration module must fail loudly.
    with pytest.raises(ImportError):
        from optuna.integration import unknown_module  # type: ignore # NOQA
def test_module_attributes():
    # type: () -> None
    """Check that optuna.integration exposes every public integration name."""
    import optuna

    expected_attrs = (
        'chainer',
        'chainermn',
        'keras',
        'lightgbm',
        'mxnet',
        'tensorflow',
        'xgboost',
        'ChainerMNStudy',
        'ChainerPruningExtension',
        'KerasPruningCallback',
        'LightGBMPruningCallback',
        'MXNetPruningCallback',
        'TensorFlowPruningHook',
        'XGBoostPruningCallback',
    )
    for attr_name in expected_attrs:
        assert hasattr(optuna.integration, attr_name)

    # Accessing an unknown attribute must raise, not silently return something.
    with pytest.raises(AttributeError):
        optuna.integration.unknown_attribute  # type: ignore
| 1.992188 | 2 |
apps/redirects/tests.py | jfterpstra/onepercentclub-site | 7 | 12768905 | from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.test import TestCase
from django.test.utils import override_settings
from django.utils import six
from .middleware import RedirectFallbackMiddleware
from .models import Redirect
@override_settings(
    APPEND_SLASH=False,
    MIDDLEWARE_CLASSES=list(settings.MIDDLEWARE_CLASSES) +
    ['apps.redirects.middleware.RedirectFallbackMiddleware'],
    SITE_ID=1,
)
class RedirectTests(TestCase):
    """Integration tests for the custom Redirect model and fallback middleware.

    Target URLs 404 on purpose; only the 301 redirect itself is under test
    (``target_status_code=404``). All regex ``old_path`` values are raw
    strings -- the original used plain strings (e.g. ``'\\d'``), which emits
    invalid-escape warnings on modern Python even though the value is the same.
    """

    def test_model(self):
        """The string form of a Redirect shows old and new path."""
        r1 = Redirect.objects.create(
            old_path='/initial', new_path='/new_target')
        self.assertEqual(six.text_type(r1), "/initial ---> /new_target")

    def test_redirect(self):
        """A plain redirect 301s to the (language-prefixed) new path."""
        Redirect.objects.create(
            old_path='/initial', new_path='/new_target')
        response = self.client.get('/initial')
        self.assertRedirects(response,
            '/en/new_target', status_code=301, target_status_code=404)

    @override_settings(APPEND_SLASH=True)
    def test_redirect_with_append_slash(self):
        """With APPEND_SLASH, a slashless request still hits the slashed rule."""
        Redirect.objects.create(
            old_path='/initial/', new_path='/new_target/')
        response = self.client.get('/initial')
        self.assertRedirects(response,
            '/en/new_target/', status_code=301, target_status_code=404)

    @override_settings(APPEND_SLASH=True)
    def test_redirect_with_append_slash_and_query_string(self):
        """Query strings are matched as part of the stored old_path."""
        Redirect.objects.create(
            old_path='/initial/?foo', new_path='/new_target/')
        response = self.client.get('/initial?foo')
        self.assertRedirects(response,
            '/en/new_target/', status_code=301, target_status_code=404)

    def test_regular_expression(self):
        """Regex redirects substitute capture groups ($2) and count visits."""
        Redirect.objects.create(
            old_path=r'/news/index/(\d+)/(.*)/',
            new_path='/my/news/$2/',
            regular_expression=True)
        response = self.client.get('/news/index/12345/foobar/')
        self.assertRedirects(response,
            '/en/my/news/foobar/',
            status_code=301, target_status_code=404)
        redirect = Redirect.objects.get(regular_expression=True)
        self.assertEqual(redirect.nr_times_visited, 1)

    def test_fallback_redirects(self):
        """
        Ensure redirects with fallback_redirect set are the last evaluated
        """
        Redirect.objects.create(
            old_path='/project/foo',
            new_path='/my/project/foo')
        Redirect.objects.create(
            old_path=r'/project/foo/(.*)',
            new_path='/my/project/foo/$1',
            regular_expression=True)
        Redirect.objects.create(
            old_path=r'/project/(.*)',
            new_path='/projects',
            regular_expression=True,
            fallback_redirect=True)
        Redirect.objects.create(
            old_path=r'/project/bar/(.*)',
            new_path='/my/project/bar/$1',
            regular_expression=True)
        Redirect.objects.create(
            old_path='/project/bar',
            new_path='/my/project/bar')
        # Exact and specific regex rules win over the fallback...
        response = self.client.get('/project/foo')
        self.assertRedirects(response,
            '/en/my/project/foo',
            status_code=301, target_status_code=404)
        response = self.client.get('/project/bar')
        self.assertRedirects(response,
            '/en/my/project/bar',
            status_code=301, target_status_code=404)
        response = self.client.get('/project/bar/details')
        self.assertRedirects(response,
            '/en/my/project/bar/details',
            status_code=301, target_status_code=404)
        # ...while unmatched project URLs fall through to the fallback rule.
        response = self.client.get('/project/foobar')
        self.assertRedirects(response,
            '/en/projects',
            status_code=301, target_status_code=404)
        response = self.client.get('/project/foo/details')
        self.assertRedirects(response,
            '/en/my/project/foo/details',
            status_code=301, target_status_code=404)
| 2.09375 | 2 |
export-reach-contact-list/export_reach_contact_list.py | AfricasVoices/Project-OCHA | 0 | 12768906 | import argparse
import csv
import sys
from core_data_modules.cleaners import Codes
from core_data_modules.logging import Logger
from core_data_modules.traced_data.io import TracedDataJsonIO
from core_data_modules.util import PhoneNumberUuidTable
# Module-level logger: tag all output with the project name.
Logger.set_project_name("OCHA")
log = Logger(__name__)
if __name__ == "__main__":
    # --- Command line interface --------------------------------------------
    parser = argparse.ArgumentParser(description="Exports a list of phone numbers for the consenting participants "
                                                 "to REACH")
    parser.add_argument("traced_data_path", metavar="traced-data-path",
                        help="Path to the REACH traced data file to extract phone numbers from")
    parser.add_argument("phone_number_uuid_table_path", metavar="phone-number-uuid-table-path",
                        help="JSON file containing the phone number <-> UUID lookup table for the messages/surveys "
                             "datasets")
    parser.add_argument("output_path", metavar="output-path",
                        help="CSV file to write the REACH contacts to")
    args = parser.parse_args()
    traced_data_path = args.traced_data_path
    phone_number_uuid_table_path = args.phone_number_uuid_table_path
    output_path = args.output_path
    # TracedData objects carry deep history chains, so raise the recursion
    # limit before deserialising them.
    sys.setrecursionlimit(15000)
    # Load the phone number <-> uuid table
    log.info(f"Loading the phone number <-> uuid table from file '{phone_number_uuid_table_path}'...")
    with open(phone_number_uuid_table_path, "r") as f:
        phone_number_uuid_table = PhoneNumberUuidTable.load(f)
    log.info(f"Loaded {len(phone_number_uuid_table.numbers())} contacts")
    # Load the REACH traced data
    log.info(f"Loading REACH traced data from file '{traced_data_path}'...")
    with open(traced_data_path, "r") as f:
        data = TracedDataJsonIO.import_json_to_traced_data_iterable(f)
    log.info(f"Loaded {len(data)} traced data objects")
    # Search the TracedData for consenting contacts (skip anyone who withdrew)
    log.info("Searching for consenting uuids...")
    consenting_uuids = set()
    for td in data:
        if td["withdrawn_consent"] == Codes.TRUE:
            continue
        consenting_uuids.add(td["UID"])
    log.info(f"Found {len(consenting_uuids)} consenting uuids")
    # Convert the uuids to phone numbers, prefixed with '+' for URN format
    log.info("Converting the uuids to phone numbers...")
    phone_numbers = [f"+{phone_number_uuid_table.get_phone(uuid)}" for uuid in consenting_uuids]
    # Write a Rapid Pro-style contacts CSV; the "Name" column is left empty.
    log.warning(f"Exporting {len(phone_numbers)} phone numbers to {output_path}...")
    with open(output_path, "w") as f:
        writer = csv.DictWriter(f, fieldnames=["URN:Tel", "Name"], lineterminator="\n")
        writer.writeheader()
        for n in phone_numbers:
            writer.writerow({
                "URN:Tel": n
            })
    log.info(f"Wrote {len(phone_numbers)} contacts to {output_path}")
| 2.65625 | 3 |
main_refactor.py | karl-crl/decagon | 0 | 12768907 | <filename>main_refactor.py
import argparse
import os
from run_decagon_toy import RunDecagonToy
from run_decagon_real import RunDecagonReal
from constants import PARAMS, INPUT_FILE_PATH
if __name__ == '__main__':
    # --- Command line interface --------------------------------------------
    parser = argparse.ArgumentParser()
    parser.add_argument('--log', default=False,
                        action='store_true',
                        help='Whether to log run or nor, default True')
    parser.add_argument('--real', default=False,
                        action='store_true',
                        help='Run on real data or toy example')
    parser.add_argument('--batch-size', default=PARAMS['batch_size'], type=int,
                        help='Batch size')
    parser.add_argument('--hidden1', default=PARAMS['hidden1'], type=int,
                        help="Number of neurons on first layer")
    parser.add_argument('--hidden2', default=PARAMS['hidden2'], type=int,
                        help="Number of neurons on second layer")
    # NOTE(review): --epoch reuses the --hidden2 help text; presumably meant
    # "Number of epochs" -- confirm (help strings not changed here).
    parser.add_argument('--epoch', default=PARAMS['epoch'], type=int,
                        help="Number of neurons on second layer")
    parser.add_argument('--cpu', default=False,
                        action='store_true',
                        help='Run on cpu instead of gpu')
    parser.add_argument('--upload_saved', default=False,
                        action='store_true',
                        help='Whether to use saved model or new')
    args = parser.parse_args()
    if args.log:
        # neptune is imported lazily so runs without --log do not need it installed
        import neptune
        neptune.init('Pollutants/sandbox')
    # Override the shared PARAMS dict in place with CLI-provided values.
    PARAMS['epoch'] = args.epoch
    PARAMS['hidden1'] = args.hidden1
    PARAMS['hidden2'] = args.hidden2
    PARAMS['batch_size'] = args.batch_size
    val_test_size = 0.1
    if args.log:
        neptune.create_experiment(name='example_with_parameters',
                                  params=PARAMS,
                                  upload_stdout=True,
                                  upload_stderr=True,
                                  send_hardware_metrics=True,
                                  upload_source_files='**/*.py')
        neptune.set_property("val_test_size", val_test_size)
    if not args.real:
        # Toy example: no adjacency path; split cached per batch size.
        run = RunDecagonToy()
        run.run(adj_path=None, path_to_split=f'data/split/toy/{PARAMS["batch_size"]}',
                val_test_size=val_test_size,
                batch_size=PARAMS['batch_size'], num_epochs=PARAMS['epoch'],
                dropout=PARAMS['dropout'], max_margin=PARAMS['max_margin'],
                print_progress_every=150, log=args.log, on_cpu=args.cpu,
                upload_saved=args.upload_saved)
    else:
        # Real data: side effects with < min_se_freq occurrences are filtered out.
        run = RunDecagonReal(combo_path=f'{INPUT_FILE_PATH}/bio-decagon-combo.csv',
                             ppi_path=f'{INPUT_FILE_PATH}/bio-decagon-ppi.csv',
                             mono_path=f'{INPUT_FILE_PATH}/bio-decagon-mono.csv',
                             targets_path=f'{INPUT_FILE_PATH}/bio-decagon-targets-all.csv',
                             min_se_freq=500, min_se_freq_mono=40)
        run.run(path_to_split=f'data/split/real/{PARAMS["batch_size"]}',
                val_test_size=val_test_size, batch_size=PARAMS['batch_size'],
                num_epochs=PARAMS['epoch'], dropout=PARAMS['dropout'],
                max_margin=PARAMS['max_margin'],
                print_progress_every=150, adj_path='data/adj/real',
                log=args.log, on_cpu=args.cpu, upload_saved=args.upload_saved)
    if args.log:
        neptune.stop()
| 2.21875 | 2 |
python/labs/quick-banking-app/starter-code/banking.py | silpillasil/DANK_CSSI_STUFFS | 1 | 12768908 | <gh_stars>1-10
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Replace "pass" with your code
import time
class BankAccount(object):
    """A labelled bank account with deposit/withdraw/transfer operations.

    This is a console exercise: every mutator prints a status message and
    silently refuses invalid amounts rather than raising.
    """

    def __init__(self, label, balance):
        self.label = label      # human-readable account name
        self.balance = balance  # current funds

    def __str__(self):
        # Bug fix: the original also print()-ed the string here, so printing
        # an account emitted the text twice; __str__ must be side-effect free.
        # Also adds the missing space after "Balance:".
        return "Label: {label}\nBalance: {bal}".format(
            label=self.label, bal=self.balance)

    def withdraw(self, amount):
        """Remove *amount* from the balance if it is non-negative and affordable."""
        if amount < 0:
            # Check negativity first so a negative request is always reported
            # as invalid (the original compared it against the balance first).
            print("Sorry, that is an invalid amount.")
        elif amount > self.balance:
            print("Sorry, you require $" + str(amount - self.balance) + " in order to withdraw")
        else:
            self.balance -= amount
            print("Transaction Successful! You have ${bal} remaining in your account.".format(bal=self.balance))

    def deposit(self, amount):
        """Add *amount* to the balance if it is non-negative."""
        if amount < 0:
            print("Sorry, that is an invalid amount.")
        else:
            self.balance += amount
            print("Transaction Successful! You have ${bal} remaining in your account.".format(bal=self.balance))

    def rename(self, new_label):
        """Change the account label; empty labels are rejected."""
        if new_label == "":
            print("Please provide a valid label.")
            return
        self.label = new_label
        print("Label successfully changed!")

    def transfer(self, dest_account, amount):
        """Move *amount* from this account into *dest_account*."""
        if amount < 0 or amount > self.balance:
            print("Sorry, invalid transfer.")
            return
        self.balance -= amount
        dest_account.deposit(amount)  # deposit() prints its own confirmation
        print("Transfer successful!")
class Transaction(object):
    """Record of a single account operation (time, type, amount, destination).

    Bug fix: the original declared ``class Transaction(self)``, which raises
    NameError at import time because ``self`` is undefined at class scope.
    """

    def __init__(self, time, type, amount, dest_account=None):
        # ``time`` and ``type`` shadow builtins, but the parameter names are
        # kept for interface compatibility with existing callers.
        self.time = time                  # when the transaction happened
        self.type = type                  # e.g. "deposit", "withdraw", "transfer"
        self.amount = amount              # amount of money moved
        self.dest_account = dest_account  # only set for transfers
| 3.546875 | 4 |
src/colourutils.py | erichards97/categorical-colour-calendar | 0 | 12768909 | from matplotlib.colors import hsv_to_rgb, to_hex
def get_n_colours(n, s=0.5, v=0.95):
    """Return *n* evenly spaced hues as RGB triples at saturation *s*, value *v*."""
    colours = []
    for hue_index in range(n):
        colours.append(hsv_to_rgb((hue_index / n, s, v)))
    return colours
def extend_colour_map(data, colour_map, date_colour):
missing_values = [x for x in data.dropna().unique() if x not in colour_map] # All events that don't have a specified colour
if date_colour is None: # If the default date square colour isn't specified, we should generate this too
new_colours = get_n_colours(len(missing_values)+1)
date_colour = new_colours.pop()
else:
new_colours = get_n_colours(len(missing_values))
new_colours_map = {x[0]: x[1] for x in zip(missing_values, new_colours)} # Match the events and newly generated colours
colour_map = {k: to_hex(c) for k, c in {**colour_map, **new_colours_map}.items()} # Concat dicts and convert all colours to hex
return colour_map, to_hex(date_colour)
| 3.015625 | 3 |
_teaching/csci127-summer-2020/readings/activities/alternating.py | lgw2/lgw2.github.io | 0 | 12768910 | <reponame>lgw2/lgw2.github.io
def alternating(list_of_ints):
    """Class-exercise stub -- not yet implemented, so it always returns None.

    TODO: implement the intended "alternating" behaviour (the specification
    is not shown in this file).
    """
    pass


# Exercise driver: prints None three times until the stub above is implemented.
print(alternating([1, 2, 3, 4]))
print(alternating([10, 11, 1, 12]))
print(alternating([10, 21, 22, -5, 100, 101, 2]))
| 3.3125 | 3 |
helloWorld/helloWorldApp/migrations/0011_suggestion_upvote.py | jcheon/reddit_clone | 4 | 12768911 | # Generated by Django 2.2.5 on 2019-10-27 02:27
from django.conf import settings
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django's makemigrations: adds the many-to-many
    # ``upvote`` field linking each Suggestion to the users who upvoted it.
    # Generated operations should not be hand-edited.

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('helloWorldApp', '0010_auto_20191025_2008'),
    ]

    operations = [
        migrations.AddField(
            model_name='suggestion',
            name='upvote',
            field=models.ManyToManyField(blank=True, related_name='sugg_upvote', to=settings.AUTH_USER_MODEL),
        ),
    ]
| 1.539063 | 2 |
sensehat_dashboard/pi/environment.py | gdmgent-IoT-1920/labo-3-firebase-jensderyckere | 0 | 12768912 | from sense_hat import SenseHat
import threading
import firebase_admin
from firebase_admin import credentials, firestore
# constants
COLLECTION = 'raspberry'  # Firestore collection holding device documents
DOCUMENT = 'omgeving'     # target document name ("environment" in Dutch)
# firebase: authenticate with a service-account key file
cred = credentials.Certificate("../config/firebase_admin.json")
firebase_admin.initialize_app(cred)
# connect firestore and keep a reference to the document we update
db = firestore.client()
pi_ref = db.collection(COLLECTION).document(DOCUMENT)
# sense
sense = SenseHat()
# sensors -- read ONCE at import time.
# NOTE(review): `data` is built from these one-shot readings, so the periodic
# send_data() below re-sends the same initial values forever; confirm whether
# the sensors were meant to be re-read on every upload.
temp = sense.get_temperature()
hum = sense.get_humidity()
temp_hum = sense.get_temperature_from_humidity()
temp_pres = sense.get_temperature_from_pressure()
pres = sense.get_pressure()
data = {
    u'temperature' : temp,
    u'humidity' : hum,
    u'humidity temperature' : temp_hum,
    u'pressure' : pres,
    u'pressure temperature' : temp_pres,
}
# interval
def set_interval(func, sec):
    """Run *func* every *sec* seconds by chaining threading.Timer objects.

    Returns the first Timer so the caller can cancel the initial tick;
    every subsequent tick re-arms itself before invoking the callback.
    """
    def _tick():
        # Re-arm the next timer first, then fire the callback -- this matches
        # the original scheduling order.
        set_interval(func, sec)
        func()

    timer = threading.Timer(sec, _tick)
    timer.start()
    return timer
# firebase send data
def send_data():
    """Upload the module-level `data` dict to the Firestore document."""
    pi_ref.set(data)
    print('aangepast')  # Dutch: "updated"

# Upload every 300 seconds (5 minutes); keep a handle to the first timer
# so it could be cancelled.
timer = set_interval(send_data, 300)
tester.py | vishwasmehra/Flask-Chat-interface | 1 | 12768913 | # -*- coding: utf-8 -*-
"""
Created on Sat Sep 15 17:25:09 2018
@author: Heller
"""
import os
# 1-based index of the note file to display; kept as a string then cast,
# mirroring how it would arrive from user input.
inde="1"
inde=int(inde)
flag=0
# Walk notes/ and remember the path of the inde-th entry.
# NOTE(review): os.listdir order is arbitrary, so "the first file" is not
# deterministic; and if inde exceeds the number of files, `file` is never
# bound and the open() below raises NameError -- confirm intended behaviour.
for f in os.listdir("notes/"):
    flag=flag+1
    if(flag==inde):
        file="notes/"+f
with open(file) as fa:
    content = fa.readlines()
content=[x.strip() for x in content]
rep=""
# Join the stripped lines with HTML line breaks for display.
for i in content:
    rep=rep+i+"<br>"
print(rep)
hatebu2pocket.py | naimasa/hatebu2pocket | 0 | 12768914 | # -*- coding: utf-8 -*-
import sys
import pocket
from xml.etree.ElementTree import *
import json
# Expect exactly one argument: the Hatena Bookmark Atom export file.
param = sys.argv
if (len(param) < 2):
    print "invalid auguments"
    exit(1)
# parse xml: collect every bookmarked URL (Atom <link rel="related"> entries)
infile = param[1]
tree = parse(infile)
elem = tree.getroot()
list = []  # NOTE(review): shadows the builtin `list`; left unchanged
for e in elem.findall(".//{http://purl.org/atom/ns#}link[@rel='related']"):
    url = e.get("href");
    list.append({'action' : 'add', 'url' : url})
# load to pocket (bulk add) -- credentials are placeholders here
api = pocket.Api(consumer_key='<KEY>',
                 access_token='<PASSWORD>access_token')
actions = api.send(json.dumps(list));
# print results: report any URL the Pocket API did not accept (non-200)
for action in actions:
    if action["response_code"] != "200":
        print action["response_code"] + ":" + action["resolved_url"]
| 2.453125 | 2 |
lib/python2.7/site-packages/pyscope/simccdcamera2.py | leschzinerlab/myami-3.2-freeHand | 0 | 12768915 | import copy
import ccdcamera
import numpy
import random
random.seed()
import time
import remote
import os
from pyami import mrc
import itertools
from pyscope import falconframe
FRAME_DIR = '.'	# directory where raw frame stacks are written
rawtype = numpy.uint32	# dtype of simulated raw (unsummed) pixel data
frametype = numpy.uint8	# NOTE(review): appears unused; frames are saved as int8 via convertToInt8
idcounter = itertools.cycle(range(100))	# two-digit suffix generator for frame file names
has_energy_filter = False	# toggles availability of the energy-filter methods
class SimCCDCamera(ccdcamera.CCDCamera):
	"""Software-simulated CCD camera for testing without hardware.

	Generates noisy synthetic images; geometry (binning/offset/dimension),
	exposure settings and an optional energy filter are tracked in instance
	state just like a real camera driver.
	"""
	name = 'SimCCDCamera'
	binning_limits = [1,2,4,8]
	binmethod = 'exact'
	def __init__(self):
		self.unsupported = []
		super(SimCCDCamera,self).__init__()
		self.pixel_size = {'x': 2.5e-5, 'y': 2.5e-5}	# meters per pixel
		self.exposure_types = ['normal', 'dark', 'bias']
		self.binning = {'x': 1, 'y': 1}
		self.offset = {'x': 0, 'y': 0}
		self.dimension = copy.copy(self.getCameraSize())
		self.exposure_time = 0.2	# stored in seconds; get/set use milliseconds
		self.exposure_type = 'normal'
		self.energy_filter = False
		self.energy_filter_width = 0.0
		self.views = ('square', 'empty')
		self.view = 'square'
		#self.view = 'empty'
		self.inserted = True
		# Without an energy filter, hide the filter API entirely (see
		# __getattribute__ below).
		if not has_energy_filter:
			self.unsupported = [
				'getEnergyFilter',
				'setEnergyFilter',
				'getEnergyFilterWidth',
				'setEnergyFilterWidth',
				'alignEnergyFilterZeroLossPeak',
			]
	def __getattribute__(self, attr_name):
		# Make names listed in self.unsupported look like they do not exist;
		# object.__getattribute__ is used to avoid infinite recursion.
		if attr_name in object.__getattribute__(self, 'unsupported'):
			raise AttributeError('attribute not supported')
		return object.__getattribute__(self, attr_name)
	def getRetractable(self):
		# The simulated camera pretends to be retractable.
		return True
	def getInserted(self):
		return self.inserted
	def setInserted(self, value):
		self.inserted = value
	def setView(self, view):
		# view selects the synthetic sample: 'square' or 'empty'
		self.view = view
	def getView(self):
		return self.view
	def getViews(self):
		return self.views
	def getBinnedMultiplier(self):
		# total number of physical pixels combined into one binned pixel
		binning = self.getBinning()
		return binning['x']*binning['y']
	def getBinning(self):
		return copy.copy(self.binning)
	def setBinning(self, value):
		# Validate each provided axis against the allowed binnings before
		# applying anything; missing axes keep their current value.
		for axis in self.binning.keys():
			try:
				if value[axis] not in self.getCameraBinnings():
					raise ValueError('invalid binning')
			except KeyError:
				pass
		for axis in self.binning.keys():
			try:
				self.binning[axis] = value[axis]
			except KeyError:
				pass
	def getOffset(self):
		return copy.copy(self.offset)
	def setOffset(self, value):
		# Offsets must lie inside the camera; missing axes are left alone.
		for axis in self.offset.keys():
			try:
				if value[axis] < 0 or value[axis] >= self.getCameraSize()[axis]:
					raise ValueError('invalid offset')
			except KeyError:
				pass
		for axis in self.offset.keys():
			try:
				self.offset[axis] = value[axis]
			except KeyError:
				pass
	def getDimension(self):
		return copy.copy(self.dimension)
	def setDimension(self, value):
		# Dimensions must be at least 1 and fit on the camera.
		for axis in self.dimension.keys():
			try:
				if value[axis] < 1 or value[axis] > self.getCameraSize()[axis]:
					raise ValueError('invalid dimension')
			except KeyError:
				pass
		for axis in self.dimension.keys():
			try:
				self.dimension[axis] = value[axis]
			except KeyError:
				pass
	def getExposureTime(self):
		# external API uses milliseconds, internal storage is seconds
		return self.exposure_time*1000.0
	def setExposureTime(self, value):
		if value < 0:
			raise ValueError('invalid exposure time')
		self.exposure_time = value/1000.0
	def getExposureTypes(self):
		return self.exposure_types
	def getExposureType(self):
		return self.exposure_type
	def setExposureType(self, value):
		if value not in self.exposure_types:
			raise ValueError('invalid exposure type')
		self.exposure_type = value
	def _getImage(self):
		"""Simulate one acquisition: validate geometry, sleep for the
		exposure time, record the mid-exposure timestamp and return a
		synthetic image."""
		if not self.validateGeometry():
			raise ValueError('invalid image geometry')
		for axis in ['x', 'y']:
			if self.dimension[axis] * self.binning[axis] > self.getCameraSize()[axis]:
				raise ValueError('invalid dimension/binning combination')
		columns = self.dimension['x']
		rows = self.dimension['y']
		shape = (rows, columns)
		t0 = time.time()
		## exposure time
		time.sleep(self.exposure_time)
		t1 = time.time()
		# timestamp at the midpoint of the exposure
		self.exposure_timestamp = (t1 + t0) / 2.0
		return self.getSyntheticImage(shape)
	def getSyntheticImage(self,shape):
		"""Return a uint16 gaussian-noise image; non-dark exposures scale
		with exposure time and contain a brighter half-size square at a
		random position."""
		dark_mean = 1.0
		bright_scale = 10
		if self.exposure_type != 'dark':
			mean = self.exposure_time * 1000.0 *bright_scale + dark_mean
			sigma = 0.01 * mean
		else:
			mean = dark_mean
			sigma = 0.1 * mean
		image = numpy.random.normal(mean, sigma, shape)
		if self.exposure_type != 'dark':
			row_offset = random.randint(-shape[0]/16, shape[0]/16) + shape[0]/4
			column_offset = random.randint(-shape[1]/16, shape[1]/16) + shape[0]/4
			image[row_offset:row_offset+shape[0]/2,
				column_offset:column_offset+shape[1]/2] += 0.5 * mean
		image = numpy.asarray(image, dtype=numpy.uint16)
		return image
	def getEnergyFiltered(self):
		return has_energy_filter
	def getEnergyFilter(self):
		return self.energy_filter
	def setEnergyFilter(self, value):
		self.energy_filter = bool(value)
	def getEnergyFilterWidth(self):
		return self.energy_filter_width
	def setEnergyFilterWidth(self, value):
		self.energy_filter_width = float(value)
	def alignEnergyFilterZeroLossPeak(self):
		# no-op in the simulator
		pass
	def getPixelSize(self):
		return dict(self.pixel_size)
class SimFrameCamera(SimCCDCamera):
	"""Simulated frame-saving camera: builds an exposure as a series of
	frames that can optionally be written to an MRC stack in FRAME_DIR
	while a subset of them is summed into the returned image."""
	name = 'SimFrameCamera'
	def __init__(self):
		super(SimFrameCamera,self).__init__()
		# NOTE(review): setFrameTime() stores seconds, so this initial 200
		# looks like an unconverted millisecond value -- confirm.
		self.frame_time = 200
		self.save_frames = False
		self.alignframes = False
		self.alignfilter = 'None'
		self.rawframesname = 'frames'
		self.useframes = ()	# frame indices to include in the summed image
	def _simBias(self, shape):
		"""Synthetic bias image: a repeating 100..114 ramp plus gaussian noise."""
		bias = numpy.arange(100,115)
		bias = numpy.resize(bias, shape)
		noise = numpy.random.normal(0.0, 2.0, shape)
		bias = bias + noise
		bias = numpy.asarray(bias, rawtype)
		#print 'BIAS', bias
		return bias
	def _simDark(self, shape, exptime):
		# return image: dark + bias
		## counts per second
		darkrate = numpy.array((0.1, 0.1, 0.1, 0.1, 0.2, 0.2, 0.2, 0.2), numpy.float32)
		dark = exptime * darkrate
		dark = numpy.resize(dark, shape)
		dark = dark + self._simBias(shape)
		dark = numpy.asarray(dark, rawtype)
		#print 'DARK', dark
		return dark
	def _simExposure(self, shape, exptime):
		# return image: dark + bias + exposure
		# light sensitivity in counts per second (scales with binned area)
		sensitivity = self.binning['x'] * self.binning['y'] * numpy.arange(2000, 2065)
		sensitivity = numpy.resize(sensitivity, shape)
		exposure = exptime * sensitivity
		noise = numpy.random.normal(0.0, 50.0, shape)
		exposure = exposure + noise
		final = self._simDark(shape, exptime) + exposure
		final = numpy.asarray(final, rawtype)
		#print 'EXP', final
		return final
	def _simNormal(self, shape, exptime):
		# return image: transparency * (dark + bias + exposure)
		final = self._simExposure(shape, exptime) * self._simSample(shape)
		final = numpy.asarray(final, rawtype)
		#print 'NORMAL', final
		return final
	def _simSample(self, shape):
		"""Transparency map of the simulated sample: uniform for 'empty',
		or 0.9 background with a darker (0.7) half-size square for 'square'."""
		if self.view == 'empty':
			transparency = numpy.ones(shape, dtype=numpy.float32)
		elif self.view == 'square':
			transparency = 0.9 * numpy.ones(shape, dtype=numpy.float32)
			row_offset = random.randint(-shape[0]/8, shape[0]/8) + shape[0]/4
			# NOTE(review): uses shape[0]/4 here rather than shape[1]/4 --
			# possibly intentional for square sensors; confirm.
			column_offset = random.randint(-shape[1]/8, shape[1]/8) + shape[0]/4
			transparency[row_offset:row_offset+shape[0]/2,
				column_offset:column_offset+shape[1]/2] = 0.7
		#print 'VIEW', transparency
		return transparency
	def convertToInt8(self,array):
		"""Rescale so the array maximum maps to 128 and cast to int8
		(keeps saved frame files small)."""
		# NOTE(review): `min` is assigned but unused and shadows the builtin.
		min = 0
		max = array.max()
		array = (array / (numpy.ones(array.shape)*max))*128
		array = numpy.asarray(array,numpy.int8)
		return array
	def custom_setup(self):
		'''
		Place holder for more setup
		'''
		pass
	def _getImage(self):
		"""Simulate a frame-based acquisition: generate one frame per frame
		bin, optionally append each to an MRC stack, and return the sum of
		the frames selected by self.useframes."""
		self.custom_setup()
		if not self.validateGeometry():
			raise ValueError('invalid image geometry')
		for axis in ['x', 'y']:
			if self.dimension[axis] * self.binning[axis] > self.getCameraSize()[axis]:
				raise ValueError('invalid dimension/binning combination')
		columns = self.dimension['x']
		rows = self.dimension['y']
		shape = (rows, columns)
		t0 = time.time()
		## exposure time
		time.sleep(self.exposure_time)
		t1 = time.time()
		self.exposure_timestamp = (t1 + t0) / 2.0
		nframes = self.getNumberOfFrames()
		exptime = self.frame_time
		# restrict useframes to valid indices; default to all frames
		if self.useframes:
			useframes = []
			for f in self.useframes:
				if 0 <= f < nframes:
					useframes.append(f)
		else:
			# use all frames in final image
			useframes = range(nframes)
		self.useframes = useframes
		print 'SAVERAWFRAMES', self.save_frames
		if self.save_frames:
			# timestamped unique name for this acquisition's frame stack
			self.rawframesname = time.strftime('frames_%Y%m%d_%H%M%S')
			self.rawframesname += '_%02d' % (idcounter.next(),)
		else:
			# no frames requested: fall back to a single synthetic image
			return self.getSyntheticImage(shape)
		sum = numpy.zeros(shape, numpy.float32)
		for i in range(nframes):
			if self.exposure_type == 'bias':
				frame = self._simBias(shape)
			elif self.exposure_type == 'dark':
				frame = self._simDark(shape, exptime)
			elif self.exposure_type == 'normal':
				frame = self._simNormal(shape, exptime)
			else:
				raise RuntimeError('unknown exposure type: %s' % (self.exposure_type,))
			#Keep it small
			frame = self.convertToInt8(frame)
			mrcname = '.mrc'
			fname = os.path.join(FRAME_DIR,self.rawframesname + mrcname)
			if self.save_frames:
				print 'SAVE', i
				if i == 0:
					mrc.write(frame, fname)
				else:
					mrc.append(frame, fname)
			if i in self.useframes:
				print 'SUM', i
				sum += frame
		return sum
	def getNumberOfFrames(self):
		# number of frame bins that fit in the exposure; at least 1
		if self.frame_time:
			nframes = int(round(self.exposure_time / self.frame_time))
			return nframes
		else:
			return 1
	def getFrameTime(self):
		# external API in milliseconds, stored in seconds
		ms = self.frame_time * 1000.0
		return ms
	def setFrameTime(self,ms):
		seconds = ms / 1000.0
		self.frame_time = seconds
	def getSaveRawFrames(self):
		'''Save or Discard'''
		return self.save_frames
	def setSaveRawFrames(self, value):
		'''True: save frames, False: discard frames'''
		self.save_frames = bool(value)
	def getAlignFrames(self):
		return self.alignframes
	def setAlignFrames(self, value):
		self.alignframes = bool(value)
	def getAlignFilter(self):
		return self.alignfilter
	def setAlignFilter(self, value):
		# falsy values normalize to the string 'None'
		if value:
			self.alignfilter = str(value)
		else:
			self.alignfilter = 'None'
	def setNextRawFramesName(self, value):
		self.rawframesname = value
	def getNextRawFramesName(self):
		return self.rawframesname
	def getPreviousRawFramesName(self):
		return self.rawframesname
	def setUseFrames(self, value):
		self.useframes = value
	def getUseFrames(self):
		return self.useframes
class SimFalconFrameCamera(SimFrameCamera):
	"""Simulated FEI Falcon camera: frame handling is driven by a Falcon
	frame-config XML (falconframe module) rather than a free frame time."""
	name = 'SimFalconFrameCamera'
	def __init__(self):
		super(SimFalconFrameCamera,self).__init__()
		self.frameconfig = falconframe.FalconFrameConfigXmlMaker(simu=True)
		self.movie_exposure = 500.0	# movie exposure in ms
		self.start_frame_number = 1
		self.end_frame_number = 7
		self.equal_distr_frame_number = 0	# 0 disables equal distribution
	def getNumberOfFrames(self):
		# frame count comes from the Falcon config; 1 when not saving frames
		if self.save_frames:
			return self.frameconfig.getNumberOfFrameBins()
		else:
			return 1
	def calculateMovieExposure(self):
		'''
		Movie Exposure is the exposure time to set to ConfigXmlMaker in ms
		'''
		self.movie_exposure = self.end_frame_number * self.frameconfig.getBaseFrameTime() * 1000.0
		self.frameconfig.setExposureTime(self.movie_exposure / 1000.0)
	def getReadoutDelay(self):
		'''
		Integrated image readout delay is always base_frame_time.
		There is no way to change it.
		'''
		return None
	def validateUseFramesMax(self,value):
		'''
		Return end frame number valid for the integrated image exposure time.
		'''
		if not self.save_frames:
			return 1
		# find number of frames the exposure time will give as the maximun
		self.frameconfig.setExposureTime(self.exposure_time)
		max_input_frame_value = self.frameconfig.getNumberOfAvailableFrames() - 1
		# clamp to [1, max_input_frame_value]
		return min(max_input_frame_value, max(value,1))
	def setUseFrames(self, frames):
		'''
		UseFrames gui for Falcon is a tuple of base_frames that defines
		the frames used in the movie. For simplicity in input, we only
		use the min number as the movie delay and max number as the highest
		frame number to include.
		'''
		# NOTE(review): if frames is None, the len(frames) test below raises
		# TypeError; callers presumably always pass a tuple -- confirm.
		if frames:
			if len(frames) > 1:
				self.frameconfig.setFrameReadoutDelay(min(frames))
			else:
				self.frameconfig.setFrameReadoutDelay(1)
			self.end_frame_number = self.validateUseFramesMax(max(frames))
		else:
			# default movie to start at frame 1 ( i.e., not include roll-in)
			self.frameconfig.setFrameReadoutDelay(1)
			# use impossible large number to get back value for exposure time
			self.end_frame_number = self.validateUseFramesMax(1000)
		self.start_frame_number = self.frameconfig.getFrameReadoutDelay()
		# set equally distributed frames starting frame number
		# (the middle value of a 3+ element tuple, if given)
		if len(frames) >2:
			framelist = list(frames)
			framelist.sort()
			self.equal_distr_frame_number = framelist[1]
		else:
			self.equal_distr_frame_number = 0
		self.frameconfig.setEquallyDistributedStartFrame(self.equal_distr_frame_number)
		self.calculateMovieExposure()
		# self.useframes is used in simulater to generate simulated sum image
		self.useframes = tuple(range(self.start_frame_number-self.frameconfig.internal_readout_delay, self.end_frame_number-self.frameconfig.internal_readout_delay))
	def getUseFrames(self):
		# NOTE(review): implicitly returns None when save_frames is False --
		# confirm callers handle that.
		if self.save_frames:
			if self.equal_distr_frame_number > self.start_frame_number:
				return (self.start_frame_number,self.equal_distr_frame_number,self.end_frame_number)
			else:
				return (self.start_frame_number,self.end_frame_number)
	def setFrameTime(self,ms):
		'''
		OutputFrameTime is not detrmined by the user
		'''
		pass
	def getFrameTime(self):
		'''
		Output frame time is the average time of all frame bins.
		'''
		ms = self.movie_exposure / self.getNumberOfFrames()
		return ms
	def getPreviousRawFramesName(self):
		return self.frameconfig.getFrameDirName()
	def custom_setup(self):
		# called from SimFrameCamera._getImage before each acquisition:
		# build the real or dummy Falcon frame config for this exposure
		self.calculateMovieExposure()
		movie_exposure_second = self.movie_exposure/1000.0
		if self.save_frames:
			self.frameconfig.makeRealConfigFromExposureTime(movie_exposure_second,self.equal_distr_frame_number,self.start_frame_number)
		else:
			self.frameconfig.makeDummyConfig(movie_exposure_second)
class SimOtherCCDCamera(SimCCDCamera):
	"""Variant simulated camera: floor binning and 10x brighter images."""
	name = 'SimOtherCCDCamera'
	def __init__(self):
		super(SimOtherCCDCamera,self).__init__()
		self.binning_limits = [1,2,4,8]
		self.binmethod = 'floor'
	def _getImage(self):
		# same synthetic image as the base class, scaled by 10
		im = SimCCDCamera._getImage(self)
		im = 10 * im
		return im
class SimK2CountingCamera(SimFrameCamera):
	"""Simulated K2 direct detector operating in counting mode."""
	name = 'SimK2CountingCamera'

	def __init__(self):
		super(SimK2CountingCamera, self).__init__()
		self.binning_limits = [1, 2, 4, 8]
		self.binmethod = 'floor'
class SimK2SuperResCamera(SimFrameCamera):
	"""Simulated K2 in super-resolution mode (no binning allowed)."""
	name = 'SimK2SuperResCamera'

	def __init__(self):
		super(SimK2SuperResCamera, self).__init__()
		self.binning_limits = [1]
		self.binmethod = 'floor'
| 1.960938 | 2 |
src/setup.py | anchepiece/pydukeenergy | 24 | 12768916 | from setuptools import setup, find_packages
# Package metadata for pydukeenergy (unofficial Duke Energy API client).
setup(
    name='pydukeenergy',
    version='0.0.6',
    description='Interface to the unofficial Duke Energy API',
    url='http://github.com/w1ll1am23/pyduke-energy',
    author='<NAME>',
    license='MIT',
    install_requires=['requests>=2.0', 'beautifulsoup4>=4.6.0'],
    tests_require=['mock'],
    test_suite='tests',
    packages=find_packages(exclude=["dist", "*.test", "*.test.*", "test.*", "test"]),
    zip_safe=True,
)
| 1.3125 | 1 |
aquaticore/taxa/urls.py | rockerBOO/aquaticore | 0 | 12768917 | <reponame>rockerBOO/aquaticore
from django.conf.urls.defaults import *
# URL routes for the taxa app, mapped to views in aquaticore.taxa.views.
# NOTE(review): ``django.conf.urls.defaults`` and ``patterns()`` are the
# legacy Django <= 1.7 API (removed in later releases) -- confirm the
# project's Django version before modernising this to a plain list.
urlpatterns = patterns('aquaticore.taxa.views',
  (r'^$', 'index'),
  (r'^species/(?P<species_name>[^\/]+)/?', 'species_detail'),
  (r'^species/?', 'species_list'),
  (r'^family/(?P<family_name>[^\/]+)/?', 'family_detail'),
  (r'^order/(?P<order_name>[^\/]+)/?', 'order_detail'),
)
| 1.648438 | 2 |
app/nextmove.py | xm-evanguo/starter-snake-python | 0 | 12768918 | import collections
import random
def next_move_state(map, head_xy, direction):
    """Return the grid value of the cell one step from ``head_xy``.

    ``direction`` is one of 'left', 'right', 'up' or 'down'; ``head_xy``
    is an (x, y) pair and ``map`` is indexed as map[y][x].
    """
    offsets = {
        'left': (-1, 0),
        'right': (1, 0),
        'up': (0, -1),
        'down': (0, 1),
    }
    dx, dy = offsets.get(direction)
    return map[head_xy[1] + dy][head_xy[0] + dx]
def next_direction(map, head_xy, move_xy):
    """Name the direction that moves the head from ``head_xy`` to ``move_xy``.

    ``map`` is unused but kept for interface compatibility with callers.
    The two cells must be orthogonally adjacent, otherwise None is returned.
    """
    delta = (move_xy[0] - head_xy[0], move_xy[1] - head_xy[1])
    names = {
        (-1, 0): 'left',
        (1, 0): 'right',
        (0, -1): 'up',
        (0, 1): 'down',
    }
    return names.get(delta)
def shortest_path(map, starting, goal):
    """Breadth-first search for the shortest path on a grid.

    Parameters:
        map: grid indexed as map[y][x]; cells with value >= 2 are blocked.
        starting, goal: (x, y) tuples.

    Returns:
        List of (x, y) tuples from ``starting`` to ``goal`` inclusive, or
        None when the goal is unreachable.
    """
    # Work on a copy: the original implementation wrote the goal marker
    # straight into the caller's grid (``tmp_map = map`` was an alias).
    grid = [list(row) for row in map]
    # Make the goal cell walkable unless it is a special (7) cell.
    if grid[goal[1]][goal[0]] != 7:
        grid[goal[1]][goal[0]] = 1
    height = len(grid)
    width = len(grid[0]) if height else 0  # support non-square boards
    queue = collections.deque([[(starting[0], starting[1])]])
    seen = {(starting[0], starting[1])}
    while queue:
        path = queue.popleft()
        x, y = path[-1]
        if (x, y) == goal:
            return path
        for x2, y2 in ((x + 1, y), (x - 1, y), (x, y + 1), (x, y - 1)):
            if (0 <= x2 < width and 0 <= y2 < height
                    and grid[y2][x2] < 2 and (x2, y2) not in seen):
                queue.append(path + [(x2, y2)])
                seen.add((x2, y2))
    return None  # goal unreachable
def random_move(map, head_xy):
    """Pick a random safe direction for the snake head at ``head_xy``.

    Directions leading off the board are discarded first; then directions
    whose target cell holds an obstacle (value > 1) are dropped as long as
    at least one candidate remains.  The final pick is uniformly random.
    """
    directions = ['left', 'right', 'up', 'down']
    # Board-edge checks: use == (the original relied on CPython small-int
    # identity via ``is``, which is an implementation detail).
    if head_xy[0] == 0:
        directions.remove('left')
    elif head_xy[0] == len(map) - 1:
        directions.remove('right')
    if head_xy[1] == 0:
        directions.remove('up')
    elif head_xy[1] == len(map) - 1:
        directions.remove('down')
    # Iterate over a snapshot: removing from the list being iterated (as
    # the original code did) silently skips candidates, so unsafe moves
    # could survive the filter.
    for direction in list(directions):
        if next_move_state(map, head_xy, direction) > 1 and len(directions) > 1:
            directions.remove(direction)
    return random.choice(directions)
| 3.59375 | 4 |
TensorCook.py | AmanKishore/TensorCook | 1 | 12768919 |
from google_images_download import google_images_download
# Food categories to harvest imagery for.
food_list = ['food with protein', 'unhealthy food', 'carbs',
             'food with sugar', 'vegetables']

for food in food_list:
    # Download up to 1000 JPEGs per category into the training folder.
    downloader = google_images_download.googleimagesdownload()
    paths = downloader.download({
        "keywords": food,
        "format": "jpg",
        "limit": 1000,
        "output_directory": "./tf_files/food_images",
    })
canny.py | HuangKaiHuan/edge_detection | 3 | 12768920 | <filename>canny.py
#!/usr/bin/python2.7
# -*- coding:utf-8 -*-
__author__ = 'hkh'
__date__ = '05/02/2018'
__version__ = 1.0
"""
主要参考: 数字图像处理(第三版)10.2.6,<NAME>
"""
import cv2
import numpy as np
import time
def geodesicDilation(src, mask, kernel, iteration=-1):
    """Geodesic dilation of the marker ``src`` constrained by ``mask``.

    With a positive ``iteration`` count the dilate/min cycle runs exactly
    that many times; otherwise it iterates until the marker image stops
    changing (morphological reconstruction by dilation).
    """
    if iteration > 0:
        for _ in range(iteration):
            src = cv2.min(cv2.dilate(src=src, kernel=kernel), mask)
        return src
    while True:
        previous = src.copy()
        src = cv2.min(cv2.dilate(src=src, kernel=kernel), mask)
        # Stable when no pixel differs from the previous iteration.
        if 0 == cv2.compare(src1=src, src2=previous, cmpop=cv2.CMP_NE).sum():
            return src
def Canny_for_loop_imp(src, thresh1, thresh2):
    """Pure-python Canny edge detector using explicit per-pixel loops.

    Gradient magnitude/orientation come from 3x3 Sobel filters; non-maximum
    suppression keeps a pixel only when it dominates both neighbours along
    its gradient direction; hysteresis is a geodesic dilation of the
    strong-edge map (> thresh2) under the weak-edge mask (> thresh1).
    """
    assert thresh1 < thresh2
    Gx = cv2.Sobel(src=src, ddepth=cv2.CV_32F, dx=1, dy=0)
    Gy = cv2.Sobel(src=src, ddepth=cv2.CV_32F, dx=0, dy=1)
    # Gradient magnitude and orientation (degrees).
    magnitude, angle = cv2.cartToPolar(Gx, Gy, angleInDegrees=True)
    rows, cols = src.shape
    suppression_edge = np.zeros_like(magnitude)
    for i in range(1, rows-1):
        for j in range(1, cols-1):
            mag = magnitude[i, j]
            ang = angle[i, j] % 180  # fold direction into [0, 180)
            if mag >= thresh1:
                # Compare against the two neighbours along the quantised
                # gradient direction (0, 45, 90 or 135 degrees).
                if ang < 22.5 or ang >= 157.5:
                    if mag >= magnitude[i, j + 1] and mag >= magnitude[i, j - 1]:
                        suppression_edge[i, j] = mag
                elif 67.5 > ang >= 22.5:
                    if mag >= magnitude[i - 1, j - 1] and mag >= magnitude[i + 1, j + 1]:
                        suppression_edge[i, j] = mag
                elif 112.5 > ang >= 67.5:
                    if mag >= magnitude[i - 1, j] and mag >= magnitude[i + 1, j]:
                        suppression_edge[i, j] = mag
                elif 157.5 > ang >= 112.5:
                    if mag >= magnitude[i - 1, j + 1] and mag >= magnitude[i + 1, j - 1]:
                        suppression_edge[i, j] = mag
    # Hysteresis: grow strong edges through connected weak-edge pixels.
    strong_edge = ((suppression_edge > thresh2) * 255).astype('uint8')
    full_edge = ((suppression_edge > thresh1) * 255).astype('uint8')
    optimized_edge = geodesicDilation(strong_edge, full_edge, kernel=np.ones((3, 3)))
    return cv2.convertScaleAbs(src=optimized_edge)
def Canny_maxtrix_parallel_imp(src, thresh1, thresh2):
    """Vectorised Canny edge detector.

    Non-maximum suppression is performed on whole arrays: the magnitude
    image is shifted by one pixel in each of the 8 directions (via
    cv2.warpAffine translations), and each pixel is kept only when it is
    >= the max of its two neighbours along the quantised gradient
    direction.  Hysteresis is the same geodesic dilation as in the
    for-loop implementation.
    """
    assert thresh1 < thresh2
    Gx = cv2.Sobel(src=src, ddepth=cv2.CV_32F, dx=1, dy=0)
    Gy = cv2.Sobel(src=src, ddepth=cv2.CV_32F, dx=0, dy=1)
    magnitude, angle = cv2.cartToPolar(Gx, Gy, angleInDegrees=True)
    angle %= 180
    # Boolean masks selecting pixels whose gradient falls in each of the
    # four quantised directions.
    angle_0_mask = (angle < 22.5) | (angle >= 157.5)
    angle_45_mask = (67.5 > angle) & (angle >= 22.5)
    angle_90_mask = (112.5 > angle) & (angle >= 67.5)
    angle_135_mask = (157.5 > angle) & (angle >= 112.5)
    dsize = (src.shape[1], src.shape[0])
    # One-pixel translations of the magnitude image in all 8 directions.
    M = np.array([[1, 0, -1], [0, 1, 0]], dtype=np.float32)
    shift_left = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, 1], [0, 1, 0]], dtype=np.float32)
    shift_right = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, 0], [0, 1, -1]], dtype=np.float32)
    shift_up = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, 0], [0, 1, 1]], dtype=np.float32)
    shift_down = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, 1], [0, 1, 1]], dtype=np.float32)
    shift_right_down = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, -1], [0, 1, -1]], dtype=np.float32)
    shift_left_up = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, 1], [0, 1, -1]], dtype=np.float32)
    shift_right_up = cv2.warpAffine(magnitude, M, dsize)
    M = np.array([[1, 0, -1], [0, 1, 1]], dtype=np.float32)
    shift_left_down = cv2.warpAffine(magnitude, M, dsize)
    # Per-pixel max of the two opposing neighbours for each direction.
    shift_left_right_max = cv2.max(shift_left, shift_right)
    shift_up_down_max = cv2.max(shift_up, shift_down)
    shift_rd_lu_max = cv2.max(shift_right_down, shift_left_up)
    shift_ru_lf_max = cv2.max(shift_right_up, shift_left_down)
    # Zero out non-maximal pixels (multiplying by a 0/1 boolean mask).
    magnitude[angle_0_mask] *= (magnitude[angle_0_mask] >= shift_left_right_max[angle_0_mask])
    magnitude[angle_45_mask] *= (magnitude[angle_45_mask] >= shift_rd_lu_max[angle_45_mask])
    magnitude[angle_90_mask] *= (magnitude[angle_90_mask] >= shift_up_down_max[angle_90_mask])
    magnitude[angle_135_mask] *= (magnitude[angle_135_mask] >= shift_ru_lf_max[angle_135_mask])
    # Hysteresis by geodesic dilation of strong edges under the weak mask.
    strong_edge = ((magnitude > thresh2) * 255).astype('uint8')
    full_edge = ((magnitude > thresh1) * 255).astype('uint8')
    optimized_edge = geodesicDilation(strong_edge, full_edge, kernel=np.ones((3, 3)))
    return cv2.convertScaleAbs(src=optimized_edge)
def Canny(src, thresh1, thresh2, imp='for_loop'):
    """Dispatch to one of the Canny implementations.

    ``imp`` selects the pure-python loop version ('for_loop'), the
    vectorised version ('maxtrix_parallel'), or OpenCV's built-in
    ('opencv', with the L2 gradient norm).
    """
    assert imp in ('for_loop', 'maxtrix_parallel', 'opencv')
    if imp == 'for_loop':
        return Canny_for_loop_imp(src, thresh1, thresh2)
    if imp == 'maxtrix_parallel':
        return Canny_maxtrix_parallel_imp(src, thresh1, thresh2)
    return cv2.Canny(src, thresh1, thresh2, L2gradient=True)
if __name__ == '__main__':
    # Smoke test: run the vectorised python implementation and OpenCV's
    # Canny on a sample image, report timings and show the results.
    src = cv2.imread('./data/4.tif', 0)
    blur_img = cv2.GaussianBlur(src, ksize=(11, 11), sigmaX=0)
    t1 = time.time()
    res = Canny(src=blur_img, thresh1=20, thresh2=40, imp='maxtrix_parallel')
    t2 = time.time()
    res2 = Canny(src=blur_img, thresh1=20, thresh2=40, imp='opencv')
    t3 = time.time()
    print('python imp: ', t2 - t1)
    print('opencv imp: ', t3 - t2)
    cv2.namedWindow("src", cv2.WINDOW_NORMAL)
    cv2.namedWindow("res", cv2.WINDOW_NORMAL)
    cv2.namedWindow("res2", cv2.WINDOW_NORMAL)
    cv2.imshow("src", src)
    cv2.imshow("res", res)
    cv2.imshow("res2", res2)
    cv2.waitKey()
feeds/view/_utils.py | qq20004604/Mail-Report-System | 1 | 12768921 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from ..models import *
from django.utils import timezone
from django.db.models import Q
from user.models import UserModel
from mail_pusher.models import PuberMailPushHistory
from mail_pusher.utils import send_feeds_mail
from package.simple_log import log
from package.get_time import get_date_time
from config.switch import USER_FEED_SEND_LIMIT
from config.variable import USER_FEED_SEND_LIMIT_COUNT
def get_sendtime_cn(sendtime):
    """Map a feed send-state code to its Chinese display label."""
    labels = {
        'delay': '未要求推送',
        'plan': '推送中',
        'done': '推送完毕',
        'fail': '推送失败',
    }
    return labels.get(sendtime, '未知状态')
# Map a user-permission code to its Chinese display name.
def get_user_permission_cn(user_permission):
    """Return the Chinese label for a permission code, or a fallback
    label for unknown codes."""
    labels = {
        '01': '普通用户(只能接收邮件)',
        '02': '高级用户(可以接收邮件和创建up主账号)',
        '10': '管理员',
        '11': '超级管理员',
    }
    return labels.get(user_permission, '未知用户')
# Whether a permission code allows creating an uploader account.
# (A proper permission table would be better; this mirrors the CN-label maps.)
def can_create_uploader(user_permission):
    """Return True only for codes explicitly allowed to create uploader
    accounts; unknown codes are denied."""
    allowed = {
        '01': False,
        '02': True,
        '10': False,
        '11': False,
    }
    return allowed.get(user_permission, False)
# Map a push-action-type code to its Chinese display name.
def get_action_type_cn(action_type):
    """Return the Chinese label for a push action type.

    Unlike the original (which raised KeyError on unknown codes), this
    returns a fallback label, consistent with every other *_cn mapper in
    this module.
    """
    kv = {
        '01': '网页推送',
        '02': '密钥推送'
    }
    return kv.get(action_type, '未知推送方式')
# Map a subscription-type code to its Chinese display name.
def get_sub_type_cn(sub_type):
    """Return the Chinese label for a subscription type, or a fallback
    label for unknown codes."""
    labels = {
        '01': '邮件订阅',
        '02': 'QQ订阅',
    }
    return labels.get(sub_type, '未知订阅模式')
# Serialise an uploader (MsgPuber) record for API responses.
def get_msgpub_info(mp_info, has_key=False):
    """Build a plain dict view of an uploader record.

    The secret key is only included when ``has_key`` is exactly True
    (i.e. the requester owns the account).
    """
    info = {
        'id': mp_info.id,
        'user_id': mp_info.user_id,
        'create_time': mp_info.create_time.strftime('%Y-%m-%d %H:%M:%S'),
        'last_pub_time': mp_info.last_pub_time.strftime('%Y-%m-%d %H:%M:%S'),
        'name': mp_info.name,
        'default_allow_rec': mp_info.default_allow_rec,
        'allow_sub': mp_info.allow_sub,
    }
    if has_key is True:
        info['secret_key'] = mp_info.secret_key
    return info
# Return whether the logged-in user has an active subscription to ``upid``.
def had_user_sub(request, upid):
    """True only when a session user exists and has an active subscription
    to the uploader ``upid``; anonymous users always get False."""
    user_id = request.session.get('id', None)  # current user
    if user_id is None:
        return False
    subs = SubscribeFeeds.objects.filter(uploader_id=upid,
                                         user_id=int(user_id))
    if len(subs) == 0:
        return False
    return subs[0].is_active is True
# Whether the user may push a new feed message right now.
# Returns True when allowed, otherwise an error-message string.
def is_user_can_pub_feed(user_id):
    # Workflow:
    # 1. Look up the User row for the permission code and check, via
    #    can_create_uploader, whether pushing is allowed (anyone who may
    #    create uploader accounts may push).
    # 2. In theory more permission levels should be distinguished, e.g.
    #    different daily quotas per user class.
    # 3. The table lookup also lets an admin later ban specific users
    #    from pushing mail.
    # 4. [Special limit] each user, across all uploader accounts, may
    #    push at most USER_FEED_SEND_LIMIT_COUNT messages per day.
    user_info = UserModel.objects.filter(id=user_id)[0]
    can_create = can_create_uploader(user_info.user_permission)
    # Not allowed: return an error string.
    if can_create is False:
        return '你被禁止推送新消息,请联系管理员'
    # When the daily limit switch is off, pushing is always allowed.
    if USER_FEED_SEND_LIMIT is False:
        return True
    # Current time (used to select today's feeds).
    d = timezone.now()
    # This user's feeds from today that are pending or already pushed.
    feeds_list = Feeds.objects. \
        filter(user_id=user_id). \
        filter(Q(sendtime='plan') | Q(sendtime='done')). \
        filter(pub_time__year=d.year,
               pub_time__month=d.month,
               pub_time__day=d.day)
    # Reject when the daily quota is reached.
    if len(feeds_list) >= USER_FEED_SEND_LIMIT_COUNT:
        return '你今日推送邮件已达到上限:%s' % USER_FEED_SEND_LIMIT_COUNT
    else:
        return True
# Push a feed message immediately and record the outcome.
def feed_send_now(feed, mp_data, title=''):
    # Mark the feed as scheduled for immediate push.
    feed.set_plan()
    # Call the mail-push API.
    push_result = push_mail(feed_id=feed.id, title=title)
    # Push succeeded.
    if push_result['code'] == 200:
        # Mark the feed as sent.
        feed.set_sended()
        feed.save()
        # Update the uploader's last-publish timestamp.
        mp_data.set_last_pub()
        mp_data.save()
        return push_result
    else:
        # Push failed.
        # Mark this feed as failed.
        feed.set_send_failed()
        feed.save()
        return push_result
# Push the feed to its mail subscribers.
# Returns a dict with 'code' (200 on success, 0 on failure) and 'msg'.
def push_mail(feed_id, title=''):
    # Workflow:
    # 1. Look up Feeds by feed_id for user_id, uploader_id, pub_time, content.
    # 2. Look up MsgPuber by uploader_id for the uploader name.
    # 3. Query SubscribeFeeds by uploader_id with is_active=True and
    #    sub_type='01' to get the user_ids of all mail subscribers.
    # 4. Look up each subscriber in User to get an e-mail address
    #    (users without one are skipped).
    # 5. Collect the addresses into the recipient list.
    # 6. Assemble the mail body from name, pub_time, content, etc.
    # 7. Send the mail and collect the result (one bad address would stop
    #    the whole batch -- in theory impossible, since registration
    #    requires a verified e-mail code).
    # 8. Log the outcome and return the push result.
    # 1
    feed = Feeds.objects.filter(id=feed_id)[0]
    mail_info = {
        'user_id': feed.user_id,
        'uploader_id': feed.uploader_id,
        'pub_time': feed.pub_time.strftime('%Y-%m-%d %H:%M:%S'),
        'content': feed.content,
        'name': None
    }
    # 2
    puber_info = MsgPuber.objects.filter(id=mail_info['uploader_id'])[0]
    mail_info['name'] = puber_info.name
    # 3: subscribers that are active AND in e-mail mode.
    suber_list = SubscribeFeeds.objects.filter(uploader_id=mail_info['uploader_id'],
                                               is_active=True,
                                               puber_allow_rec=True,
                                               sub_type='01')
    # Nobody subscribed: report success with 0 recipients.
    if len(suber_list) == 0:
        return {
            'code': 200,
            'msg': '你成功推送了消息给 0 位邮件订阅者'
        }
    suber_id_list = [
        item.user_id for item in suber_list
    ]
    # 5: build the recipient e-mail list.
    suber_mail_list = []
    for user_id in suber_id_list:
        user_info = UserModel.objects.filter(id=user_id)[0]
        if user_info.email:
            suber_mail_list.append(user_info.email)
    # 6: assemble the mail body.
    content = [
        # The uploader entry links back to its page.
        '<a href="http://report.lovelovewall.com/?upid=%s">UP主:%s</a>' % (
            mail_info['uploader_id'],
            mail_info['name']
        ),
        '<b>消息推送时间:</b>',
        mail_info['pub_time'],
        '<b>消息推送类型:</b>',
        '立即推送',
        '<b>内容:</b>',
        mail_info['content']
    ]
    send_result = send_feeds_mail(receiver_list=suber_mail_list,
                                  title='%s。UP主:%s' % (title, mail_info['name']),
                                  content=content)
    is_success = False
    if send_result.code == 200:
        is_success = True
    else:
        # On failure, append a line to the failure log.
        with open('log/push_mail_fail.log', 'a')as f:
            f.write('time:%s||receiver:%s||feed_id:%s||fail_reason:%s\n' % (
                mail_info['pub_time'],
                str(suber_mail_list),
                feed_id,
                send_result.msg
            ))
    # Record one push-history row per recipient.
    for mail in suber_mail_list:
        PuberMailPushHistory.objects.create(rec_email=mail,
                                            uploader_id=mail_info['uploader_id'],
                                            feed_id=feed_id,
                                            pub_time=mail_info['pub_time'],
                                            success=is_success).save()
    if is_success is False:
        return {
            'code': 0,
            'msg': send_result.msg
        }
    else:
        return {
            'code': 200,
            'msg': '你成功推送了消息给 %s 位邮件订阅者' % len(suber_mail_list)
        }
# Check whether ``user_id`` may create an uploader account with the given
# name; returns True when allowed, otherwise an error-message string.
def is_user_can_create_uploader(user_id, uploader_name):
    """Permission + uniqueness check before creating an uploader account."""
    matches = UserModel.objects.filter(id=user_id)
    # Should be impossible for a logged-in user, but guard anyway.
    if len(matches) == 0:
        return '未知错误'
    if matches[0].user_permission != '02':
        return '当前用户无权限创建up主账号,请联系管理员'
    if len(MsgPuber.objects.filter(name=uploader_name)) > 0:
        return '已有同名账号,请使用其他名字'
    return True
# Create an uploader (MsgPuber) account for ``user_id``.
def create_uploader_account(user_id, uploader_name):
    """Create a MsgPuber row with a fresh secret key.

    Returns True on success; on failure the error is written to the log
    and False is returned.
    """
    try:
        mb = MsgPuber.objects.create(user_id=user_id,
                                     name=uploader_name)
        mb.make_secret_key()
        mb.save()
    except Exception as e:
        # Catch Exception, not BaseException: the original also swallowed
        # KeyboardInterrupt/SystemExit, which must be allowed to propagate.
        log('%s:创建用户账号失败,user_id=%s,uploader_name=%s,报错信息:%s' % (
            get_date_time(),
            user_id,
            uploader_name,
            str(e))
        )
        return False
    return True
| 1.992188 | 2 |
MuSCADeT/MCA.py | aymgal/MuSCADeT | 0 | 12768922 | """@package MuSCADeT
"""
from scipy import signal as scp
import numpy as np
import matplotlib.pyplot as plt
import astropy.io.fits as pf
import scipy.ndimage.filters as med
import MuSCADeT.pca_ring_spectrum as pcas
import MuSCADeT.wave_transform as mw
# Per-scale scaling factors applied to the noise standard deviation when
# thresholding wavelet coefficients (see mr_filter: NOISE_TAB[l] * sigma).
# Presumably the measured std of each wavelet scale for unit-variance white
# noise, first-generation transform -- TODO confirm against the transform.
NOISE_TAB = np.array([ 0.8907963 , 0.20066385, 0.08550751, 0.04121745, 0.02042497,
       0.01018976, 0.00504662, 0.00368314])
# Same per-scale factors for the second-generation transform (unused here).
NOISE_TAB_2G = np.array([ 0.94288346, 0.22998949, 0.10029194, 0.04860995, 0.02412084,
       0.01498695])
def mMCA(img, A,kmax, niter,mode = 'PCA', PCA = [2,40], harder = 0, pos = False,threshmode = 'mom',lvl = 0, PSF = None,
         soft = False, reweighting = 'none', alpha = [0,0], npca = 64, mask = [0,0], plot = False, noise_map = [0,0],
         newwave=1):
    """
    mMCA runs the MuSCADeT algorithm over a cube of multi-band images.
    INPUTS:
        img: multiband cube with size nbxn1xn2 where nb is the number of bands and n1xn2,
            the size of the images
        A: the mixing matrix. if mode is set to 'PCA', A will be ignored and can be set to 0
        kmax: detection threshold in units of noise standard deviation usually chosen between 3 and 5
        niter: number of iterations of the MuSCADeT algorithm
    OUTPUTS:
        S: extracted sources
        A: mixing matrix, either given by the user or estimate by PCA with option mode ='PCA'
        alpha: angles in PCA space to identify pixels with same SEDs
    OPTIONS:
        mode: if set to 'PCA', the mixing matrix A will be estimated from PCA decomposition of the SEDs
        PCA: parameters for PCA sensitivity. if mode is set to 'PCA', the PCA estimator will take PCA[0]
            as the number of sources to be extracted and PCA[1] as a sensitivity parameter to discriminate between
            source. Values betwee 5 and 30 are usually recommended
        harder: if set to 1,
        pos: if set to True, the output of the hard thresholding procedure is constrined to be positive
        threshmode: if set to 'mom', adaptive method of moments is used at every iteration to decrease the threshold
        lvl: number of wavelet levels to use in the decompositions, default is 6.
        soft: if set to True, soft thresholding is used
        alpha: angles in degrees to feed the PCA finder. If set, the PCA finder will use pixels along the directions pointed by these angles in PCA space to estimate SED
            That option is particularly useful if automated PCA fails at clearly identifying different SEDs. This happens in case of high degrees of blending.
        mask: if parts of the band images images are to be masked (e.g. stars in the FOV), the user can provide a mask with size n1xn2
            with all pixels at one except for the masked pixels that should be set to 0.
        npca: number of pixels in which images are downsampled to perform a fast PCA.
        plot: set to true to display PCA coefficients of the SEDs. Set to False for automated mode
    EXAMPLE:
        S,A = wine.MCA.mMCA(cube, A, 5,10, PCA=[2,80], mode=pca, harder = 1)
    """
    nb, n1, n2 = np.shape(img)
    if lvl == 0:
        # Default: as many wavelet levels as the image size allows.
        lvl = int(np.log2(n1))
        print("using lvl (including coarse scale !)", lvl)
    if np.sum(mask) == 0:
        mask = np.ones((n1,n2))
    img = np.multiply(img,mask)
    print("mode", mode)
    if mode == 'PCA':
        # Estimate the mixing matrix from PCA clustering of pixel SEDs.
        Apca = PCA_initialise(img.T, PCA[0], angle = PCA[1], alpha = alpha, npca = npca, plot = plot, newwave=newwave)
        Apca = np.multiply(Apca,[1./np.sum(Apca,0)])
        A = Apca
    nb,ns = np.shape(A)
    X = np.zeros((ns,n1*n2))
    # Normalise each SED column of A to unit sum.
    A = np.multiply(A,[1./np.sum(A,0)])
    AT = A.T
    # Gradient step size from the spectral norm of A.
    mu = 2. / linorm(A, 10)
    Y = np.reshape(img,(nb,n1*n2))
    Ri = np.dot(AT,Y)
    # Per-band noise level estimated by MAD.
    sigma_y = np.zeros(nb)
    for i in range(nb):
        sigma_y[i] = MAD(np.reshape(Y[i,:],(n1,n2)))
    if PSF is not None:
        PSFT = np.copy(PSF)
        for ind in range(nb):
            PSFT[ind,:,:] = PSF[ind,:,:].T
        def PSF_apply(x):
            # Convolve each band with its PSF.
            y = np.copy(x)*0
            for i in range(nb):
                y[i,:,:] = scp.fftconvolve(x[i,:,:],PSF[i,:,:],mode = 'same')
            return y
        def PSFT_apply(x):
            # Convolve each band with the transposed (adjoint) PSF.
            y = np.copy(x)*0
            for i in range(nb):
                y[i,:,:] = scp.fftconvolve(x[i,:,:],PSFT[i,:,:],mode = 'same')
            return y
        for i in range(nb):
            # Noise propagation through the adjoint PSF.
            sigma_y[i] = sigma_y[i]*np.sqrt(np.sum(PSFT[i,:,:]**2))
    # Per-source noise level after projection through A^T.
    sigma = np.zeros(ns)
    for i in range(ns):
        sigma[i] = np.sqrt(np.sum( (AT[i,:]**2)*(sigma_y**2)))
    # Initial threshold from the method of moments, then decrease linearly.
    kmas = MOM(np.reshape(Ri,(ns,n1,n1)),sigma,lvl,newwave)#15#np.max(np.dot(1/(mu*np.dot(AT,Y),1),mu*np.dot(AT,Y)))
    print(kmas)
    step = (kmas-kmax)/(niter-5)
    k = kmas
    ################FOR PLOT#############
    # NOTE(review): th0/th1/per/w/thmap/sub/reweight/weight2 appear unused
    # below -- presumably leftovers from debugging/plotting; kept as-is.
    th = np.ones((lvl,n1,n2))
    th0 = np.multiply(th.T, NOISE_TAB[:lvl]).T * sigma[0]
    th1 = np.multiply(th.T, NOISE_TAB[:lvl]).T * sigma[1]
    per= np.zeros((ns,niter))
    w = np.zeros((ns,lvl,n1,n2))
    wmap = np.zeros((ns,lvl,n1,n2))
    S = np.zeros((ns,n1*n2))
    thmap = np.zeros((ns,lvl,n1,n2))
    ks = np.zeros(niter)
    sub = 0
    reweight = 0
    weight2 = 1
    if np.sum(noise_map) != 0:
        # Spatially varying noise: project the provided noise map instead.
        sig_map = np.dot(AT,np.reshape(noise_map,(nb,n1*n2)))
        sigma = np.reshape(sig_map,(ns,n1,n2))
    for i in range(niter):
        if i % 10 == 0:
            print(i)
        # Gradient step on the data-fidelity term.
        AX = np.dot(A,X)
        if PSF is not None:
            AX = PSF_apply(AX.reshape((nb,n1,n2))).reshape((nb,n1*n2))
            R = mu*np.dot(AT, PSFT_apply(np.reshape(Y-AX,(nb,n1,n2))).reshape(nb,n1*n2))
        else:
            R = mu*np.dot(AT, Y-AX)
        X = np.real(X+R)
        S = X
        if threshmode == 'mom':
            # Adaptively lower the threshold when MOM suggests a smaller one.
            kmas = MOM(np.reshape(R,(ns,n1,n2)),sigma,lvl=lvl)
            threshmom = np.max([kmas,kmax])
            if threshmom < k:
                k = threshmom
                step = ((k-kmax)/(niter-i-6))
                print('threshold from MOM',threshmom)
        # Proximal step: wavelet thresholding of each source.
        for j in range(ns):
            kthr = np.max([kmax, k])
            Sj,wmap = mr_filter(np.reshape(S[j,:],(n1,n2)),20,kthr,sigma[j],harder = harder, lvl = lvl,pos = pos,soft = soft, newwave=newwave)
            S[j,:] = np.reshape(Sj,(n1*n2))
        X = np.multiply(S,np.reshape(mask,(n1*n2)))
        a = 1
        ks[i] = kthr
        k = k-step
    S = np.reshape(S,(ns,n1,n2))
    # Show how the threshold evolved over the iterations.
    plt.plot(ks, linewidth = 5)
    plt.xlabel('Iterations', fontsize=30)
    plt.ylabel('k', fontsize=30)
    plt.title('k = f(it)', fontsize = 50)
    plt.show()
    return S,A
def MOM(R, sigma, lvl=6 , newwave=1):
    """
    Estimates the best for a threshold from method of moments
    INPUTS:
        R: multi-sources cube with size nsxn1xn2 where ns is the number of sources
            and n1xn2, the size of an image
        sigma: noise standard deviation
    OUTPUTS:
        k: threshold level
    OPTIONS:
        lvl: number of wavelet levels used in the decomposition, default is 6.
    EXAMPLES
    """
    ns,n1,n2 = np.shape(R)
    wmax = np.zeros((ns))
    wm = np.zeros((ns,lvl))
    w = np.zeros((ns,lvl,n1,n2))
    # Wavelet-transform each source image.
    for j in range(ns):
        w[j,:,:,:], _ = mw.wave_transform(R[j,:,:],lvl, newwave=newwave, verbose=False)
    for j in range(ns):
        # Max coefficient per scale, normalised by the per-scale noise
        # factor (coarse scale excluded).
        # NOTE(review): NOISE_TAB has 8 entries, so this assumes lvl <= 9.
        for l in range(lvl-1):
            wm[j,l] = np.max(np.abs(w[j,l,:,:]))/NOISE_TAB[l]
        wmax[j] = np.max(wm[j,:])
        wmax[j] = wmax[j]/np.mean(sigma[j])
    # Threshold just above the smallest of the per-source maxima.
    k = np.min(wmax)+(np.max(wmax)-np.min(wmax))/100
    return k
def MM(R, sigma, lvl=6, newwave=1):
    """Single-image variant of MOM: threshold estimate from the maximum
    normalised wavelet coefficient of ``R``, in units of ``sigma``."""
    n1,n2 = np.shape(R)
    wm = np.zeros((lvl))
    w = np.zeros((lvl,n1,n2))
    w[:,:,:], _ = mw.wave_transform(R,lvl, newwave=newwave, verbose=False)
    # Max coefficient per scale normalised by the per-scale noise factor
    # (coarse scale excluded).
    for l in range(lvl-1):
        wm[l] = np.max(np.abs(w[l,:,:]))/NOISE_TAB[l]
    wmax = np.max(wm)/sigma
    # Return just below the maximum so at least one coefficient survives.
    k = (wmax)-(wmax)/100
    return k
def MAD(x):
    """
    Estimates noise level in an image from Median Absolute Deviation
    INPUTS:
        x: image
    OUTPUTS:
        sigma: noise standard deviation
    EXAMPLES
    """
    # Remove structure with a 3x3 median filter, then take the median of
    # the absolute residual; 1.48 rescales a Gaussian's MAD to its
    # standard deviation (1 / Phi^-1(3/4) ~ 1.4826).
    meda = med.median_filter(x, size=(3, 3))
    medfil = np.abs(x - meda)
    sigma = 1.48 * np.median(medfil)
    return sigma
def mr_filter(img, niter, k, sigma,lvl = 6, pos = False, harder = 0,mulweight = 1, subweight = 0, addweight = 0, soft = False, newwave=1):
    """
    Computes wavelet iterative filtering on an image.
    INPUTS:
        img: image to be filtered
        niter: number of iterations (10 is usually recommended)
        k: threshold level in units of sigma
        sigma: noise standard deviation
    OUTPUTS:
        imnew: filtered image
        wmap: weight map
    OPTIONS:
        lvl: number of wavelet levels used in the decomposition, default is 6.
        pos: if set to True, positivity constrain is applied to the output image
        harder: if set to one, threshold levels are risen. This is used to compensate for correlated noise
            for instance
        mulweight: multiplicative weight (default is 1)
        subweight: weight map derived from other sources applied to diminish the impact of a given set of coefficient (default is 0)
        addweight: weight map used to enhance previously detected features in an iterative process (default is 0)
        soft: if set to True, soft thresholding is used
    EXAMPLES
    """
    shim = np.shape(img)
    n1 = shim[0]
    n2 = shim[1]
    # M is the support mask of significant coefficients; the coarse scale
    # is always kept.
    M = np.zeros((lvl,n1,n2))
    M[-1,:,:] = 1
    th = np.ones_like(M) * k
    ## keep: the finest scale gets a threshold one sigma higher.
    th[0,:,:] = k+1
    ####################
    # Per-scale thresholds in image units; coarse scale unthresholded.
    th = np.multiply(th.T, NOISE_TAB[:lvl]).T * sigma
    th[np.where(th<0)] = 0
    th[-1,:,:] = 0
    imnew = 0
    i = 0
    R = img
    # here, always 1st gen transform (apparently better ?)
    alpha, _ = mw.wave_transform(R, lvl, newwave=0, verbose=False)
    # Build the support mask from coefficients above threshold (optionally
    # positive-only), adjusted by the add/sub weight maps.
    if pos == True :
        M[np.where(alpha-np.abs(addweight)+np.abs(subweight)-np.abs(th)*mulweight > 0)] = 1
    else:
        M[np.where(np.abs(alpha)-np.abs(addweight)+np.abs(subweight)-np.abs(th)*mulweight > 0)] = 1
    # Iterative reconstruction restricted to the support M.
    while i < niter:
        R = img-imnew
        alpha, pysap_transform = mw.wave_transform(R,lvl, newwave=newwave, verbose=False)
        if soft == True and i>0:
            alpha= np.sign(alpha)*(np.abs(alpha)-np.abs(addweight)+np.abs(subweight)-(th*mulweight))
        Rnew = mw.iuwt(M*alpha, newwave=newwave, convol2d=0,
                       pysap_transform=pysap_transform, verbose=False)
        imnew = imnew+Rnew
        imnew[(imnew < 0)] = 0
        i = i+1
    wmap, _ = mw.wave_transform(imnew,lvl, newwave=newwave, verbose=False)
    return imnew,wmap
def linorm(A, nit):
    """
    Estimates the maximal eigen value of a matrix A
    INPUTS:
        A: matrix
        nit: number of iterations
    OUTPUTS:
        xn: maximal eigen value
    EXAMPLES
    """
    # Power iteration on A^T A, starting from a random unit vector.
    nrow, ncol = np.shape(A)
    vec = np.random.rand(ncol)
    vec = vec / np.sqrt(np.sum(vec ** 2))
    for _ in range(nit):
        fwd = np.dot(A, vec)
        norm_fwd = np.sqrt(np.sum(fwd ** 2))
        back = np.dot(A.T, fwd / norm_fwd)
        norm_back = np.sqrt(np.sum(back ** 2))
        # Converged when the back-projected norm stops growing.
        if norm_back < np.dot(back.T, vec):
            break
        vec = back / norm_back
    return norm_fwd
def PCA_initialise(cube, ns, angle = 15,npca = 32, alpha = [0,0], plot = 0, newwave=1):
    """
    Estimates the mixing matrix of of two sources in a multi band set of images
    INPUTS:
        cube: multi-band cube from which to extract mixing coefficients
        ns: number of mixed sources
    OUTPUTS:
        A0: mixing matrix
    OPTIONS:
        angle: sensitivity parameter. The angular resolution at which the algorithm has to look for PCA coefficients clustering
        npca: square root of the number of pixels to be used. Since too big images result in too big computation time
            we propose to downsample the image in order to get reasonable calculation time
    EXAMPLES
    """
    n,n,nband = np.shape(cube)
    cubep = cube+0.
    lvl = int(np.log2(n))
    # Denoise each band (3-sigma wavelet filtering) before the PCA.
    s = np.zeros(nband)
    for i in range(nband):
        s[i] = MAD(cube[:,:,i])
        cubep[:,:,i] = mr_filter(cube[:,:,i],10,3,s[i],harder = 0, lvl=lvl, newwave=newwave)[0]
    # Downsample to npca x npca pixels to keep the PCA fast.
    cubepca = np.zeros((np.min([n,npca]),np.min([n,npca]),nband))
    xk, yk = np.where(cubepca[:,:,0]==0)
    cubepca[xk, yk, :] = cubep[xk*int(n/npca), yk*int(n/npca), :]
    lines = np.reshape(cubep,(n**2, nband))
    # Cluster pixel SEDs by their angle in PCA space.
    alphas, basis, sig= pcas.pca_ring_spectrum(cubepca[:,:,:].T,std = s)
    ims0 = pcas.pca_lines(alphas,sig,angle, ns, alpha0 = alpha, plot = plot)
    vals = np.array(list(set(np.reshape(ims0,(npca*npca)))))
    vals = vals[np.where(vals>=0)]
    nsp = np.size(vals)
    spectras = np.ones([ns, nband])
    rank = nsp
    S_prior = np.zeros((n,n,np.size(vals)))
    xs,ys = np.where(S_prior[:,:,0]==0)
    count = 0
    # For each SED cluster, average the member-pixel spectra into one SED
    # column and build the corresponding source prior.
    for k in vals:
        x,y = np.where(ims0 == k)
        im = np.zeros((npca, npca))
        im[x,y] = 1
        S_prior[xs,ys,count] = im[np.int_(xs*(npca/n)), np.int_(ys*(npca/n))]#/(k+1)
        vecube = np.reshape(cubepca,(nband,npca*npca))
        ###### norm attempt #####
        xcol,ycol=np.where(ims0==k)
        specs = np.reshape(cubepca[xcol,ycol,:],(len(xcol),nband))
        s1 =np.multiply(np.mean(specs,0),
                        1/np.sum(np.reshape(cubepca,(npca**2,nband),0)))
        spectras[count,:]=s1/np.sum(s1,0)
        S_prior[:,:,count] = S_prior[:,:,count]*np.dot(cube,spectras[count,:])
        count = count+1
    S0 = np.reshape(S_prior[:,:,::-1],(ns,n*n))
    A0 = spectras.T
    return A0
| 2.421875 | 2 |
xiaomi_thermo_unified/sensors/uuids.py | h4/xiaomi_thermo_unified | 1 | 12768923 | DEVICE_NAME = '00002a00-0000-1000-8000-00805f9b34fb'
# Standard Bluetooth GATT Device Information characteristics (16-bit UUIDs
# 0x2A24..0x2A29 expanded to the 128-bit base UUID).
MODEL_NUMBER = '00002a24-0000-1000-8000-00805f9b34fb'
SERIAL_NUMBER = '00002a25-0000-1000-8000-00805f9b34fb'
FIRMWARE_VERSION = '00002a26-0000-1000-8000-00805f9b34fb'
HARDWARE_VERSION = '00002a27-0000-1000-8000-00805f9b34fb'
MANUFACTURER_NAME = '00002a29-0000-1000-8000-00805f9b34fb'
# Vendor-specific data characteristics for the supported sensors.
# NOTE(review): LYWSD02_DATA is upper-case while the others are lower-case;
# if UUIDs are ever compared as plain strings, confirm the casing matches
# what the BLE library reports.
LYWSD02_DATA = 'EBE0CCC1-7A0A-4B0C-8A1A-6FF2997DA3A6'
CGG_DATA = '00000100-0000-1000-8000-00805f9b34fb'
MJHT_DATA = '00000001-0000-1000-8000-00805f9b34fb'
# Standard Battery Level characteristic (0x2A19).
MJHT_BATTERY = '00002a19-0000-1000-8000-00805f9b34fb'
| 1.132813 | 1 |
xarray_sentinel/sentinel1.py | shaorenshengg/bopene | 0 | 12768924 | import os
import typing as T
import warnings
import fsspec # type: ignore
import numpy as np
import numpy.typing as NT
import pandas as pd # type: ignore
import rioxarray # type: ignore
import xarray as xr
from xarray_sentinel import conventions, esa_safe
def open_calibration_dataset(calibration: esa_safe.PathType) -> xr.Dataset:
    """Read a Sentinel-1 calibration annotation file into a Dataset.

    Each calibration vector contributes one ``line`` row; the per-vector
    pixel lists must be identical so the vectors form a regular
    (line, pixel) grid of sigmaNought/betaNought/gamma/dn LUTs.
    """
    calibration_vectors = esa_safe.parse_tag_list(
        calibration, ".//calibrationVector", "calibration"
    )
    azimuth_time_list = []
    pixel_list = []
    line_list = []
    sigmaNought_list = []
    betaNought_list = []
    gamma_list = []
    dn_list = []
    for vector in calibration_vectors:
        azimuth_time_list.append(vector["azimuthTime"])
        line_list.append(vector["line"])
        # The "$" entries hold whitespace-separated number lists from the XML.
        pixel = np.fromstring(vector["pixel"]["$"], dtype=int, sep=" ")  # type: ignore
        pixel_list.append(pixel)
        sigmaNought = np.fromstring(vector["sigmaNought"]["$"], dtype=float, sep=" ")  # type: ignore
        sigmaNought_list.append(sigmaNought)
        betaNought = np.fromstring(vector["betaNought"]["$"], dtype=float, sep=" ")  # type: ignore
        betaNought_list.append(betaNought)
        gamma = np.fromstring(vector["gamma"]["$"], dtype=float, sep=" ")  # type: ignore
        gamma_list.append(gamma)
        dn = np.fromstring(vector["dn"]["$"], dtype=float, sep=" ")  # type: ignore
        dn_list.append(dn)
    pixel = np.array(pixel_list)
    # All vectors must share the same pixel axis to form a regular grid.
    if not np.allclose(pixel, pixel[0]):
        raise ValueError(
            "Unable to organise calibration vectors in a regular line-pixel grid"
        )
    data_vars = {
        "azimuth_time": ("line", [np.datetime64(dt) for dt in azimuth_time_list]),
        "sigmaNought": (("line", "pixel"), sigmaNought_list),
        "betaNought": (("line", "pixel"), betaNought_list),
        "gamma": (("line", "pixel"), gamma_list),
        "dn": (("line", "pixel"), dn_list),
    }
    coords = {"line": line_list, "pixel": pixel_list[0]}

    return xr.Dataset(data_vars=data_vars, coords=coords)
def open_coordinateConversion_dataset(annotation_path: esa_safe.PathType) -> xr.Dataset:
    """Read the slant-range <-> ground-range conversion polynomials.

    One record per azimuth time, each holding the reference ranges
    (sr0/gr0) and the srgr/grsr polynomial coefficients indexed by degree.
    """
    coordinate_conversion = esa_safe.parse_tag(
        annotation_path, ".//coordinateConversionList"
    )
    gr0 = []
    sr0 = []
    azimuth_time = []
    slant_range_time = []
    srgrCoefficients: T.List[NT.NDArray[np.float_]] = []
    grsrCoefficients: T.List[NT.NDArray[np.float_]] = []
    for values in coordinate_conversion["coordinateConversion"]:
        sr0.append(values["sr0"])
        gr0.append(values["gr0"])
        azimuth_time.append(values["azimuthTime"])
        slant_range_time.append(values["slantRangeTime"])
        # Coefficient lists come as whitespace-separated text in the XML.
        srgrCoefficients.append(
            np.fromstring(values["srgrCoefficients"]["$"], dtype=float, sep=" ")
        )
        grsrCoefficients.append(
            np.fromstring(values["grsrCoefficients"]["$"], dtype=float, sep=" ")
        )
    coords = {
        "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
        "degree": list(range(len(srgrCoefficients[0]))),
    }
    data_vars = {
        "gr0": ("azimuth_time", gr0),
        "sr0": ("azimuth_time", sr0),
        "slant_range_time": ("azimuth_time", slant_range_time),
        "srgr_coefficients": (("azimuth_time", "degree"), srgrCoefficients),
        "grsr_coefficients": (("azimuth_time", "degree"), grsrCoefficients),
    }
    return xr.Dataset(data_vars=data_vars, coords=coords)
def get_fs_path(
    urlpath_or_path: esa_safe.PathType, fs: T.Optional[fsspec.AbstractFileSystem] = None
) -> T.Tuple[fsspec.AbstractFileSystem, str]:
    """Resolve ``urlpath_or_path`` to a (filesystem, path) pair.

    When ``fs`` is supplied it is trusted and the path is returned
    unchanged; otherwise fsspec expands the URL-path, which must match
    exactly one file or object.
    """
    if fs is not None:
        return fs, urlpath_or_path
    fs, _, paths = fsspec.get_fs_token_paths(urlpath_or_path)
    if len(paths) == 0:
        raise ValueError(f"file or object not found {urlpath_or_path!r}")
    if len(paths) > 1:
        raise ValueError(f"multiple files or objects found {urlpath_or_path!r}")
    return fs, paths[0]
def open_gcp_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the geolocation grid points (GCPs) into a 2D Dataset.

    The GCPs are assumed to lie on a regular (line, pixel) grid; the
    unique line values define the azimuth_time axis and the unique pixel
    values the slant_range_time axis.  Each per-point field is scattered
    into its (azimuth, range) cell.
    """
    geolocation_grid_points = esa_safe.parse_tag_list(
        annotation, ".//geolocationGridPoint"
    )
    azimuth_time = []
    slant_range_time = []
    line_set = set()
    pixel_set = set()
    # Collect the unique grid axes (one time per line, one range per pixel).
    for ggp in geolocation_grid_points:
        if ggp["line"] not in line_set:
            azimuth_time.append(np.datetime64(ggp["azimuthTime"]))
            line_set.add(ggp["line"])
        if ggp["pixel"] not in pixel_set:
            slant_range_time.append(ggp["slantRangeTime"])
            pixel_set.add(ggp["pixel"])
    shape = (len(azimuth_time), len(slant_range_time))
    dims = ("azimuth_time", "slant_range_time")
    # Start from NaN so missing grid cells stay identifiable.
    data_vars = {
        "latitude": (dims, np.full(shape, np.nan)),
        "longitude": (dims, np.full(shape, np.nan)),
        "height": (dims, np.full(shape, np.nan)),
        "incidenceAngle": (dims, np.full(shape, np.nan)),
        "elevationAngle": (dims, np.full(shape, np.nan)),
    }
    line = sorted(line_set)
    pixel = sorted(pixel_set)
    # Scatter each point's values into its grid cell.
    for ggp in geolocation_grid_points:
        for var in data_vars:
            j = line.index(ggp["line"])
            i = pixel.index(ggp["pixel"])
            data_vars[var][1][j, i] = ggp[var]
    ds = xr.Dataset(
        data_vars=data_vars,
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
            "slant_range_time": slant_range_time,
            "line": ("azimuth_time", line),
            "pixel": ("slant_range_time", pixel),
        },
    )
    return ds
def open_attitude_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the ``<attitude>`` records of a Sentinel-1 annotation file as an
    xarray.Dataset indexed by azimuth_time (quaternions, rates and angles)."""
    attitudes = esa_safe.parse_tag_list(annotation, ".//attitude")
    variables = ["q0", "q1", "q2", "q3", "wx", "wy", "wz", "pitch", "roll", "yaw"]
    # one time stamp per record, one 1-D variable per attitude quantity
    azimuth_time = [attitude["time"] for attitude in attitudes]
    data_vars = {
        var: ("azimuth_time", [attitude[var] for attitude in attitudes])
        for var in variables
    }
    return xr.Dataset(
        data_vars=data_vars,
        coords={"azimuth_time": [np.datetime64(dt) for dt in azimuth_time]},
    )
def open_orbit_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the orbit state vectors of a Sentinel-1 annotation file.

    Returns a Dataset with (axis, azimuth_time) "position" and "velocity"
    variables; the "reference_system" attribute is set only when every state
    vector reports the same frame.
    """
    orbits = esa_safe.parse_tag_list(annotation, ".//orbit")
    reference_system = orbits[0]["frame"]
    azimuth_time: T.List[T.Any] = []
    position: T.List[T.List[T.Any]] = [[], [], []]
    velocity: T.List[T.List[T.Any]] = [[], [], []]
    for orbit in orbits:
        azimuth_time.append(orbit["time"])
        # x/y/z components are stored as separate rows of the 2-D variables
        for k, axis_name in enumerate("xyz"):
            position[k].append(orbit["position"][axis_name])
            velocity[k].append(orbit["velocity"][axis_name])
        if orbit["frame"] != reference_system:
            warnings.warn(
                "reference_system is not consistent in all the state vectors. "
            )
            # inconsistent frames: drop the attribute rather than pick one
            reference_system = None
    position_var = xr.Variable(data=position, dims=("axis", "azimuth_time"))  # type: ignore
    velocity_var = xr.Variable(data=velocity, dims=("axis", "azimuth_time"))  # type: ignore
    attrs = {}
    if reference_system is not None:
        attrs.update({"reference_system": reference_system})
    return xr.Dataset(
        data_vars={"position": position_var, "velocity": velocity_var},
        attrs=attrs,
        coords={
            "azimuth_time": [np.datetime64(dt) for dt in azimuth_time],
            "axis": [0, 1, 2],
        },
    )
def open_dc_estimate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the Doppler-centroid estimates of a Sentinel-1 annotation file
    into a Dataset with one polynomial (over "degree") per azimuth time."""
    dc_estimates = esa_safe.parse_dc_estimate(annotation)
    azimuth_time = [estimate["azimuthTime"] for estimate in dc_estimates]
    t0 = [estimate["t0"] for estimate in dc_estimates]
    data_dc_poly = [estimate["dataDcPolynomial"] for estimate in dc_estimates]
    return xr.Dataset(
        data_vars={
            "t0": ("azimuth_time", t0),
            "data_dc_polynomial": (("azimuth_time", "degree"), data_dc_poly),
        },
        coords={
            "azimuth_time": [np.datetime64(at) for at in azimuth_time],
            # all polynomials share one length; use the first to size "degree"
            "degree": list(range(len(data_dc_poly[0]))),
        },
    )
def open_azimuth_fm_rate_dataset(annotation: esa_safe.PathOrFileType) -> xr.Dataset:
    """Read the azimuth FM rate records of a Sentinel-1 annotation file
    into a Dataset with one polynomial (over "degree") per azimuth time."""
    azimuth_fm_rates = esa_safe.parse_azimuth_fm_rate(annotation)
    azimuth_time = [record["azimuthTime"] for record in azimuth_fm_rates]
    t0 = [record["t0"] for record in azimuth_fm_rates]
    azimuth_fm_rate_poly = [
        record["azimuthFmRatePolynomial"] for record in azimuth_fm_rates
    ]
    return xr.Dataset(
        data_vars={
            "t0": ("azimuth_time", t0),
            "azimuth_fm_rate_polynomial": (
                ("azimuth_time", "degree"),
                azimuth_fm_rate_poly,
            ),
        },
        coords={
            "azimuth_time": [np.datetime64(at) for at in azimuth_time],
            # all polynomials share one length; use the first to size "degree"
            "degree": list(range(len(azimuth_fm_rate_poly[0]))),
        },
    )
def find_avalable_groups(
    ancillary_data_paths: T.Dict[str, T.Dict[str, T.Dict[str, str]]],
    product_attrs: T.Dict[str, T.Any],
    fs: fsspec.AbstractFileSystem = fsspec.filesystem("file"),
) -> T.Dict[str, str]:
    """Map every openable dataset group to the annotation file it is read from.

    For each polarisation whose product annotation exists on *fs*, registers
    "<swath>", "<swath>/<pol>" and the per-metadata subgroups; the optional
    "calibration" subgroup is added only when its annotation exists too.

    NOTE(review): the function name has a typo ("avalable") kept for
    backward compatibility; ``product_attrs`` is currently unused — confirm
    whether it can be dropped. The *fs* default is evaluated once at import
    time, which is fine for the stateless local filesystem.
    """
    groups: T.Dict[str, str] = {}
    for subswath_id, subswath_data_path in ancillary_data_paths.items():
        for pol_id, pol_data_paths in subswath_data_path.items():
            # probe for the product annotation; skip this polarisation if absent
            try:
                with fs.open(pol_data_paths["s1Level1ProductSchema"]):
                    pass
            except FileNotFoundError:
                continue
            groups[subswath_id] = ""
            groups[f"{subswath_id}/{pol_id}"] = pol_data_paths["s1Level1ProductSchema"]
            for metadata_group in [
                "gcp",
                "orbit",
                "attitude",
                "dc_estimate",
                "azimuth_fm_rate",
            ]:
                groups[f"{subswath_id}/{pol_id}/{metadata_group}"] = pol_data_paths[
                    "s1Level1ProductSchema"
                ]
            # probe for the (optional) calibration annotation separately;
            # the `continue` just ends this iteration early when it is missing
            try:
                with fs.open(pol_data_paths["s1Level1CalibrationSchema"]):
                    pass
            except FileNotFoundError:
                continue
            groups[f"{subswath_id}/{pol_id}/calibration"] = pol_data_paths[
                "s1Level1CalibrationSchema"
            ]
    return groups
def open_pol_dataset(
    measurement: esa_safe.PathType,
    annotation: esa_safe.PathType,
    chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
) -> xr.Dataset:
    """Open the measurement raster of one swath/polarisation as a Dataset.

    The slant-range and azimuth-time coordinates are derived from the
    annotation file. For burst-mode products (TOPS) the per-burst azimuth
    times overwrite the product-level time axis, and the raster is chunked
    one burst per chunk by default.
    """
    image_information = esa_safe.parse_tag(annotation, ".//imageInformation")
    product_information = esa_safe.parse_tag(annotation, ".//productInformation")
    swath_timing = esa_safe.parse_tag(annotation, ".//swathTiming")
    # slant-range time axis: uniform sampling from the first sample time
    number_of_samples = image_information["numberOfSamples"]
    first_slant_range_time = image_information["slantRangeTime"]
    slant_range_sampling = 1 / product_information["rangeSamplingRate"]
    slant_range_time = np.linspace(
        first_slant_range_time,
        first_slant_range_time + slant_range_sampling * (number_of_samples - 1),
        number_of_samples,
    )
    # azimuth-time axis: uniform sampling from the first-line UTC time
    number_of_lines = image_information["numberOfLines"]
    first_azimuth_time = image_information["productFirstLineUtcTime"]
    azimuth_time_interval = image_information["azimuthTimeInterval"]
    azimuth_time = pd.date_range(
        start=first_azimuth_time,
        periods=number_of_lines,
        freq=pd.to_timedelta(azimuth_time_interval, "s"),
    ).values
    attrs = {
        "azimuth_steering_rate": product_information["azimuthSteeringRate"],
        # radar frequency is converted from Hz to GHz
        "sar:center_frequency": product_information["radarFrequency"] / 10 ** 9,
    }
    number_of_bursts = swath_timing["burstList"]["@count"]
    if number_of_bursts:
        lines_per_burst = swath_timing["linesPerBurst"]
        attrs.update(
            {
                "number_of_bursts": number_of_bursts,
                "lines_per_burst": lines_per_burst,
            }
        )
        # replace each burst's slice of the azimuth axis with the burst's own
        # start time, so consecutive bursts may overlap in time
        for burst_index, burst in enumerate(swath_timing["burstList"]["burst"]):
            first_azimuth_time_burst = burst["azimuthTime"]
            azimuth_time_burst = pd.date_range(
                start=first_azimuth_time_burst,
                periods=lines_per_burst,
                freq=pd.to_timedelta(azimuth_time_interval, "s"),
            )
            azimuth_time[
                lines_per_burst * burst_index : lines_per_burst * (burst_index + 1)
            ] = azimuth_time_burst
        # default chunking: one burst of lines per dask chunk
        if chunks is None:
            chunks = {"y": lines_per_burst}
    arr = rioxarray.open_rasterio(measurement, chunks=chunks)
    arr = arr.squeeze("band").drop_vars(["band", "spatial_ref"])
    arr = arr.rename({"y": "line", "x": "pixel"})
    arr = arr.assign_coords(
        {
            "pixel": np.arange(0, arr["pixel"].size, dtype=int),
            "line": np.arange(0, arr["line"].size, dtype=int),
            "slant_range_time": ("pixel", slant_range_time),
            "azimuth_time": ("line", azimuth_time),
        }
    )
    # stripmap products have monotonic time axes, so they can be used as
    # dimensions directly; burst products keep line/pixel dimensions
    if number_of_bursts == 0:
        arr = arr.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
    return xr.Dataset(attrs=attrs, data_vars={"measurement": arr})
def crop_burst_dataset(pol_dataset: xr.Dataset, burst_index: int) -> xr.Dataset:
    """Return the slice of *pol_dataset* covering burst *burst_index*,
    re-dimensioned on (azimuth_time, slant_range_time).

    Raises IndexError when the index is outside [0, number_of_bursts).
    """
    number_of_bursts = pol_dataset.attrs["number_of_bursts"]
    if not 0 <= burst_index < number_of_bursts:
        raise IndexError(f"{burst_index=} out of bounds")
    lines_per_burst = pol_dataset.attrs["lines_per_burst"]
    first_line = lines_per_burst * burst_index
    last_line = first_line + lines_per_burst - 1
    burst = pol_dataset.sel(line=slice(first_line, last_line))
    burst = burst.swap_dims({"line": "azimuth_time", "pixel": "slant_range_time"})
    burst.attrs["burst_index"] = burst_index
    return burst
def build_burst_id(lat: float, lon: float, relative_orbit: int) -> str:
    """Return a burst identifier of the form ``R{orbit}-{N|S}{lat}-{E|W}{lon}``.

    Latitude and longitude are rounded to tenths of a degree and encoded as
    zero-padded integers (e.g. 40.7 -> "407"); the hemisphere is carried by
    the N/S and E/W letters.
    """
    lat = int(round(lat * 10))
    lon = int(round(lon * 10))
    n_or_s = "N" if lat >= 0 else "S"
    e_or_w = "E" if lon >= 0 else "W"
    # BUG FIX: use abs() so southern/western coordinates do not embed a
    # duplicate "-" sign next to the S/W hemisphere letter (e.g. "S-45")
    burst_id = f"R{relative_orbit:03}-{n_or_s}{abs(lat):03}-{e_or_w}{abs(lon):04}"
    return burst_id
def compute_burst_centres(
    gcp: xr.Dataset,
) -> T.Tuple[NT.NDArray[np.float_], NT.NDArray[np.float_]]:
    """Return the (latitudes, longitudes) of the approximate centre of each
    burst, computed from the geolocation grid points."""
    # pair each GCP row with the next one via a rolling window of 2 ...
    gcp_rolling = gcp.rolling(azimuth_time=2, min_periods=1)
    gc_az_win = gcp_rolling.construct(azimuth_time="az_win")
    # ... and average over the pair and the full slant-range extent
    centre = gc_az_win.mean(["az_win", "slant_range_time"])
    # drop the first entry, whose window is partial (min_periods=1)
    centre = centre.isel(azimuth_time=slice(1, None))
    return centre.latitude.values, centre.longitude.values
def normalise_group(group: T.Optional[str]) -> T.Tuple[str, T.Optional[int]]:
    """Normalise a user-supplied group name.

    Strips a single leading "/", and when the group looks like
    "swath/pol/NN" with a numeric last component, splits it into the parent
    group and the burst index. Returns ``(group, burst_index_or_None)``.
    """
    if group is None:
        group = ""
    if group.startswith("/"):
        group = group[1:]
    parent_group, _, last_name = group.rpartition("/")
    # a numeric trailing component at depth three selects a single burst
    if last_name.isnumeric() and parent_group.count("/") == 1:
        return parent_group, int(last_name)
    return group, None
def open_dataset(
    product_urlpath: esa_safe.PathType,
    *,
    drop_variables: T.Optional[T.Tuple[str]] = None,
    group: T.Optional[str] = None,
    chunks: T.Optional[T.Union[int, T.Dict[str, int]]] = None,
    fs: T.Optional[fsspec.AbstractFileSystem] = None,
) -> xr.Dataset:
    """Open one group of a Sentinel-1 SAFE product as an xarray.Dataset.

    *group* selects what to open: "" (root, attributes only), "<swath>",
    "<swath>/<pol>" (measurement raster, optionally "/<burst_index>"), or
    "<swath>/<pol>/<metadata>" for one of the METADATA_OPENERS groups.
    """
    group, burst_index = normalise_group(group)
    absgroup = f"/{group}"
    fs, manifest_path = get_fs_path(product_urlpath, fs)
    # a directory path refers to the whole .SAFE folder; look inside it
    if fs.isdir(manifest_path):
        manifest_path = os.path.join(manifest_path, "manifest.safe")
    base_path = os.path.dirname(manifest_path)
    with fs.open(manifest_path) as file:
        product_attrs, product_files = esa_safe.parse_manifest_sentinel1(file)
    ancillary_data_paths = esa_safe.get_ancillary_data_paths(base_path, product_files)
    if drop_variables is not None:
        warnings.warn("'drop_variables' is currently ignored")
    groups = find_avalable_groups(ancillary_data_paths, product_attrs, fs=fs)
    if group != "" and group not in groups:
        raise ValueError(
            f"Invalid group {group!r}, please select one of the following groups:"
            f"\n{list(groups.keys())}"
        )
    metadata = ""
    if group == "":
        # root group: empty dataset, only product attributes and subgroups
        ds = xr.Dataset()
        subgroups = list(groups)
    else:
        # subgroups relative to the selected group (prefix stripped)
        subgroups = [
            g[len(group) + 1 :] for g in groups if g.startswith(group) and g != group
        ]
        if "/" not in group:
            # swath level: empty dataset, subgroups list the polarisations
            ds = xr.Dataset()
        elif group.count("/") == 1:
            # swath/pol: open the measurement raster
            subswath, pol = group.split("/", 1)
            ds = open_pol_dataset(
                ancillary_data_paths[subswath][pol]["s1Level1MeasurementSchema"],
                ancillary_data_paths[subswath][pol]["s1Level1ProductSchema"],
                chunks=chunks,
            )
            if burst_index is not None:
                ds = crop_burst_dataset(ds, burst_index=burst_index)
        else:
            # swath/pol/metadata: dispatch to the matching metadata opener
            subswath, pol, metadata = group.split("/", 2)
            with fs.open(groups[group]) as file:
                ds = METADATA_OPENERS[metadata](file)
    product_attrs["group"] = absgroup
    if len(subgroups):
        product_attrs["subgroups"] = subgroups
    ds.attrs.update(product_attrs)  # type: ignore
    conventions.update_attributes(ds, group=metadata)
    return ds
class Sentinel1Backend(xr.backends.common.BackendEntrypoint):
    """xarray backend entrypoint that opens Sentinel-1 SAFE products."""
    def open_dataset(  # type: ignore
        self,
        filename_or_obj: str,
        drop_variables: T.Optional[T.Tuple[str]] = None,
        group: T.Optional[str] = None,
    ) -> xr.Dataset:
        """Delegate to the module-level :func:`open_dataset`."""
        return open_dataset(filename_or_obj, drop_variables=drop_variables, group=group)
    def guess_can_open(self, filename_or_obj: T.Any) -> bool:
        """Claim inputs whose extension is ".safe" (case-insensitive)."""
        try:
            _, ext = os.path.splitext(filename_or_obj)
        except TypeError:
            # non-path inputs (e.g. open file objects) are not claimed
            return False
        # NOTE(review): os.path.splitext never returns an extension that
        # ends in "/", so the ".safe/" entry looks unreachable — confirm
        return ext.lower() in {".safe", ".safe/"}
# Dispatch table used by open_dataset(): metadata group name -> the function
# that reads that group from an annotation file into an xarray.Dataset.
METADATA_OPENERS = {
    "gcp": open_gcp_dataset,
    "orbit": open_orbit_dataset,
    "attitude": open_attitude_dataset,
    "dc_estimate": open_dc_estimate_dataset,
    "azimuth_fm_rate": open_azimuth_fm_rate_dataset,
    "calibration": open_calibration_dataset,
}
| 2.421875 | 2 |
utils/metrics.py | teddy4445/OIEI | 6 | 12768925 | """
Authors: <NAME>, <NAME>
Helper functions:
1. Overall score between, explainability and performance with normalization between 0-1 (logaritmic_power, sigmoid_power).
2. An explainability minimization (smaller is better) with additive constrains according the number of leaves and the error for the optimization.
3. Accuracy score.
"""
from sklearn.metrics import accuracy_score
from sklearn.tree import _tree
import numpy as np
import math
def get_score(y_true, y_pred):
    """Return the accuracy of *y_pred* against the ground truth *y_true*.

    Thin wrapper so the performance metric can be swapped in one place.
    """
    return accuracy_score(y_true, y_pred)
def logaritmic_power(x, y):
    """Combine performance and explainability into one normalized score.

    Parameters
    ----------
    x : float
        Performance score (scalar).
    y : float
        Explainability score (scalar).

    Returns
    -------
    float
        ``x ** log2(y ** (1 - x))`` — performance raised to an exponent that
        grows with the explainability term.
    """
    penalty = 1 - x
    exponent = np.log2(y ** penalty)
    return x ** exponent
def sigmoid_power(x, y):
    """Combine performance and explainability into one normalized score.

    Parameters
    ----------
    x : float
        Performance score (scalar).
    y : float
        Explainability score (scalar).

    Returns
    -------
    float
        ``x ** sigmoid(y)``, where sigmoid(y) = 1 / (1 + exp(-y)).
    """
    weight = 1 / (1 + math.exp(-y))
    return x ** weight
def explainability_metric(clf, x):
    """Score a decision tree's lack of explainability; smaller is better.

    Parameters
    ----------
    clf : fitted sklearn decision-tree estimator
        Its ``tree_`` structure is inspected for complexity.
    x : float
        Performance score in [0, 1] (e.g. accuracy).

    Returns
    -------
    float
        ``error + _lambda * size_node`` where error = 1 - x and size_node is
        the number of internal (split) nodes.
    """
    # internal nodes are the entries of tree_.feature that actually test a
    # feature; leaves are marked with _tree.TREE_UNDEFINED
    # (the previously computed and unused `size_leaf` has been removed)
    size_node = len([z for z in clf.tree_.feature if z != _tree.TREE_UNDEFINED])
    _lambda = 1  # weight of the complexity penalty relative to the error
    error = (1.0 - x)
    minimize = error + _lambda * size_node
    return minimize
main/libusbmuxd/template.py | RoastVeg/cports | 0 | 12768926 | <reponame>RoastVeg/cports
# cports build-template metadata for the libusbmuxd package.
pkgname = "libusbmuxd"
pkgver = "2.0.2"
pkgrel = 0
build_style = "gnu_configure"
hostmakedepends = ["pkgconf", "automake", "libtool"]
makedepends = ["libusb-devel", "libplist-devel"]
pkgdesc = "Client library to multiplex connections to/from iOS devices"
maintainer = "q66 <<EMAIL>>"
license = "LGPL-2.1-only"
url = "https://libimobiledevice.org"
# the pkgver is interpolated into the upstream tarball URL
source = f"https://github.com/libimobiledevice/{pkgname}/archive/{pkgver}.tar.gz"
sha256 = "8ae3e1d9340177f8f3a785be276435869363de79f491d05d8a84a59efc8a8fdc"
def pre_configure(self):
    # regenerate the autotools build system before the configure step runs
    self.do("autoreconf", "-if")
@subpackage("libusbmuxd-devel")
def _devel(self):
    # split development files (headers, pkg-config) into a -devel subpackage
    return self.default_devel()
@subpackage("libusbmuxd-progs")
def _progs(self):
    # split the bundled command-line programs into a -progs subpackage
    return self.default_progs()
| 1.367188 | 1 |
ks4.py | raspitv/KStracker | 0 | 12768927 | #!/usr/bin/env python2.7
# -*- coding: utf-8 -*-
# cli arguments: nolog nodis noloop
from urllib2 import Request, urlopen, URLError
from time import sleep # we need sleep for a delay between readings
import sys # we use sys for command line arguments
# insert the path to your log file
#file_path = '/home/kstrack/' # directory only, without the file name, e.g. /home/pi/
file_path = '' # not really needed unless running from cron
# Here we list the urls of the KS campaigns we want to track
urls =['https://www.kickstarter.com/projects/raspitv/raspio-duino-affordable-arduino-programming-on-ras',
       'https://www.kickstarter.com/projects/ryanteckltd/raspberry-pi-debug-clip']
# parse the optional command-line flags (see usage notes at end of file)
if 'nolog' in sys.argv:
    logging_enabled = 0
else:
    logging_enabled = 1 # change to 0 to switch off logging
if 'nodis' in sys.argv:
    display_enabled = 0
else:
    display_enabled = 1 # change to 0 to switch off screen output
if 'noloop' in sys.argv:
    loop_forever = 0
else:
    loop_forever = 1 # change to 0 to just scan once and not loop
pc='%' # defining % as a variable avoids confusion in %-formatted strings
def last_entry(project_name): # helps handle when a KS finishes
    """Return 1 if the last logged line for *project_name* shows the
    campaign has ended (time_left field is 0), otherwise 0."""
    logfile = file_path + project_name + '.txt'
    # context manager guarantees the log file is closed even if reading fails
    with open(logfile, 'r') as read_log:
        log_contents = read_log.readlines()
    last_line = log_contents[-1]
    # field 4 of the CSV record is time_left (see the write order in log())
    time_left = int(last_line.split(',')[4])
    if time_left == 0:
        return 1
    else:
        return 0
def log(project_name, target, percent, amount_raised, campaign_duration,
        time_left, time_left_unit, backers, amount_per_hour, hours_into_campaign,
        project_currency):
    """Append one CSV line of campaign stats to <file_path><project_name>.txt."""
    # convert non-string variables to strings for writing to file
    percent = "%.2f" % percent
    hours_into_campaign = "%.3f" % hours_into_campaign
    if len(str(campaign_duration)) > 4: # restrict length stored
        campaign_duration = "{:.4f}".format(campaign_duration)
    else:
        campaign_duration = str(campaign_duration)
    backers = str(backers)
    amount_per_hour = "%.2f" % amount_per_hour
    logfile = file_path + project_name + ".txt"
    write_list = [target, percent, amount_raised, campaign_duration, time_left,
                  time_left_unit, backers, amount_per_hour, hours_into_campaign,
                  project_currency]
    write_string = ','.join(write_list) + '\n'
    # context manager ensures the file is flushed and closed even on error
    with open(logfile, 'a') as log_data:
        log_data.write(write_string)
def scan(someurl): # fetch one Kickstarter project page and report its stats
    # scan() reads the page, extracts target/pledged/backers/time-left by
    # scraping known data-* attributes, then prints and/or logs the result
    # depending on the module-level flags. Python 2 code (urllib2, print).
    global logging_enabled
    req = Request(someurl)
    try:
        response = urlopen(req)
    except URLError as e:
        # distinguish network failures from HTTP error responses
        if hasattr(e, 'reason'):
            print 'We failed to reach a server.'
            print 'Reason: ', e.reason
        elif hasattr(e, 'code'):
            print 'The server couldn\'t fulfill the request.'
            print 'Error code: ', e.code
    else:
        the_page = response.readlines()
        project_name = someurl.split('/')[5].split('-')[0] # take project name from URL
        for line in the_page:
            if 'data-duration' in line: # remaining / total campaign duration
                time_left = float(line.split('"')[5])
                campaign_duration = float(line.split('"')[1])
                hours_into_campaign = (24 * campaign_duration) - time_left
                # choose a human-friendly unit for the remaining time
                if time_left >= 72:
                    time_left_unit = "days"
                    time_left = str(int(time_left / 24))
                elif time_left >= 1:
                    time_left_unit = "hours"
                    time_left = str(int(time_left))
                elif 0 < time_left < 1:
                    time_left_unit = "minutes"
                    time_left = str(int(time_left * 60))
                else:
                    time_left_unit = "seconds"
                    time_left = str(int(time_left))
                if last_entry(project_name):
                    logging_enabled = 0 # stop logging if campaign finished
            if 'data-backers-count' in line:
                backers = int(line.split('"')[3])
            if 'data-goal' in line: # target, percent raised and pledged total
                words = line.split(" ")
                for word in words:
                    if 'data-goal' in word:
                        target = word.split('"')
                    if 'data-percent-raised' in word:
                        percent = word.split('"')
                    if 'data-pledged' in word:
                        amount_raised = word.split('"')
            if 'project_currency_code' in line:
                project_currency = line.split('"')[1]
                project_currency = project_currency.split(' ')[1]
                if project_currency == 'usd':
                    project_currency_symbol = '$'
                elif project_currency == 'gbp':
                    project_currency_symbol = '£'
                else:
                    project_currency_symbol = '£' # we can add more currencies as needed
        # NOTE(review): the names used below are only bound if the matching
        # page markup was found; a page layout change would raise NameError
        amount_per_hour = float(amount_raised[1]) / hours_into_campaign
        if display_enabled:
            # ANSI escapes: bold blue title, bold yellow labels, bold white figures
            print '\033[34m\033[1m' + project_name + '\033[0m' # bold and blue title
            print '\033[33m\033[1mtarget:\033[0m \033[1m\033[37m%s%.2f\033[0m' % (project_currency_symbol,float(target[1]))
            print '\033[33m\033[1mpercentage raised:\033[0m \033[1m\033[37m%.2f%s\033[0m' % ((float(percent[1]) * 100) , pc)
            print '\033[33m\033[1mTotal so far:\033[0m \033[1m\033[37m%s%.2f\033[0m' % (project_currency_symbol, float(amount_raised[1]))
            print '\033[33m\033[1mTime left:\033[0m \033[1m\033[37m%s %s\033[0m' % (time_left, time_left_unit)
            print '\033[33m\033[1mBackers:\033[0m \033[1m\033[37m%d \033[0m' % backers
            print '\033[33m\033[1m%s/hr:\033[0m \033[1m\033[37m%s%.2f \033[0m \n' % (project_currency_symbol, project_currency_symbol, amount_per_hour)
        # log only every log_interval-th cycle (counter is maintained by the
        # main loop below)
        if (logging_enabled and counter % log_interval == 0):
            log(project_name, target[1], (float(percent[1]) * 100), amount_raised[1],
                campaign_duration, time_left, time_left_unit, backers, amount_per_hour,
                hours_into_campaign, project_currency)
counter = 0  # scan-cycle counter, read by scan() to rate-limit logging
log_interval = 10  # write to the log file only every 10th cycle
while True: # continuous loop scans each URL we define
    for url in urls:
        scan(url)
        sleep(15)  # pause between page fetches to be polite to the server
    counter += 1 # to be able to limit logging frequency
    if not loop_forever:
        break  # 'noloop' flag set: scan each URL once, then exit
# Instructions for ks4.py
# To use this script, edit lines 12-13 to include the KS URLs you want to track,
# save, then type...
# python ks4.py
# By default, it will display on-screen output, loop continuously until you
# CTRL+C out of it (or it errors out), and log every tenth cycle to a file
# python ks4.py [nolog | nodis | noloop]
# You can add nolog and/or nodis and/or noloop to the command
#
# These will disable logging | display | looping respectively
# You can use any, all or none of the above, but if you use...
# nolog nodis noloop, the program won't do anything with the KS page it scans.
# If you want to run it from cron, you'll need to add the file_path (line 8)
# e.g. /home/pi (but not the filename itself, just the path)
| 2.640625 | 3 |
gluon/tests/api/test_baseObject.py | lfntac/ipv6 | 0 | 12768928 | # Copyright 2016, AT&T
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from mock import patch
import datetime
import wsme
from gluon.api import baseObject
from gluon.tests.api import base
class APIBaseTestCase(base.APITestCase):
    """Unit tests for gluon.api.baseObject.APIBase."""
    def setUp(self):
        super(APIBaseTestCase, self).setUp()
        pass
    """
    test get_fields
    """
    # each APIBase object should always have two fields:
    # created_at and updated_at
    def test_get_fields(self):
        """get_fields() must always expose the audit timestamp fields."""
        api_base = baseObject.APIBase()
        fields = api_base.get_fields()
        self.assertIn("created_at", fields)
        self.assertIn("updated_at", fields)
    """
    test as_dict
    """
    # each APIBase object should always have two fields:
    # created_at and updated_at
    def test_as_dict(self):
        """as_dict() must include the timestamp fields with the set values."""
        api_base = baseObject.APIBase()
        # set the created_at and updated_at fields
        now = datetime.datetime.now()
        api_base.created_at = now
        api_base.updated_at = now
        fields_dict = api_base.as_dict()
        self.assertIn("created_at", fields_dict)
        self.assertIn("updated_at", fields_dict)
        self.assertEqual(fields_dict["created_at"], now)
        self.assertEqual(fields_dict["updated_at"], now)
    """
    test unset_fields_except
    """
    # unset created_at and keep updated_at
    def test_unset_fields_except(self):
        """unset_fields_except() clears all fields not in the except-list."""
        api_base = baseObject.APIBase()
        # set the created_at and updated_at fields
        today = datetime.datetime.today()
        api_base.created_at = today
        api_base.updated_at = today
        except_list = ["updated_at"]
        api_base.unset_fields_except(except_list)
        # fields outside the except-list become wsme.Unset
        self.assertEqual(api_base.created_at, wsme.Unset)
        self.assertEqual(api_base.updated_at, today)
class APIBaseObjectTestCase(base.APITestCase):
    """Unit tests for gluon.api.baseObject.APIBaseObject (DB access mocked)."""
    def setUp(self):
        super(APIBaseObjectTestCase, self).setUp()
        pass
    """
    test class_builder
    """
    # new_class should have __name__ and db_model attribute
    # new_class object has attribute attr_foo which only accepts str values
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_class_builder(self, mock_get_instance):
        """class_builder() produces a named, typed API class bound to a model."""
        new_class_name = 'new_class'
        _db_model = mock.Mock()
        attributes = {"attr_foo": str}
        new_class = baseObject.APIBaseObject.class_builder(
            new_class_name, _db_model, attributes)
        self.assertEqual(new_class.db_model, _db_model)
        self.assertEqual(new_class.__name__, new_class_name)
        mock_get_instance.assert_called_once()
        # create new_class object and assign value to it
        new_obj = new_class()
        try:
            # should throw wsme.exc.InvalidInput since attr_foo is str type
            new_obj.attr_foo = 123
        except wsme.exc.InvalidInput as e:
            self.assertIsNotNone(e)
    """
    test build
    """
    # create new_class which has attribute "attr_foo"
    # create mock_db_obj that has attribute "attr_foo" with value "123"
    # build() should return object whose attr_foo attribute is "123"
    def test_build(self):
        """build() copies the db object's fields onto a new API object."""
        new_class_name = 'new_class'
        _db_model = mock.Mock()
        attributes = {"attr_foo": str}
        new_class = baseObject.APIBaseObject.class_builder(
            new_class_name, _db_model, attributes)
        mock_db_obj = mock.Mock()
        mock_db_obj.as_dict.return_value = {"attr_foo": "123"}
        new_obj = new_class.build(mock_db_obj)
        observed = new_obj.attr_foo
        expected = "123"
        self.assertEqual(observed, expected)
    """
    test get_from_db
    """
    # mock db and mock db_obj
    # create mock_db_obj that has attribute "attr_foo" with value "123"
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_get_from_db(self, mock_get_instance):
        """get_from_db() looks up by primary key and builds an API object."""
        # mock_db_obj has as_dict function that returns a dict
        mock_db_obj = mock.Mock()
        mock_db_obj.as_dict.return_value = {"attr_foo": "123"}
        # mock_db has get_by_primary_key function that returns mock_db_obj
        mock_db = mock.Mock()
        mock_db.get_by_primary_key.return_value = mock_db_obj
        # set the dbapi.get_instance to return mock_db
        mock_get_instance.return_value = mock_db
        # create new_class with db field pointing to mock_db
        new_class_name = 'new_class'
        _db_model = mock.Mock()
        attributes = {"attr_foo": str}
        new_class = baseObject.APIBaseObject.class_builder(
            new_class_name, _db_model, attributes)
        new_obj = new_class.get_from_db("any_key")
        observed = new_obj.attr_foo
        expected = "123"
        self.assertEqual(observed, expected)
    """
    test create_in_db
    """
    # mock db and mock db_obj
    # create mock_db_obj that has attribute "attr_foo" with value "123"
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_create_in_db(self, mock_get_instance):
        """create_in_db() persists the values and returns the built object."""
        # mock_db_obj has as_dict function that returns a dict
        mock_db_obj = mock.Mock()
        mock_db_obj.as_dict.return_value = {"attr_foo": "123"}
        # mock_db has create function that returns mock_db_obj
        mock_db = mock.Mock()
        mock_db.create.return_value = mock_db_obj
        # set the dbapi.get_instance to return mock_db
        mock_get_instance.return_value = mock_db
        # create new_class with db field pointing to mock_db
        new_class_name = 'new_class'
        _db_model = mock.Mock()
        attributes = {"attr_foo": str}
        new_class = baseObject.APIBaseObject.class_builder(
            new_class_name, _db_model, attributes)
        new_obj = new_class.create_in_db({"attr_foo": "123"})
        observed = new_obj.attr_foo
        expected = "123"
        self.assertEqual(observed, expected)
        # also assert create() has called once
        mock_db.create.assert_called_once()
    """
    test update_in_db
    """
    # mock db and mock db_obj
    # create mock_db_obj that has attribute "attr_foo" with value "123"
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_update_in_db(self, mock_get_instance):
        """update_in_db() fetches, updates, saves and returns the object."""
        # mock_db_obj has as_dict function that returns a dict
        mock_db_obj = mock.Mock()
        mock_db_obj.as_dict.return_value = {"attr_foo": "123"}
        # mock_db has get_by_primary_key function that returns mock_db_obj
        mock_db = mock.Mock()
        mock_db.get_by_primary_key.return_value = mock_db_obj
        # set the dbapi.get_instance to return mock_db
        mock_get_instance.return_value = mock_db
        # create new_class with db field pointing to mock_db
        new_class_name = 'new_class'
        _db_model = mock.Mock()
        attributes = {"attr_foo": str}
        new_class = baseObject.APIBaseObject.class_builder(
            new_class_name, _db_model, attributes)
        new_obj = new_class.update_in_db("any_key", {"attr_foo": "123"})
        observed = new_obj.attr_foo
        expected = "123"
        self.assertEqual(observed, expected)
        # also assert get_by_primary_key(), save(), update() has called once
        mock_db.get_by_primary_key.assert_called_once()
        mock_db_obj.update.assert_called_once()
        mock_db_obj.save.assert_called_once()
    """
    test delete_from_db
    """
    # mock db and mock db_obj
    # create mock_db_obj that has attribute "attr_foo" with value "123"
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_delete_from_db(self, mock_get_instance):
        """delete_from_db() fetches the row by primary key and deletes it."""
        # mock_db_obj
        mock_db_obj = mock.Mock()
        # mock_db has get_by_primary_key function that returns mock_db_obj
        mock_db = mock.Mock()
        mock_db.get_by_primary_key.return_value = mock_db_obj
        # set the dbapi.get_instance to return mock_db
        mock_get_instance.return_value = mock_db
        # create new_class with db field pointing to mock_db
        new_class_name = 'new_class'
        _db_model = mock.Mock()
        attributes = {"attr_foo": str}
        new_class = baseObject.APIBaseObject.class_builder(
            new_class_name, _db_model, attributes)
        new_class.delete_from_db("any_key")
        # assert get_by_primary_key(), delete()has called once
        mock_db.get_by_primary_key.assert_called_once()
        mock_db_obj.delete.assert_called_once()
class APIBaseListTestCase(base.APITestCase):
    """Unit tests for gluon.api.baseObject.APIBaseList (DB access mocked)."""
    def setUp(self):
        super(APIBaseListTestCase, self).setUp()
        pass
    """
    test class_builder
    """
    # create api_object_class whose instances will be elements of list
    # create new_class by calling class_builder with api_object_class
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_class_builder(self, mock_get_instance):
        """class_builder() produces a typed list class over the element class."""
        _db_model = type("FooDb", (object, ), {"foo": str})
        api_object_class = baseObject.APIBaseObject.class_builder(
            "FooAPI", _db_model, {"foo": str})
        class_name = 'FooListAPI'
        list_name = "listOfFoo"
        new_class = baseObject.APIBaseList.class_builder(
            class_name, list_name, api_object_class)
        self.assertEqual(new_class.list_name, "listOfFoo")
        self.assertEqual(new_class.api_object_class, api_object_class)
        # test assigning values to the new_api_list
        # its listOfFoo should only take a list of FooClass objects
        new_api_list = new_class()
        # case 1: throws error if assign list of string
        expected_exception = None
        try:
            new_api_list.listOfFoo = ["foo"]
        except Exception as e:
            expected_exception = e
        self.assertIsNotNone(expected_exception)
        # case 2: NO error if assign list of FooClass objects
        expected_exception = None
        try:
            new_api_list.listOfFoo = [api_object_class()]
        except Exception as e:
            expected_exception = e
        self.assertIsNone(expected_exception)
    """
    test build
    """
    # create api_object_class whose instances will be elements of list
    # create new_class by calling class_builder with this api_object_class
    # mock the db to return a list of db_objs
    # call new_class.build() with the mock db data to generate api_obj_list
    @patch('gluon.api.baseObject.dbapi.get_instance')
    def test_build(self, mock_get_instance):
        """build() wraps each db row into an element of the typed list."""
        _db_model = type("FooDb", (object, ), {"foo": str})
        api_object_class = baseObject.APIBaseObject.class_builder(
            "FoodAPI", _db_model, {"foo": str})
        class_name = 'FooListAPI'
        list_name = "listOfFoo"
        new_class = baseObject.APIBaseList.class_builder(
            class_name, list_name, api_object_class)
        # mock db setups that will return a list of db_obj
        mock_db_obj = mock.Mock()
        mock_db_obj.as_dict.return_value = {"foo": "123"}
        mock_db = mock.Mock()
        mock_db.get_list.return_value = [mock_db_obj]
        mock_get_instance.return_value = mock_db
        # start testing by calling build() on new_class
        api_obj_list = new_class.build()
        # listOfFoo should contain one element
        self.assertEqual(len(api_obj_list.listOfFoo), 1)
        # this element should have foo field with value "123"
        api_obj = api_obj_list.listOfFoo[0]
        self.assertEqual(api_obj.foo, "123")
class RootObjectControllerTestCase(base.APITestCase):
    """Partial unit tests for gluon.api.baseObject.RootObjectController."""
    def setUp(self):
        super(RootObjectControllerTestCase, self).setUp()
        pass
    """
    test class_builder
    """
    # FIXME write test cases for this function
    # have trouble with @wsme_pecan.wsexpose
    @patch('gluon.managers.manager_base.get_api_manager')
    @patch('gluon.api.baseObject.APIBaseList.class_builder')
    @patch('gluon.api.baseObject.wsme_pecan.wsexpose')
    def test_class_builder(self,
                           mock_wsexpose,
                           mock_APIBaseList_class_builder,
                           mock_get_api_manager):
        """Smoke test: class_builder() runs with its collaborators mocked."""
        api_object_class = mock.Mock()
        name = "FooController"
        primary_key_type = str
        api_name = "foo"
        list_object_class = mock.Mock()
        mock_APIBaseList_class_builder.return_value = list_object_class
        # only verifies the call does not raise; no assertions yet (see FIXME)
        baseObject.RootObjectController.class_builder(
            name, api_object_class, primary_key_type, api_name)
        pass
class RootSubObjectControllerTestCase(base.APITestCase):
    """Placeholder tests for gluon.api.baseObject.RootSubObjectController."""
    def setUp(self):
        super(RootSubObjectControllerTestCase, self).setUp()
        pass
    """
    test class_builder
    """
    # FIXME write test cases for this function
    # have trouble with @wsme_pecan.wsexpose
    def test_class_builder(self):
        """Placeholder: intentionally empty until wsexpose can be mocked."""
        pass
| 2.3125 | 2 |
05.04.2022/listas/final.py | N0N4T0/python-codes | 0 | 12768929 | allowedAccess = ['Danilo', 'Lucio', 'Daiana', 'Vanessa']
requestName = input("Por gentileza informa seu nome para verificar nivel de permissão: ")
# BUG FIX: the original loop broke out of BOTH branches on the first
# iteration, so only the first name in allowedAccess could ever be
# authorized. A membership test checks the whole list.
if requestName in allowedAccess:
    print("Acesso autorizado a " + requestName)
else:
    print("Acesso negado a " + requestName)
| 3.765625 | 4 |
terrascript/oneandone/__init__.py | hugovk/python-terrascript | 507 | 12768930 | <filename>terrascript/oneandone/__init__.py
# terrascript/oneandone/__init__.py
import terrascript
class oneandone(terrascript.Provider):
    """Provider class for the `oneandone` Terraform provider.

    No behavior beyond the terrascript.Provider base; the class name itself
    identifies the provider to terrascript.
    """
    pass
| 1.242188 | 1 |
functests/execassets.py | Serhiy1/archivist-python | 2 | 12768931 | <reponame>Serhiy1/archivist-python
"""
Test assets creation
"""
from copy import copy, deepcopy
import json
from os import environ
from unittest import skip, TestCase
from uuid import uuid4
from archivist.archivist import Archivist
from archivist.proof_mechanism import ProofMechanism
# pylint: disable=fixme
# pylint: disable=missing-docstring
# pylint: disable=unused-variable
# Base asset attributes shared by all tests below; deep-copied per test
# class so individual tests can extend them without cross-contamination.
ATTRS = {
    "arc_firmware_version": "1.0",
    "arc_serial_number": "vtl-x4-07",
    "arc_description": "Traffic flow control light at A603 North East",
    "some_custom_attribute": "value",
}
class TestAssetCreate(TestCase):
"""
Test Archivist Asset Create method
"""
maxDiff = None
    @classmethod
    def setUpClass(cls):
        # read the bearer token from the configured file and build a single
        # Archivist client shared by every test in this class
        with open(environ["TEST_AUTHTOKEN_FILENAME"], encoding="utf-8") as fd:
            auth = fd.read().strip()
        cls.arch = Archivist(
            environ["TEST_ARCHIVIST"], auth, verify=False, max_time=300
        )
        # per-class copies of the shared fixture attributes
        cls.attrs = deepcopy(ATTRS)
        cls.traffic_light = deepcopy(ATTRS)
        cls.traffic_light["arc_display_type"] = "Traffic light with violation camera"
    @classmethod
    def tearDownClass(cls):
        # drop references so the client and fixtures can be garbage collected
        cls.arch = None
        cls.attrs = None
        cls.traffic_light = None
    def test_asset_create_simple_hash(self):
        """
        Test asset creation uses simple hash proof mechanism
        """
        # NOTE(review): no proof_mechanism prop is passed, so this asserts
        # the service-side default is SIMPLE_HASH — confirm
        asset = self.arch.assets.create(
            attrs=self.traffic_light,
            confirm=True,
        )
        self.assertEqual(
            asset["proof_mechanism"],
            ProofMechanism.SIMPLE_HASH.name,
            msg="Incorrect asset proof mechanism",
        )
@skip("takes too long")
def test_asset_create_khipu(self):
"""
Test asset creation using khipu proof mechanism
"""
asset = self.arch.assets.create(
props={
"proof_mechanism": ProofMechanism.KHIPU.name,
},
attrs=self.traffic_light,
confirm=True,
)
self.assertEqual(
asset["proof_mechanism"],
ProofMechanism.KHIPU.name,
msg="Incorrect asset proof mechanism",
)
def test_asset_create_with_fixtures(self):
"""
Test creation with fixtures
"""
# creates simple_hash endpoint
simple_hash = copy(self.arch)
simple_hash.fixtures = {
"assets": {
"proof_mechanism": ProofMechanism.SIMPLE_HASH.name,
},
}
# create traffic lights endpoint from simple_hash
traffic_lights = copy(simple_hash)
traffic_lights.fixtures = {
"assets": {
"attributes": {
"arc_display_type": "Traffic light with violation camera",
"arc_namespace": f"functests {uuid4()}",
},
},
}
traffic_light = traffic_lights.assets.create(
attrs=self.attrs,
confirm=True,
)
self.assertEqual(
traffic_lights.assets.count(),
1,
msg="Incorrect number of traffic_lights",
)
# create fancy traffic lights endpoint from traffic lights
fancy_traffic_lights = copy(traffic_lights)
fancy_traffic_lights.fixtures = {
"assets": {
"attributes": {
"arc_namespace1": f"functests {uuid4()}",
},
},
}
fancy_traffic_light = fancy_traffic_lights.assets.create(
attrs=self.attrs,
confirm=True,
)
self.assertEqual(
fancy_traffic_lights.assets.count(),
1,
msg="Incorrect number of fancy_traffic_lights",
)
def test_asset_create_event(self):
"""
Test list
"""
# get identity of first asset
identity = None
for asset in self.arch.assets.list():
print("asset", json.dumps(asset, sort_keys=True, indent=4))
identity = asset["identity"]
break
self.assertIsNotNone(
identity,
msg="Identity is None",
)
# different behaviours are also different.
props = {
"operation": "Record",
# This event is used to record evidence.
"behaviour": "RecordEvidence",
# Optional Client-claimed time at which the maintenance was performed
"timestamp_declared": "2019-11-27T14:44:19Z",
# Optional Client-claimed identity of person performing the operation
"principal_declared": {
"issuer": "idp.synsation.io/1234",
"subject": "phil.b",
"email": "<EMAIL>",
},
}
attrs = {
# Required Details of the RecordEvidence request
"arc_description": "Safety conformance approved for version 1.6.",
# Required The evidence to be retained in the asset history
"arc_evidence": "DVA Conformance Report attached",
# Example Client can add any additional information in further attributes,
# including free text or attachments
"conformance_report": "blobs/e2a1d16c-03cd-45a1-8cd0-690831df1273",
}
event = self.arch.events.create(
identity, props=props, attrs=attrs, confirm=True
)
print("event", json.dumps(event, sort_keys=True, indent=4))
| 2.53125 | 3 |
src/UQpy/inference/information_criteria/baseclass/InformationCriterion.py | SURGroup/UncertaintyQuantification | 0 | 12768932 | <reponame>SURGroup/UncertaintyQuantification
from abc import ABC, abstractmethod
from typing import Union
import numpy as np
from UQpy.inference.BayesParameterEstimation import BayesParameterEstimation
from UQpy.inference.MLE import MLE
class InformationCriterion(ABC):
    """Abstract base class for model-selection information criteria.

    Concrete subclasses (e.g. AIC/BIC style criteria) implement
    :meth:`minimize_criterion` to score a fitted model on *data*.
    """

    @abstractmethod
    def minimize_criterion(self, data: np.ndarray,
                           parameter_estimator: Union[MLE, BayesParameterEstimation],
                           return_penalty: bool = False) -> float:
        """
        Function that must be implemented by the user in order to create new concrete implementation of the
        :class:`.InformationCriterion` baseclass.

        :param data: Observations the criterion is evaluated against.
        :param parameter_estimator: Fitted estimator providing the model and
            its parameter estimates.
        :param return_penalty: When True, implementations are expected to
            return only the penalty term of the criterion.
        :return: The (minimized) criterion value.
        """
        pass
| 2.875 | 3 |
test/functional/test-framework/core/test_run.py | josehu07/open-cas-linux-mf | 2 | 12768933 | <gh_stars>1-10
#
# Copyright(c) 2019-2020 Intel Corporation
# SPDX-License-Identifier: BSD-3-Clause-Clear
#
from log.logger import Log
import pytest
class Blocked(Exception):
    """Raised by TestRun.block() to mark the current test as blocked."""


class TestRun:
    """Static holder for the per-test execution context (DUT, executor, log)."""

    dut = None
    executor = None
    # String annotation keeps the type hint without evaluating the Log name
    # at class-creation time.
    LOGGER: "Log" = None
    plugin_manager = None

    @classmethod
    def step(cls, message):
        """Log *message* as a test step and return the logger's context."""
        return cls.LOGGER.step(message)

    @classmethod
    def group(cls, message):
        """Log *message* as a test group and return the logger's context."""
        return cls.LOGGER.group(message)

    @classmethod
    def iteration(cls, iterable, group_name=None):
        """Yield items from *iterable*, logging each one as an iteration."""
        header = "Iteration list" if group_name is None else f"{group_name}"
        TestRun.LOGGER.start_group(header)
        items = list(iterable)
        total = len(items)
        index = 0
        for item in items:
            index += 1
            cls.LOGGER.start_iteration(f"Iteration {index}/{total}")
            yield item
            TestRun.LOGGER.end_iteration()
        TestRun.LOGGER.end_group()

    @classmethod
    def fail(cls, message):
        """Fail the current test immediately with *message*."""
        pytest.fail(message)

    @classmethod
    def block(cls, message):
        """Abort the current test as blocked."""
        raise Blocked(message)
| 2.078125 | 2 |
XboxHelper.py | abaire/nv2a-trace | 0 | 12768934 | <filename>XboxHelper.py
"""Various helper methods"""
# pylint: disable=missing-function-docstring
# pylint: disable=consider-using-f-string
# pylint: disable=chained-comparison
import atexit
from collections import namedtuple
from typing import Optional
from typing import Tuple
import time
# Decoded snapshot of NV_PFIFO_CACHE1_DMA_STATE (see parse_dma_state()).
DMAState = namedtuple(
    "DMAState", ["non_increasing", "method", "subchannel", "method_count", "error"]
)

# One decoded pushbuffer method header (see parse_command()).
Method = namedtuple(
    "Method", ["method", "subchannel", "method_count", "non_increasing"]
)

# For general information on PFIFO, see
# https://envytools.readthedocs.io/en/latest/hw/fifo/intro.html

# mmio blocks: offsets of each hardware block within the NV2A MMIO aperture.
NV2A_MMIO_BASE = 0xFD000000
BLOCK_PMC = 0x000000
BLOCK_PBUS = 0x001000
BLOCK_PFIFO = 0x002000
BLOCK_PRMA = 0x007000
BLOCK_PVIDEO = 0x008000
BLOCK_PTIMER = 0x009000
BLOCK_PCOUNTER = 0x00A000
BLOCK_PVPE = 0x00B000
BLOCK_PTV = 0x00D000
BLOCK_PRMFB = 0x0A0000
BLOCK_PRMVIO = 0x0C0000
BLOCK_PFB = 0x100000
BLOCK_PSTRAPS = 0x101000
BLOCK_PGRAPH = 0x400000
BLOCK_PCRTC = 0x600000
BLOCK_PRMCIO = 0x601000
BLOCK_PRAMDAC = 0x680000
BLOCK_PRMDIO = 0x681000
BLOCK_PRAMIN = 0x700000
BLOCK_USER = 0x800000


def _PFIFO(addr):
    """Absolute MMIO address of a register at *addr* within the PFIFO block."""
    return NV2A_MMIO_BASE + BLOCK_PFIFO + addr


def _PGRAPH(addr):
    """Absolute MMIO address of a register at *addr* within the PGRAPH block."""
    return NV2A_MMIO_BASE + BLOCK_PGRAPH + addr


# Pushbuffer state
NV_PFIFO_CACHE1_DMA_STATE = 0x00001228
DMA_STATE = _PFIFO(NV_PFIFO_CACHE1_DMA_STATE)

# Pushbuffer write address
NV_PFIFO_CACHE1_DMA_PUT = 0x00001240
DMA_PUSH_ADDR = _PFIFO(NV_PFIFO_CACHE1_DMA_PUT)

# Pushbuffer read address
NV_PFIFO_CACHE1_DMA_GET = 0x00001244
DMA_PULL_ADDR = _PFIFO(NV_PFIFO_CACHE1_DMA_GET)

NV_PFIFO_CACHE1_DMA_SUBROUTINE = 0x0000124C
DMA_SUBROUTINE = _PFIFO(NV_PFIFO_CACHE1_DMA_SUBROUTINE)

NV_PFIFO_CACHE1_PUSH0 = 0x00001200
CACHE_PUSH_MASTER_STATE = _PFIFO(NV_PFIFO_CACHE1_PUSH0)

# CACHE write state
NV_PFIFO_CACHE1_DMA_PUSH = 0x00001220
CACHE_PUSH_STATE = _PFIFO(NV_PFIFO_CACHE1_DMA_PUSH)

# CACHE read state
NV_PFIFO_CACHE1_PULL0 = 0x00001250
CACHE_PULL_STATE = _PFIFO(NV_PFIFO_CACHE1_PULL0)

# CACHE write address
NV_PFIFO_CACHE1_PUT = 0x00001210
CACHE_PUSH_ADDR = _PFIFO(NV_PFIFO_CACHE1_PUT)

# CACHE read address
NV_PFIFO_CACHE1_GET = 0x00001270
CACHE_PULL_ADDR = _PFIFO(NV_PFIFO_CACHE1_GET)

NV_PFIFO_CACHE1_METHOD = 0x00001800
CACHE1_METHOD = _PFIFO(NV_PFIFO_CACHE1_METHOD)

# NOTE(review): unlike every other constant here, this one is already an
# absolute address (0xFD...) yet is still passed through _PFIFO — confirm
# whether the double offset is intentional.
NV_PFIFO_CACHE1_DATA = 0xFD003804
CACHE1_DATA = _PFIFO(NV_PFIFO_CACHE1_DATA)

NV_PFIFO_RAMHT = 0x00000210
RAM_HASHTABLE = _PFIFO(NV_PFIFO_RAMHT)

NV_PGRAPH_CTX_SWITCH1 = 0x0000014C
CTX_SWITCH1 = _PGRAPH(NV_PGRAPH_CTX_SWITCH1)

NV_PGRAPH_FIFO = 0x00000720
PGRAPH_STATE = _PGRAPH(NV_PGRAPH_FIFO)

NV_PGRAPH_STATUS = 0x00000700
PGRAPH_STATUS = _PGRAPH(NV_PGRAPH_STATUS)

NV_PGRAPH_TEXOFFSET0 = 0x00001A24
PGRAPH_TEXOFFSET0 = _PGRAPH(NV_PGRAPH_TEXOFFSET0)

NV_PGRAPH_TEXCTL0_0 = 0x000019CC
PGRAPH_TEXCTL0_0 = _PGRAPH(NV_PGRAPH_TEXCTL0_0)

NV_PGRAPH_TEXCTL1_0 = 0x000019DC
PGRAPH_TEXCTL1_0 = _PGRAPH(NV_PGRAPH_TEXCTL1_0)

NV_PGRAPH_TEXFMT0 = 0x00001A04
PGRAPH_TEXFMT0 = _PGRAPH(NV_PGRAPH_TEXFMT0)
def _free_allocation(xbox, address):
    """Free a contiguous memory block previously allocated on the Xbox.

    Registered via atexit by load_binary() so allocations are released
    when the tracer exits.
    """
    print("_free_allocation: Free'ing 0x%08X" % address)
    xbox.ke.MmFreeContiguousMemory(address)

    # Sleep to ensure the call is fully processed.
    time.sleep(0.1)
    print("_free_allocation: Freed")
def load_binary(xbox, data):
    """Loads arbitrary data into a new contiguous memory block on the xbox.

    The allocation is automatically freed at interpreter exit via atexit.

    Arguments:
        xbox -- connection object providing ke.* syscalls and write().
        data -- bytes to copy to the console.
    Returns:
        The Xbox-side address of the newly allocated block.
    """
    data_len = len(data)
    code_addr = xbox.ke.MmAllocateContiguousMemory(data_len)
    print("load_binary: Allocated %d bytes at 0x%08X" % (data_len, code_addr))
    atexit.register(_free_allocation, xbox, code_addr)
    xbox.write(code_addr, data)
    return code_addr
def parse_command(addr, word, display=False) -> "Tuple[int, Optional[Method]]":
    """Decode one pushbuffer opcode *word* located at *addr*.

    Returns a tuple of (address of the next command, Method info or None).
    A returned address of 0 signals an opcode this tracer does not follow
    (call/return) or an old-style jump to address 0.
    """
    prefix = "0x%08X: Opcode: 0x%08X" % (addr, word)

    # Old-style jump: top bits 001, low two bits clear.
    if (word & 0xE0000003) == 0x20000000:
        target = word & 0x1FFFFFFC
        print(prefix + "; old jump 0x%08X" % target)
        return target, None

    low_bits = word & 3
    if low_bits == 1:
        # New-style jump.
        target = word & 0xFFFFFFFC
        print(prefix + "; jump 0x%08X" % target)
        return target, None

    if low_bits == 2:
        # Subroutine call — not followed by this tracer.
        print(prefix + "; unhandled opcode type: call")
        return 0, None

    if word == 0x00020000:
        # Subroutine return — likewise not followed.
        print(prefix + "; unhandled opcode type: return")
        return 0, None

    opcode_bits = word & 0xE0030003
    non_increasing = opcode_bits == 0x40000000
    if opcode_bits == 0 or non_increasing:
        # Method header: either increasing (0) or non-increasing (0x40000000).
        # Should method be (word >> 2) & 0x7ff?
        # See https://envytools.readthedocs.io/en/latest/hw/fifo/dma-pusher.html#fifo-dma-pusher
        info = Method(
            method=word & 0x1FFF,
            subchannel=(word >> 13) & 7,
            method_count=(word >> 18) & 0x7FF,
            non_increasing=non_increasing,
        )
        if display:
            print(
                prefix
                + "; Method: 0x%04X (%d times)" % (info.method, info.method_count)
            )
        # Skip the header word plus one data word per method.
        return addr + 4 + info.method_count * 4, info

    print(prefix + "; unknown opcode type")
    return addr, None
class XboxHelper:
    """Provides various functions for interaction with XBOX"""

    def __init__(self, xbox):
        # xbox: connection object exposing read_u32/write_u32 MMIO access.
        self.xbox = xbox
        # RAMHT location/size; populated by fetch_ramht().
        self.ramht_offset = 0
        self.ramht_size = 0

    def delay(self):
        """Optional settle delay after register writes; currently disabled."""
        # FIXME: if this returns `True`, the functions below should have their own
        # loops which check for command completion
        # time.sleep(0.01)
        return False

    def disable_pgraph_fifo(self):
        """Clear the PGRAPH FIFO enable bit (bit 0)."""
        state = self.xbox.read_u32(PGRAPH_STATE)
        self.xbox.write_u32(PGRAPH_STATE, state & 0xFFFFFFFE)

    def wait_until_pgraph_idle(self):
        """Poll PGRAPH_STATUS until the busy bit (bit 0) clears."""
        while self.xbox.read_u32(PGRAPH_STATUS) & 0x00000001:
            time.sleep(0.001)

    def enable_pgraph_fifo(self):
        """Set the PGRAPH FIFO enable bit (bit 0)."""
        state = self.xbox.read_u32(PGRAPH_STATE)
        self.xbox.write_u32(PGRAPH_STATE, state | 0x00000001)
        if self.delay():
            pass

    def pause_fifo_puller(self):
        """Disable the PFIFO puller"""
        state_s1 = self.xbox.read_u32(CACHE_PULL_STATE)
        self.xbox.write_u32(CACHE_PULL_STATE, state_s1 & 0xFFFFFFFE)
        if self.delay():
            pass
        # print("Puller State was 0x" + format(state_s1, '08X'))

    def resume_fifo_puller(self):
        """Enable the PFIFO puller"""
        state_s2 = self.xbox.read_u32(CACHE_PULL_STATE)
        self.xbox.write_u32(
            CACHE_PULL_STATE, (state_s2 & 0xFFFFFFFE) | 1
        )  # Recover puller state
        if self.delay():
            pass

    def wait_until_pusher_idle(self):
        """Busy wait until the PFIFO pusher stops being busy"""
        while self.xbox.read_u32(CACHE_PUSH_STATE) & (1 << 4):
            pass

    def pause_fifo_pusher(self):
        """Disable the PFIFO pusher"""
        # Must be kept in sync with method used in kick_fifo.asm
        state = self.xbox.read_u32(CACHE_PUSH_STATE)
        self.xbox.write_u32(CACHE_PUSH_STATE, state & 0xFFFFFFFE)
        if self.delay():
            pass

    def resume_fifo_pusher(self):
        """Enable the PFIFO pusher"""
        # Must be kept in sync with method used in kick_fifo.asm
        state = self.xbox.read_u32(CACHE_PUSH_STATE)
        self.xbox.write_u32(
            CACHE_PUSH_STATE, (state & 0xFFFFFFFE) | 1
        )  # Recover pusher state
        if self.delay():
            pass

    def allow_populate_fifo_cache(self):
        """Temporarily enable the PFIFO pusher to populate the CACHE

        It is assumed that the pusher was previously paused, and it will be paused on
        exit.
        """
        self.resume_fifo_pusher()
        time.sleep(0.05)
        self.pause_fifo_pusher()

    def _dump_pb(self, start, end):
        """Walk and print pushbuffer commands from *start* up to *end*."""
        offset = start
        while offset != end:
            # 0x80000000: map the GPU-visible address into CPU address space.
            word = self.xbox.read_u32(0x80000000 | offset)
            offset, _method = parse_command(offset, word, True)
            # offset == 0 means parse_command hit an opcode it cannot follow.
            if offset == 0:
                break

    def print_enable_states(self):
        """Prints out the states of PGRAPH and the PFIFO push/pull registers."""
        print("Enable states:")
        print(f" PGRAPH: {self.is_pgraph_enabled()}")
        print(f" Push: {self.is_cache_push_master_enabled()}")
        print(
            f" DMA_Push: {self.is_cache_push_dma_enabled()} (Busy: {self.is_cache_push_dma_busy()} Empty: {self.is_cache_push_dma_buffer_empty()})"
        )
        print(f" DMA_Pull: {self.is_cache_pull_dma_enabled()}")

    # FIXME: This works poorly if the method count is not 0
    def print_pb_state(self):
        """Print the DMA pull/push/subroutine addresses and dump the span."""
        dma_pull_addr = self.xbox.read_u32(DMA_PULL_ADDR)
        dma_push_addr = self.xbox.read_u32(DMA_PUSH_ADDR)
        dma_subroutine = self.xbox.read_u32(DMA_SUBROUTINE)

        print(
            "PB-State: Pull: 0x%08X Push: 0x%08X Sub: 0x%08X"
            % (dma_pull_addr, dma_push_addr, dma_subroutine)
        )
        self._dump_pb(dma_pull_addr, dma_push_addr)
        print()

    def print_cache_state(self, print_contents=False):
        """Print CACHE read/write pointers; optionally dump all 128 entries."""
        pull_addr = self.xbox.read_u32(CACHE_PULL_ADDR)
        push_addr = self.xbox.read_u32(CACHE_PUSH_ADDR)

        pull_state = self.xbox.read_u32(CACHE_PULL_STATE)
        push_state = self.xbox.read_u32(CACHE_PUSH_STATE)

        print("CACHE-State: PULL: 0x%X PUSH: 0x%X" % (pull_addr, push_addr))

        print("Put / Pusher enabled: %s" % ("Yes" if (push_state & 1) else "No"))
        print("Get / Puller enabled: %s" % ("Yes" if (pull_state & 1) else "No"))

        if print_contents:
            print("Cache:")
            # JFR: This is intentionally read in a loop as behavior is dependent on the
            # implementation of xboxpy's `read`.
            for i in range(128):
                cache1_method = self.xbox.read_u32(CACHE1_METHOD + i * 8)
                cache1_data = self.xbox.read_u32(CACHE1_DATA + i * 8)

                output = " [0x%02X] 0x%04X (0x%08X)" % (i, cache1_method, cache1_data)
                # Mark the entries currently pointed at by get/put.
                pull_offset = i * 8 - pull_addr
                if pull_offset >= 0 and pull_offset < 8:
                    output += " < get[%d]" % pull_offset
                push_offset = i * 8 - push_addr
                if push_offset >= 0 and push_offset < 8:
                    output += " < put[%d]" % push_offset
                print(output)
        print()

    def print_dma_addresses(self):
        """Print the current DMA pull and push addresses."""
        push_addr = self.get_dma_push_address()
        pull_addr = self.get_dma_pull_address()
        print("DMA: PULL: 0x%X PUSH: 0x%X" % (pull_addr, push_addr))

    def print_dma_state(self):
        """Print the method/count currently latched in DMA_STATE."""
        state = self.parse_dma_state()
        print("dma_method: 0x%04X (count: %d)" % (state.method, state.method_count))

    def is_cache_empty(self):
        """Returns True if the CACHE is currently empty."""
        pull_addr = self.xbox.read_u32(CACHE_PULL_ADDR)
        push_addr = self.xbox.read_u32(CACHE_PUSH_ADDR)
        return pull_addr == push_addr

    def fetch_ramht(self):
        """Read the RAMHT register and cache the hash table offset/size."""
        ht = self.xbox.read_u32(RAM_HASHTABLE)
        NV_PFIFO_RAMHT_BASE_ADDRESS = 0x000001F0
        NV_PFIFO_RAMHT_SIZE = 0x00030000
        offset = (ht & NV_PFIFO_RAMHT_BASE_ADDRESS) << 12
        size = 1 << (((ht & NV_PFIFO_RAMHT_SIZE) >> 16) + 12)
        self.ramht_offset = offset
        self.ramht_size = size
        print("RAMHT: 0x%X - Base addr 0x%X size: %d" % (ht, offset, size))

    def fetch_graphics_class(self):
        """Returns the target graphics class."""
        ctx_switch_1 = self.xbox.read_u32(CTX_SWITCH1)
        return ctx_switch_1 & 0xFF

    def parse_dma_state(self):
        """Decode NV_PFIFO_CACHE1_DMA_STATE into a DMAState namedtuple."""
        dma_state = self.xbox.read_u32(DMA_STATE)
        ret = DMAState(
            non_increasing=dma_state & 0x01,
            method=(dma_state >> 2) & 0x1FFF,
            subchannel=(dma_state >> 13) & 0x07,
            method_count=(dma_state >> 18) & 0x7FF,
            error=(dma_state >> 29) & 0x07,
        )
        return ret

    def is_cache_push_master_enabled(self):
        """True if the CACHE1 push master enable bit is set."""
        return bool(self.xbox.read_u32(CACHE_PUSH_MASTER_STATE) & 0x01)

    def is_cache_push_dma_enabled(self):
        """True if the DMA pusher is enabled."""
        return bool(self.xbox.read_u32(CACHE_PUSH_STATE) & 0x01)

    def is_cache_push_dma_busy(self):
        """True if the DMA pusher is currently busy."""
        return bool(self.xbox.read_u32(CACHE_PUSH_STATE) & 0x10)

    def is_cache_push_dma_buffer_empty(self):
        """True if the DMA push buffer is empty."""
        return bool(self.xbox.read_u32(CACHE_PUSH_STATE) & 0x100)

    def is_cache_pull_dma_enabled(self):
        """True if the puller is enabled."""
        return bool(self.xbox.read_u32(CACHE_PULL_STATE) & 0x01)

    def is_pgraph_enabled(self):
        """True if the PGRAPH FIFO enable bit is set."""
        return bool(self.xbox.read_u32(PGRAPH_STATE) & 0x01)

    def get_dma_push_address(self):
        """Return the current pushbuffer write (PUT) address."""
        return self.xbox.read_u32(DMA_PUSH_ADDR)

    def get_dma_pull_address(self):
        """Return the current pushbuffer read (GET) address."""
        return self.xbox.read_u32(DMA_PULL_ADDR)

    def set_dma_push_address(self, target):
        """Set the pushbuffer write (PUT) address to *target*."""
        self.xbox.write_u32(DMA_PUSH_ADDR, target)
def apply_anti_aliasing_factor(surface_anti_aliasing, x, y):
    """Scale surface coordinates by the anti-aliasing mode.

    Arguments:
        surface_anti_aliasing -- AA mode: 0 (none), 1 (2x horizontal),
            2 (2x in both axes).
        x, y -- coordinates to scale.
    Returns:
        The scaled (x, y) tuple.
    Raises:
        ValueError: for any other anti-aliasing mode.
    """
    if surface_anti_aliasing == 0:
        return x, y
    if surface_anti_aliasing == 1:
        return x * 2, y
    if surface_anti_aliasing == 2:
        return x * 2, y * 2
    # Raise instead of `assert False`: asserts are stripped under `python -O`,
    # which would have made this silently fall through and return None.
    raise ValueError(
        "Unsupported surface_anti_aliasing value: %r" % (surface_anti_aliasing,)
    )
| 2.359375 | 2 |
GreyMatter/sleep.py | nayangupta824/Melissa-Web | 20 | 12768935 | from SenseCells.tts import tts
def go_to_sleep():
    """Speak a goodbye message via text-to-speech, then exit the process."""
    tts('Goodbye! Have a great day!')
    # NOTE(review): quit() is intended for interactive sessions; sys.exit()
    # would be the conventional choice in a script.
    quit()
| 1.625 | 2 |
playground.py | JoseLuisRojasAranda/FB-Friends-Scraper | 0 | 12768936 | <reponame>JoseLuisRojasAranda/FB-Friends-Scraper<gh_stars>0
import time
import scrapy
from scrapy import Spider
from scrapy.utils.response import open_in_browser
from scrapy.http import FormRequest
from scrapy.crawler import CrawlerProcess
from selenium import webdriver
class FBCrawler(object):
    """Drives a Chrome browser through a Facebook login via Selenium."""

    def __init__(self):
        # Expects the chromedriver binary next to the script.
        self.driver = webdriver.Chrome(executable_path="./chromedriver")

    def login_facebook(self, email, passw):
        """Log into facebook.com with *email*/*passw*, then open the profile.

        Interactive: waits for keyboard input before navigating to the
        profile and before quitting the browser.
        """
        self.driver.get('https://www.facebook.com/')
        print ("Opened facebook")
        time.sleep(1)

        username_box = self.driver.find_element_by_id('email')
        username_box.send_keys(email)
        print ("Email Id entered")
        time.sleep(1)

        password_box = self.driver.find_element_by_id('pass')
        # Fix: the password parameter was never used (the call contained an
        # anonymised placeholder that did not even parse); send the actual
        # `passw` argument.
        password_box.send_keys(passw)
        print ("Password entered")

        login_box = self.driver.find_element_by_id('loginbutton')
        login_box.click()
        print ("Done")
        input('Press anything to continue')

        profile = self.driver.find_element_by_xpath('//*[@title="Perfil"]')
        profile.click()

        input('Press anything to quit')
        self.driver.quit()
        print("Finished")
def main():
    """Create a crawler and run the interactive Facebook login flow."""
    # NOTE(review): credentials are hard-coded (and anonymised in this copy);
    # they should come from environment variables or an interactive prompt.
    crawler = FBCrawler()
    crawler.login_facebook("<EMAIL>", "Perrita1")

main()
| 3.046875 | 3 |
seed/routes/helpers/graphene_view.py | erick-rivas/django-reference | 0 | 12768937 | <filename>seed/routes/helpers/graphene_view.py<gh_stars>0
"""
__Seed builder__
(Read_only) Routes helper
"""
from graphene_django.views import GraphQLView
from rest_framework.authentication import TokenAuthentication
from rest_framework.permissions import IsAuthenticated
from rest_framework.decorators import authentication_classes, permission_classes, api_view
from django.views.decorators.csrf import csrf_exempt
from app.settings import get_env
class AuthGraphQLView(GraphQLView):
    """GraphQL view that requires DRF token authentication on POST."""

    @classmethod
    def as_view(cls, *args, **kwargs):
        # Wrap the stock graphene view with DRF decorators so requests must
        # carry a valid token; decorators apply bottom-up, api_view outermost.
        view = super(AuthGraphQLView, cls).as_view(*args, **kwargs)
        view = permission_classes((IsAuthenticated,))(view)
        view = authentication_classes((TokenAuthentication,))(view)
        view = api_view(['POST'])(view)
        return view
def graphene_view():
    """Build the CSRF-exempt GraphQL view, authenticated when ENABLE_AUTH is set."""
    view_class = AuthGraphQLView if get_env('ENABLE_AUTH') else GraphQLView
    return csrf_exempt(view_class.as_view(graphiql=True))
BitTornado/clock.py | weedy/BitTornado | 0 | 12768938 | """Replicate win32 time.clock() behavior for all platforms"""
import time
import sys
_MAXFORWARD = 100
_FUDGE = 1
class RelativeTime:
def __init__(self):
self.time = time.time()
self.offset = 0
def get_time(self):
t = time.time() + self.offset
if t < self.time or t > self.time + _MAXFORWARD:
self.time += _FUDGE
self.offset += self.time - t
return self.time
self.time = t
return t
# On non-Windows platforms, provide a clock() that mirrors the old win32
# time.clock() behaviour via the shared RelativeTime instance.
if sys.platform != 'win32':
    _RTIME = RelativeTime()

    def clock():
        """Return a smoothed, non-decreasing wall-clock time."""
        return _RTIME.get_time()
| 3.46875 | 3 |
Archive/dynasty_extract.py | 46319943/SLan-NLP | 0 | 12768939 | <reponame>46319943/SLan-NLP<filename>Archive/dynasty_extract.py
import re
def past_time_concat(loc_list: list) -> list:
    """Re-join "公元前" (BC era) prefixes that were split from their year.

    When an element is exactly '公元前' and the next element contains '000',
    the two are merged into one string and the second slot is blanked.
    The input list is not mutated; a modified copy is returned.
    """
    loc_list = loc_list.copy()
    for index, loc_str in enumerate(loc_list):
        # Bounds check fixes an IndexError when '公元前' is the last element.
        if (
            loc_str == '公元前'
            and index + 1 < len(loc_list)
            and '000' in loc_list[index + 1]
        ):
            loc_list[index] = loc_str + loc_list[index + 1]
            loc_list[index + 1] = ''
            print(loc_list[index])
    return loc_list
def time_filter(time_str: str) -> str:
    """Drop time words with no datable meaning; normalise "now" words.

    Returns None for empty/undatable words, '现代' for present-day words,
    and otherwise the word with any '时期' suffix stripped.
    """
    if time_str in ('', ' '):
        return None
    undatable = [
        '每年', '每日', '当时', '中期', '此后', '当年', '早期', '昔日', '明天', '同年',
        '一时', '过去', '每天', '一日', '其后', '次年', '未来', '后来', '初年', '后期',
        '古代', '初期', '今',
    ]
    present_day = [
        '目前', '现在', '今天', '近年', '现代', '现', '今日', '近代', '此时',
        '近现代', '当代', '当前']
    if time_str in undatable:
        return None
    if time_str in present_day:
        return '现代'
    return time_str.replace('时期', '')
def year_filter(time_str: str) -> str:
    """Map a time word containing '年' (year) to a dynasty name.

    Strings without '年' are returned unchanged. Relative or unusable
    expressions yield None. Otherwise a year is extracted ('前' means BC,
    '...前' means "years ago") and mapped onto a dynasty by date range.
    """
    if '年' not in time_str:
        return time_str
    # Relative / open-ended expressions cannot be dated.
    if time_str.startswith('近'):
        return None
    if '以后' in time_str:
        return None
    if time_str == '2020年前':
        return None
    # Bug fix: str.replace() returns a new string; the original discarded
    # the result, so this normalisation never took effect.
    time_str = time_str.replace('1700万,1850年', '1850年')

    pattern_before = re.search(r'前\S*?(\d{3,4})', time_str)
    pattern_past = re.search(r'(\d{3,4})\S*?前', time_str)
    pattern_4 = re.search(r'\d{4}', time_str)
    pattern_3 = re.search(r'\d{3}', time_str)
    pattern_2 = re.search(r'公元(\d{2})', time_str)

    if pattern_before:
        # '前NNNN' -> BC year.
        time_int = -int(pattern_before.group(1))
    elif pattern_past:
        # 'NNNN...前' -> that many years ago.
        time_int = 2021 - int(pattern_past.group(1))
    elif '多年' in time_str:
        time_int = None
    elif pattern_4:
        time_int = int(pattern_4.group(0))
    elif pattern_3:
        time_int = int(pattern_3.group(0))
    elif pattern_2:
        time_int = int(pattern_2.group(1))
    else:
        time_int = None

    if time_int is None:
        return None
    # Boundary years match the earlier branch (e.g. 1644 -> 明朝).
    if 1368 <= time_int <= 1644:
        return '明朝'
    elif 1644 <= time_int <= 1912:
        return '清朝'
    elif time_int > 1912:
        return '现代'
    elif 1279 <= time_int <= 1368:
        return '元朝'
    elif 907 <= time_int <= 1279:
        return '宋朝'
    elif 618 <= time_int <= 907:
        return '唐朝'
    elif 581 <= time_int <= 618:
        return '隋朝'
    elif 265 <= time_int <= 581:
        return '魏晋南北朝'
    elif 220 <= time_int <= 265:
        return '三国'
    elif -206 <= time_int <= 220:
        return '汉代'
    elif -221 <= time_int <= -206:
        return '秦代'
    elif -770 <= time_int <= -221:
        return '春秋战国'
    elif -1100 <= time_int <= -770:
        return '西周'
    elif -1600 <= time_int <= -1100:
        return '商代'
    elif -2100 <= time_int <= -1600:
        return '夏代'
    elif time_int <= -2100:
        return '黄帝'
    else:
        raise Exception('No dynasty matched')
def dynasty_filter(time_str):
    """Normalise a dynasty word to its canonical label(s).

    Returns the canonical dynasty name (str), a list of names for compound
    words such as '秦汉', or *time_str* unchanged if unmapped.
    (The unused local `import re` and the incorrect `-> str` annotation —
    list values are also returned — were removed.)
    """
    mapping_dict = {
        '西汉初期': '汉代',
        '西汉': '汉代',
        '明中期': '明朝',
        '唐时': '唐朝',
        '商末': '商代',
        '东汉初': '汉代',
        '当上元': '唐朝',
        '盛唐': '唐朝',
        '元朝末期': '元朝',
        '五代': '宋朝',
        '北魏': '魏晋南北朝',
        '秦汉': ['秦代', '汉代'],
        '元朝初': '元朝',
        '明前期': '明朝',
        '民国初': '现代',
        '明': '明朝',
        '明清时': ['明朝', '清朝'],
        '五代十国': '宋朝',
        '晚唐': '唐朝',
        '隋末': '隋朝',
        '唐中': '唐朝',
        '宋末': '宋朝',
        '唐末': '唐朝',
        '魏晋': '魏晋南北朝',
        '明朝中期': '明朝',
        '先秦': '秦代',
        '战国': '春秋战国',
        '汉朝': '汉代',
        '宋元': ['宋朝', '元朝'],
        '明清': ['明朝', '清朝'],
        '清代': '清朝',
        '元代': '元朝',
        '明代': '明朝',
        '宋代': '宋朝',
        '唐代': '唐朝',
        '隋代': '隋朝',
        '南北朝': '魏晋南北朝',
        '晚清': '清朝',
        '春秋': '春秋战国',
        '元末': '元朝',
        '元初': '元朝',
        '东汉': '汉代',
        '唐': '唐朝',
        '民国': '现代',
        '秦朝': '秦代',
        '唐宋': ['唐朝', '宋朝'],
        '明末': '明朝',
        '明初': '明朝',
        '清初': '清朝',
        '隋唐': ['隋朝', '唐朝'],
        '清末': '清朝',
    }
    # dict.get with a default replaces the membership-test-then-index pattern.
    return mapping_dict.get(time_str, time_str)
def dynasty_select(time_str: str) -> bool:
    """Return True if *time_str* is one of the recognised dynasty labels."""
    recognised = {
        '现代', '清朝', '明朝', '唐朝', '宋朝', '元朝', '隋朝', '魏晋南北朝',
        '春秋战国', '汉代', '秦代', '三国', '黄帝', '夏代', '西周', '商代',
    }
    return time_str in recognised
def dynasty_extract(loc_list: list) -> list:
    """Full pipeline: raw time words -> canonical dynasty labels.

    Steps: re-join split '公元前' prefixes, drop undatable words, convert
    year expressions to dynasties, normalise dynasty spellings (expanding
    compound words into multiple labels), and keep only recognised labels.
    The input list is not mutated.
    """
    # past_time_concat already copies, so the extra .copy() was redundant.
    words = past_time_concat(loc_list)

    # Single pass through both filters; the original called time_filter and
    # year_filter twice per element (once in the condition, once for the value).
    filtered = []
    for word in words:
        word = time_filter(word)
        if word is None:
            continue
        word = year_filter(word)
        if word is not None:
            filtered.append(word)

    # Normalise spellings; compound words map to a list of dynasties.
    dynasties = []
    for word in filtered:
        mapped = dynasty_filter(word)
        if isinstance(mapped, list):
            dynasties.extend(mapped)
        else:
            dynasties.append(mapped)

    return [d for d in dynasties if dynasty_select(d)]
def dynasty_extract_plus_loc(time_list, loc_list):
    # TODO: unimplemented stub — presumably meant to combine dynasty
    # extraction with the associated location words; confirm intent.
    pass
if __name__ == '__main__':
    # Ad-hoc driver: load a pickled DataFrame, flatten the per-line time
    # words, and print dynasty frequency counts.
    import pandas as pd
    from slab.pickle_util import pickle_to_file, unpickle_from_file

    df = unpickle_from_file('df.pkl')
    loc_list = df['line_time_result'].values
    loc_list = [word for line in loc_list for word in line]
    print(pd.Series(dynasty_extract(loc_list)).value_counts())
| 2.796875 | 3 |
paprika/system/Message.py | thunder-/paprika | 0 | 12768940 | <filename>paprika/system/Message.py
import json
from paprika.system.logger.Logger import Logger
from requests import post
from paprika.system.Traceback import Traceback
class Message:
    """Helpers for POSTing JSON messages and reading HTTP response headers."""

    def __init__(self):
        pass

    @staticmethod
    def get_header(response, name):
        """Case-insensitively look up header *name* (must be lower-case).

        Returns the header value, or None when absent (made explicit — the
        original's control flow risked returning an unbound variable).
        """
        for key in response.headers.keys():
            if key.lower() == name:
                return response.headers[key]
        return None

    @staticmethod
    def post_request(url, message):
        """POST *message* as JSON to *url* and normalise the response.

        Returns a dict: the decoded JSON body (plus 'status_code') on a
        JSON response, otherwise/on error a FAILED record with details.
        """
        logger = Logger(Message())
        try:
            headers = {'Content-Type': 'application/json'}
            logger.debug('', 'url : ' + url + ', message : ' + json.dumps(message))
            response = post(url, json.dumps(message), headers=headers)
            content_type = Message.get_header(response, 'content-type')
            logger.debug('', 'url : ' + url + ', response : ' + str(response.status_code) + ', message : ' + json.dumps(message))
            logger.debug('', 'url : ' + url + ', content_type : ' + str(content_type) + ', message : ' + json.dumps(message))
            if content_type == 'application/json':
                result = json.loads(response.content)
                result['status_code'] = response.status_code
            else:
                result = dict()
                result['state'] = 'FAILED'
                result['status_code'] = response.status_code
                result['message'] = response.reason
                result['backtrace'] = response.text
            return result
        except Exception:
            # Was a bare `except:`, which also swallowed SystemExit and
            # KeyboardInterrupt; narrowed to Exception.
            result = Traceback.build()
            result['state'] = 'FAILED'
            result['status_code'] = 400
            return result
| 2.453125 | 2 |
app/__init__.py | alphagov/team-metrics | 0 | 12768941 | <gh_stars>0
import os
import uuid
import alembic.config
from alembic.config import Config
import alembic.command
from flask import redirect, request, session
from flask import Flask
from psycopg2 import ProgrammingError
import requests
from app.metrics_db import Metrics_DB
import logging
db = Metrics_DB()
def create_app(application):
    """Configure and return the Flask *application*.

    Sets up file logging, loads config, initialises the database, runs
    migrations, installs the auth guard, and registers blueprints.
    """
    logging.basicConfig(level=logging.DEBUG)

    handler = logging.FileHandler('log/app.log')  # log file destination
    handler.setLevel(logging.DEBUG)  # log DEBUG and above (comment was wrong: not errors-only)
    application.logger.addHandler(handler)  # attach the handler to the app's logger

    # Imported here (not at module top) to avoid an import cycle with app.
    from app.config import Config
    application.config.from_object(Config)

    db.init()
    alembic_upgrade()

    application.before_request(check_auth_before_request)

    register_blueprint(application)
    return application
def alembic_upgrade():
    """Run Alembic migrations up to 'head' using alembic.ini.

    Raises:
        RuntimeError: when the upgrade fails with a ProgrammingError.
    """
    # Note: this Config is alembic.config.Config (module-level import),
    # not app.config.Config.
    config = Config('alembic.ini')
    config.attributes['configure_logger'] = False
    try:
        alembic.command.upgrade(config, 'head')
    except ProgrammingError as e:
        logging.error('post upgrade exception')
        # Chain the cause so the underlying DB error stays in the traceback.
        raise RuntimeError(e) from e
def register_blueprint(application):
    """Register all of the app's view blueprints on *application*.

    Imports are local to avoid import cycles at module load time.
    """
    from app.views.index import index_blueprint
    from app.views.assets import assets_blueprint
    from app.views.team_metrics import team_metrics_blueprint
    application.register_blueprint(index_blueprint)
    application.register_blueprint(assets_blueprint)
    application.register_blueprint(team_metrics_blueprint)
def check_auth_before_request():
    """Redirect unauthenticated visits to team pages back to the index."""
    is_team_page = '/teams/' in request.url
    if is_team_page and not session.get('email'):
        # Remember the destination so the user can be sent back after login.
        session['target_url'] = request.url
        return redirect('/')
| 2.171875 | 2 |
localization_env.py | robger98/CS5313_Localization_Env | 0 | 12768942 | # Author : <NAME>
# Contact : <EMAIL>
# Date : Feb 16, 2020
import random
import time
import numpy as np
import random
import time
import numpy as np
try:
from CS5313_Localization_Env import maze
except:
print(
'Problem finding CS5313_Localization_Env.maze... Trying to "import maze" only...'
)
try:
import maze
print("Successfully imported maze")
except Exception as ex:
print("Could not import maze")
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
print(ex)
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
try:
from CS5313_Localization_Env import RobotLocalization as viz
except:
print(
'Problem finding CS5313_Localization_Env.RobotLocalization... Trying to "import RobotLocalization" only...'
)
try:
import RobotLocalization as viz
print("Successfully imported RobotLocalization")
except Exception as ex:
print("Could not import RobotLocalization")
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
print(ex)
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
try:
from CS5313_Localization_Env import localization_env as le
except:
print(
'Problem finding CS5313_Localization_Env.localization_env... Trying to "import localization_env" only...'
)
try:
import localization_env as le
print("Successfully imported localization_env")
except Exception as ex:
print("Could not import localization_env")
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
print(ex)
print("----->LOOK HERE FOR EXCEPTION MESSAGE<-----")
from enum import Enum
# Set to True to print information on the robot location and heading.
printouts = True
# Set to True to print the map as a DataFrame on every move() call and to
# write the transition tables to "heading.csv" and "location.csv". Has no
# effect (except the pandas import) when printouts is False.
df = False
if df:
    from pandas import DataFrame
class Directions(Enum):
    """Movement directions S, E, N, W plus St (stationary).

    Each value is an (x, y) offset, e.g. S = (0, 1): down one row,
    unchanged in the columns.
    """

    S = (0, 1)
    E = (1, 0)
    N = (0, -1)
    W = (-1, 0)
    St = (0, 0)

    def get_ortho(self, value):
        """Return the list of directions orthogonal to *value*."""
        if value in (Directions.N, Directions.S):
            return [Directions.W, Directions.E]
        return [Directions.N, Directions.S]
class Headings(Enum):
    """Robot headings S, E, N, W; each value is an (x, y) movement offset."""

    S = (0, 1)
    E = (1, 0)
    N = (0, -1)
    W = (-1, 0)

    def get_ortho(self, value):
        """Return the list of headings orthogonal to *value*."""
        if value in (Headings.N, Headings.S):
            return [Headings.W, Headings.E]
        return [Headings.N, Headings.S]
class Environment:
    """ An environment for testing a randomly moving robot around a maze.
    Important Class Variables\n
    map -- The map of the maze. A 2d list of lists in the form list[x][y] where a value of 1 signifies there is a wall, 0 signifies the cell is traversable, and 'x' denotes the robot location.\n
    location_transitions -- The table of transition probabilities for each cell. Format is [x][y][heading][direction] which will return the probabilities of moving the direction, given the robot's current x, y, and heading.\n
    heading_transitions -- The table of transition probabilities for the headings given each cell. Format is [x][y][heading][heading] which will return the probabilities of each heading for the next time step given the robot's current x, y, and heading.\n
    robot_location -- The current location of the robot, given as a tuple in the form (x, y).
    robot_heading -- The current heading of the robot, given as a Headings enum.
    """

    def __init__(
        self,
        action_bias,
        observation_noise,
        action_noise,
        dimensions,
        seed=None,
        window_size=[750, 750],
    ):
        """Initializes the environment. The robot starts in a random traversable cell.
        Arguments:\n
        action_bias -- Provides a bias for the robots actions. Positive values increase the likelihood of South and East movements, and negative favor North and West. (float in range -1-1)\n
        observation_noise -- The probability that any given observation value will flip values erroneously. (float in range 0-1)\n
        action_noise -- The probability that an action will move either direction perpendicular to the intended direction. (float in range 0-1)\n
        dimensions -- The dimensions of the map, given in the form (x,y). (tuple in range (1+, 1+))\n
        seed (optional) -- The random seed value. (int) default=10\n
        window_size(optional) -- The [x, y] size of the display. Default is [750, 750]. Should be the same aspect ratio as the maze to avoid strange looking graphics.
        Return:\n
        No return
        """
        # NOTE(review): window_size uses a mutable default list; this is safe
        # only as long as no caller mutates the list in place.
        # the pygame state
        self.running = True
        # Step counter
        self.steps = 0
        # save the bias, noise, and map size parameters
        self.action_bias = action_bias
        self.observation_noise = observation_noise
        self.action_noise = action_noise
        self.dimensions = dimensions
        # set the random seed and display it
        self.seed = seed if seed != None else random.randint(1, 10000)
        random.seed(self.seed)
        # create the map and list of free (traversable, map value 0) cells
        self.map = maze.make_maze(dimensions[0], dimensions[1], seed)
        self.free_cells = [
            (x, y)
            for x in range(dimensions[0])
            for y in range(dimensions[1])
            if self.map[x][y] == 0
        ]
        # create the transition tables for locations and headings
        self.location_transitions = self.create_locations_table()
        self.headings_transitions = self.create_headings_table()
        # optionally dump the tables to CSV (df is a module-level debug flag)
        if df:
            DataFrame(self.location_transitions).transpose().to_csv("location.csv")
            DataFrame(self.headings_transitions).transpose().to_csv("heading.csv")
        # place the robot in a uniformly random free cell and mark it with 'x'
        self.robot_location = self.free_cells[
            random.randint(0, len(self.free_cells) - 1)
        ]
        self.location_priors, self.heading_priors = self.compute_prior_probabilities()
        self.observation_tables = self.create_observation_tables()
        self.map[self.robot_location[0]][self.robot_location[1]] = "x"
        # Set the robot heading, restricted to headings that are not blocked
        # by a wall from the starting cell
        self.robot_heading = random.choice(
            [
                h
                for h in Headings
                if self.traversable(self.robot_location[0], self.robot_location[1], h)
            ]
        )
        # generate initial heading probabilities for the visualizer
        # (uniform, unnormalized weight of 1 per heading)
        probs = {}
        # prob_sum = 0
        for h in le.Headings:
            # num = random.random()
            probs[h] = 1
            # prob_sum += num
        # for h in le.Headings:
        #     probs[h] /= prob_sum
        # initialize the visualizer and draw the starting state
        self.window_size = window_size
        self.game = viz.Game()
        self.game.init_pygame(self.window_size)
        self.game.update(
            self.map,
            self.robot_location,
            self.robot_heading,
            [[0] * self.dimensions[1]] * self.dimensions[0],
            probs,
        )
        self.game.display()
        # printouts is a module-level verbosity flag
        if printouts:
            print("Random seed:", self.seed)
            print("Robot starting location:", self.robot_location)
            print("Robot starting heading:", self.robot_heading)
        if df:
            print(DataFrame(self.map).transpose())
def compute_prior_probabilities(self):
location_priors = {}
for cell in self.free_cells:
location_priors[cell] = 1 / len(self.free_cells)
heading_priors = {}
for heading in Headings:
heading_priors[heading] = 0
for cell in self.free_cells:
for heading2 in Headings:
heading_priors[heading] += self.headings_transitions[cell[0]][
cell[1]
][heading2][heading]
heading_priors[heading] /= len(self.free_cells) * 4
return location_priors, heading_priors
def random_dictionary_sample(self, probs):
sample = random.random()
prob_sum = 0
for key in probs.keys():
prob_sum += probs[key]
if prob_sum > sample:
return key
    def move(self):
        """Updates the robots heading and moves the robot to a new position based off of the transition table and its current location and new heading.
        Return:\n
        A list of the observations modified by the observation noise, where 1 signifies a wall and 0 signifies an empty cell. The order of the list is [S, E, N, W]
        """
        # get the new location: clear the old 'x' marker, sample a movement
        # direction from the location transition table for the current cell
        # and heading, then step and re-mark the map
        self.map[self.robot_location[0]][self.robot_location[1]] = 0
        probs = self.location_transitions[self.robot_location[0]][
            self.robot_location[1]
        ][self.robot_heading]
        direction = self.random_dictionary_sample(probs)
        self.robot_location = (
            self.robot_location[0] + direction.value[0],
            self.robot_location[1] + direction.value[1],
        )
        self.map[self.robot_location[0]][self.robot_location[1]] = "x"
        # Get the new heading by sampling the heading transition table at the
        # *new* location, conditioned on the old heading
        h_probs = self.headings_transitions[self.robot_location[0]][
            self.robot_location[1]
        ][self.robot_heading]
        self.robot_heading = self.random_dictionary_sample(h_probs)
        # # get the new location
        # self.map[self.robot_location[0]][self.robot_location[1]] = 0
        # probs = self.location_transitions[self.robot_location[0]][
        #     self.robot_location[1]
        # ][self.robot_heading]
        self.steps += 1
        # optional debug output (printouts/df are module-level flags)
        if printouts:
            print()
            print(
                "---------------------------Steps: "
                + str(self.steps)
                + " ---------------------------------"
            )
            print(self.robot_location)
            print(self.robot_heading)
            print(direction)
        if df:
            print(DataFrame(self.map).transpose())
        # if self.running:
        #     self.game.update(
        #         self.map,
        #         self.robot_location,
        #         self.robot_heading,
        #         location_probs,
        #         headings_probs,
        #     )
        #     self.running = self.game.display()
        # else:
        #     print("Pygame closed. Quiting...")
        #     self.game.quit()
        # return the (noisy) observation from the new cell
        return self.observe()
def update(self, location_probs, headings_probs):
"""Updates the visualizer to represent where your filtering method estimates the robot to be, and where it estimates the robot is heading.
Arguments:\n
location_probs: The probability of the robot being in any (x, y) cell in the map. Created from your project code. Format list[x][y] = float\n
headings_probs: The probability of the robot's current heading being any given heading. Created from your project code. Format dict{<Headings enum> : float, <Headings enum> : float,... }\n
"""
if self.running:
self.game.update(
self.map,
self.robot_location,
self.robot_heading,
location_probs,
headings_probs,
)
self.running = self.game.display()
else:
print("Pygame closed. Quiting...")
self.game.quit()
def observe(self):
"""Observes the walls at the current robot location
Return:\n
A list of the observations modified by the observation noise, where 1 signifies a wall and 0 signifies an empty cell. The order of the list is [S, E, N, W]
"""
# get the neighboring walls to create the true observation table
observations = [
0
if self.traversable(
self.robot_location[0], self.robot_location[1], direction
)
else 1
for direction in Directions
if direction != Directions.St
]
# apply observation noise
observations = [
1 - x if random.random() < self.observation_noise else x
for x in observations
]
return observations
    def create_observation_tables(self):
        """Build, for every free cell, the likelihood of each possible 4-bit
        wall observation given the true walls around that cell.

        Returns:\n
        A table indexed as [x][y][(s, e, n, w)] -> probability. Wall cells
        (map value 1) hold -1 instead of a dict.
        """
        observation_table = []
        for x in range(self.dimensions[0]):
            # each x slot is a dict keyed by y
            observation_table.append({})
            for y in range(self.dimensions[1]):
                if self.map[x][y] == 1:
                    observation_table[x][y] = -1
                    continue
                observation_table[x][y] = {}
                # the true (noise-free) observation for this cell, in
                # Directions order minus the stationary direction: [S, E, N, W]
                observations = [
                    0
                    if self.traversable(
                        x, y, direction
                    )
                    else 1
                    for direction in Directions
                    if direction != Directions.St
                ]
                # enumerate all 16 candidate observations; each bit that
                # disagrees with the truth contributes a factor of
                # observation_noise, each agreeing bit (1 - observation_noise)
                for a in [0, 1]:
                    for b in [0, 1]:
                        for c in [0, 1]:
                            for d in [0, 1]:
                                potential_obs = (a, b, c, d)
                                num_wrong = 0
                                for i in range(len(observations)):
                                    if observations[i] != potential_obs[i]:
                                        num_wrong += 1
                                prob = (1 - self.observation_noise) ** (len(
                                    observations
                                )-num_wrong) * self.observation_noise ** num_wrong
                                observation_table[x][y][potential_obs] = prob
        return observation_table
    def create_locations_table(self):
        """Build the movement transition table for every cell.

        Returns:\n
        A table indexed as [x][y][heading][direction] -> probability of
        stepping in that direction. Wall cells (map value 1) hold -1.
        """
        temp = []
        # loop through the x dim
        for x in range(self.dimensions[0]):
            temp.append([])
            # loop through the y dim
            for y in range(self.dimensions[1]):
                # If the cell is not traversable then set its value in the transition table to -1
                if self.map[x][y] == 1:
                    temp[x].append(-1)
                    continue
                temp[x].append({})
                for heading in list(Headings):
                    probs = {}
                    # Compute transition probabilities ignoring walls:
                    # the heading's own direction gets 1 - action_noise,
                    # each orthogonal direction gets action_noise / 2
                    for direction in Directions:
                        if direction.name == heading.name:
                            probs[direction] = 1 - self.action_noise
                        elif direction in Directions.get_ortho(
                            Directions, Directions[heading.name]
                        ):
                            probs[direction] = self.action_noise / 2
                        else:
                            probs[direction] = 0
                    # init stationary probability
                    probs[Directions.St] = 0
                    # account for walls. If there is a wall for one of the transition probabilities add the probability to the stationary probability and set the transition probability to 0
                    for direction in Directions:
                        if not self.traversable(x, y, direction):
                            probs[Directions.St] += probs[direction]
                            probs[direction] = 0
                    # add the new transition probabilities
                    temp[x][y].update({heading: probs})
        return temp
    def create_headings_table(self):
        """Build the heading transition table for every cell.

        A traversable heading keeps itself with probability 1. A blocked
        heading redistributes probability over the traversable headings,
        biased towards S/E by action_bias (and away for negative bias).

        Returns:\n
        A table indexed as [x][y][heading][heading] -> probability of the
        next heading. Wall cells (map value 1) hold -1.
        """
        temp = []
        # loop through the x dim
        for x in range(self.dimensions[0]):
            temp.append([])
            # loop through the y dim
            for y in range(self.dimensions[1]):
                # If the cell is not traversable then set its value in the transition table to -1
                if self.map[x][y] == 1:
                    temp[x].append(-1)
                    continue
                temp[x].append({})
                for heading in Headings:
                    probs = {}
                    # Handle case when the current heading is traversable:
                    # the robot keeps its heading with certainty
                    if self.traversable(x, y, heading):
                        for new_heading in Headings:
                            if heading == new_heading:
                                probs[new_heading] = 1
                            else:
                                probs[new_heading] = 0
                        temp[x][y].update({heading: probs})
                        continue
                    # If the current heading is not traversable
                    # Find which headings are available
                    headings_traversablity = {}
                    for new_heading in Headings:
                        if self.traversable(x, y, new_heading):
                            headings_traversablity[new_heading] = 1
                        else:
                            headings_traversablity[new_heading] = 0
                    # Sum these values for later arithmetic
                    total_traversable = sum(list(headings_traversablity.values()))
                    se_traversable = (
                        headings_traversablity[Headings.S]
                        + headings_traversablity[Headings.E]
                    )
                    nw_traversable = (
                        headings_traversablity[Headings.N]
                        + headings_traversablity[Headings.W]
                    )
                    # Compute the heading probabilities for traversable
                    # headings: uniform share plus/minus the bias split over
                    # the S/E (resp. N/W) traversable headings
                    for new_heading in Headings:
                        if self.traversable(x, y, new_heading):
                            if new_heading in [Headings.S, Headings.E]:
                                probs[new_heading] = (
                                    1 / total_traversable
                                    + self.action_bias / se_traversable
                                )
                            else:
                                probs[new_heading] = (
                                    1 / total_traversable
                                    - self.action_bias / nw_traversable
                                )
                        else:
                            probs[new_heading] = 0
                    # normalize heading probabilities so they sum to 1
                    probs_sum = sum([probs[x] for x in Headings])
                    for h in Headings:
                        probs[h] /= probs_sum
                    # add the new transition probabilities
                    temp[x][y].update({heading: probs})
        return temp
def traversable(self, x, y, direction):
"""
Returns true if the cell to the given direction of (x,y) is traversable, otherwise returns false.
Arguements:\n
row -- the x coordinate of the initial cell\n
col -- the y coordinate of the initial cell\n
direction -- the direction of the cell to check for traversablility. Type: localization_env.Directions enum or localization_env.Headings\n
Return:\n
A boolean signifying whether the cell to the given direction is traversable or not
"""
# see if the cell in the direction is traversable. If statement to handle out of bounds errors
if (
x + direction.value[0] >= 0
and x + direction.value[0] < self.dimensions[0]
and y + direction.value[0] >= 0
and y + direction.value[0] < self.dimensions[1]
):
if self.map[x + direction.value[0]][y + direction.value[1]] == 0:
return True
return False
def dummy_location_and_heading_probs(self):
"""
Returns a dummy location probability table and a dummy heading probability dictionary for testing purposes
Returns:\n
location probability table: Format is list[x][y] = float between (0-1)\n
Headings probability table: Format is dict{<Heading enum> : float between (0-1)}
"""
loc_probs = list()
sum_probs = 0
for x in range(self.dimensions[0]):
loc_probs.append([])
for y in range(self.dimensions[1]):
if self.map[x][y] == 1:
loc_probs[x].append(0.0)
else:
num = random.random()
loc_probs[x].append(num)
sum_probs += num
for x in range(self.dimensions[0]):
for y in range(self.dimensions[1]):
loc_probs[x][y] /= sum_probs
hed_probs = {}
sample = np.random.rand(4)
sample = (sample / np.sum(sample)).tolist()
i = 0
for heading in le.Headings:
hed_probs[heading] = sample[i]
i += 1
return loc_probs, hed_probs
if __name__ == "__main__":
    # Smoke test: drive the robot with dummy probability estimates.
    env = Environment(0.1, 0.1, 0.2, (10, 10), window_size=[1000, 1000])
    # print("Starting test. Press <enter> to make move")
    location, heading = env.dummy_location_and_heading_probs()
    while env.running:
        # BUG FIX: Environment.move() takes no arguments; the original call
        # move(location, heading) raised TypeError on the first iteration.
        # The probability estimates are pushed to the visualizer via update().
        observation = env.move()
        env.update(location, heading)
        if printouts:
            print(observation)
        time.sleep(0.25)
| 2.4375 | 2 |
quant_eval.py | sharanramjee/single-image-stereo-depth-estimation | 0 | 12768943 | <reponame>sharanramjee/single-image-stereo-depth-estimation
import os
import numpy as np
from PIL import Image
def replace_zeros(data):
    """Replace every zero entry of *data* in place with the smallest non-zero
    value, so later ratio/log computations cannot divide by zero.

    Arguments:
    data -- a NumPy array; modified in place.

    Returns:
    The same array with zeros replaced. An all-zero array is returned
    unchanged (the original raised ValueError on np.min of an empty
    selection in that case).
    """
    nonzero = data[np.nonzero(data)]
    if nonzero.size == 0:
        # All-zero input: no sensible substitute exists; leave untouched.
        return data
    data[data == 0] = np.min(nonzero)
    return data
def compute_errors(gt_path, pred_path):
    """Compute standard depth-estimation error metrics for one image pair.

    Arguments:
    gt_path -- path to the ground-truth depth image.
    pred_path -- path to the predicted depth image.

    Returns:
    (abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3), where a1..a3 are the
    delta < 1.25**k accuracy ratios.
    """
    gt = replace_zeros(np.clip(np.asarray(Image.open(gt_path), dtype=float) / 255, 0, 1))
    pred = replace_zeros(np.clip(np.asarray(Image.open(pred_path), dtype=float) / 255, 0, 1))

    # accuracy-under-threshold metrics
    ratio = np.maximum(gt / pred, pred / gt)
    a1 = (ratio < 1.25).mean()
    a2 = (ratio < 1.25 ** 2).mean()
    a3 = (ratio < 1.25 ** 3).mean()

    # error metrics
    diff = gt - pred
    rmse = np.sqrt((diff ** 2).mean())
    rmse_log = np.sqrt(((np.log(gt) - np.log(pred)) ** 2).mean())
    abs_rel = np.mean(np.abs(diff) / gt)
    sq_rel = np.mean((diff ** 2) / gt)
    return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3
def compute_avg_errors(gt_dir, pred_dir):
    """Average the depth-error metrics over all image pairs in two directories.

    Arguments:
    gt_dir -- directory of ground-truth depth images (with trailing slash).
    pred_dir -- directory of predicted depth images (with trailing slash).

    Prints the mean ARD, SRD, RMSE and log RMSE over all pairs.
    """
    # BUG FIX: os.listdir() returns entries in arbitrary order, so the two
    # listings must be sorted to pair each ground truth with its own
    # prediction; the original zip could compare mismatched images.
    gts = sorted(gt_dir + f for f in os.listdir(gt_dir) if not f.startswith('.'))
    preds = sorted(pred_dir + f for f in os.listdir(pred_dir) if not f.startswith('.'))
    abs_rels = []
    sq_rels = []
    rmses = []
    rmse_logs = []
    for count, (gt, pred) in enumerate(zip(gts, preds), start=1):
        abs_rel, sq_rel, rmse, rmse_log, _, _, _ = compute_errors(gt, pred)
        abs_rels.append(abs_rel)
        sq_rels.append(sq_rel)
        rmses.append(rmse)
        rmse_logs.append(rmse_log)
        print('Image', count, 'processed')
    print('ARD:', np.mean(abs_rels))
    print('SRD:', np.mean(sq_rels))
    print('RMSE:', np.mean(rmses))
    print('log RMSE:', np.mean(rmse_logs))
if __name__ == '__main__':
    # Evaluate predicted depth maps against the ground-truth test set.
    gt_dir_path = 'data/test/depth/'
    pred_dir_path = 'output/DenseDepth_original/'
    # pred_dir_path = 'output/stereo_depth_estimator/'
    compute_avg_errors(gt_dir_path, pred_dir_path)
| 2.6875 | 3 |
django_auth_http_basic/__init__.py | t73fde/django-auth-http-basic | 8 | 12768944 | # -*- coding: utf-8 -*-
"""Simple authenticaton backend based on HTTP basic authentication.
:copyright: (c) 2016-2019 by <NAME>
:license: Apache 2.0, see LICENSE
"""
import logging
import requests
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
from django.conf import settings
LOGGER = logging.getLogger(__name__)
def is_insensitive():
    """Return True when user names should be compared case-insensitively.

    Driven by the HTTP_BASIC_AUTH_CASE setting: a string starting with
    '0', 'f' or 'n' (any case), or any falsy non-string value, selects
    case-insensitive handling.
    """
    case_spec = getattr(settings, 'HTTP_BASIC_AUTH_CASE', "y")
    if not isinstance(case_spec, str):
        return not bool(case_spec)
    return case_spec[:1].lower() in ("0", "f", "n")
def canonical_username(username):
    """Return the canonical form of *username*.

    Lower-cases the name when case-insensitive handling is configured;
    otherwise returns it unchanged.
    """
    return username.lower() if is_insensitive() else username
class HttpBasicAuthBackend(ModelBackend):
    """Authentication backend that uses HTTP basic authentication.

    In all other aspects this backend should behave like the default
    model-based backend from Django.
    """

    @staticmethod
    def checkpw_basic_auth(url, username, password):
        """Check authentication via HTTP basic authentication.

        Returns True on a 2xx response, False on any other status code,
        and None when the request itself failed. A None url skips the
        remote check entirely and counts as success (testing aid).
        """
        LOGGER.debug("Basic-auth URL=%s, user=%s", url, username)
        if url is None:
            return True
        try:
            response = requests.head(url, auth=(username, password))
            status_code = response.status_code
            LOGGER.debug('Basic-auth Status-Code=%d', status_code)
            return 200 <= status_code <= 299
        except requests.RequestException:
            LOGGER.exception(
                "Unable to get authentication from '%s' for user '%s':",
                url, username)
            return None

    def authenticate(
            self, request=None, username=None, password=None, **kwargs):
        """Authenticate with an user name and a password.

        Requires a setting HTTP_BASIC_AUTH_URL for specifying the URL endpoint
        for checking user name / password. URL can be set to None for testing
        purposes. In this case, no HTTP request is done, all checks are
        successful.

        Optional is a setting HTTP_BASIC_AUTH_CASE that specifies whether the
        user name will be treated case sensitive or case-insensitive. Any value
        that starts with a '0', 'f', or 'n' will result in a case insensitive
        setting. The value of HTTP_BASIC_AUTH_CASE is case-insensitive, of
        course.
        """
        user_model = get_user_model()
        if username is None:
            username = kwargs.get(user_model.USERNAME_FIELD)
        # BUG FIX: without credentials the original crashed on
        # username.lower(); Django's backend contract is to return None
        # to signal an authentication failure instead.
        if username is None or password is None:
            return None
        url = getattr(settings, 'HTTP_BASIC_AUTH_URL', '')
        if url == '':
            LOGGER.error("No HTTP_BASIC_AUTH_URL")
            return None
        username = canonical_username(username)
        # Reuse the url fetched above (the original re-read the setting
        # directly, duplicating the lookup).
        if not self.checkpw_basic_auth(url, username, password):
            return None
        user, _ = user_model.objects.get_or_create(**{
            user_model.USERNAME_FIELD: username,
        })
        return user if self.user_can_authenticate(user) else None
| 2.96875 | 3 |
synapseclient/annotations.py | lingyunsong/synapse_scripts | 0 | 12768945 | <filename>synapseclient/annotations.py
"""
***********
Annotations
***********
Annotations are arbitrary metadata attached to Synapse entities. They can be
accessed like ordinary object properties or like dictionary keys::
entity.my_annotation = 'This is one way to do it'
entity['other_annotation'] = 'This is another'
Annotations can be given in the constructor for Synapse Entities::
entity = File('data.xyz', parent=my_project, rating=9.1234)
Annotate the entity with location data::
entity.lat_long = [47.627477, -122.332154]
Record when we collected the data::
from datetime import datetime as Datetime
entity.collection_date = Datetime.now()
See:
- :py:meth:`synapseclient.Synapse.getAnnotation`
- :py:meth:`synapseclient.Synapse.setAnnotation`
~~~~~~~~~~~~~~~~~~~~~~~
Annotating data sources
~~~~~~~~~~~~~~~~~~~~~~~
Data sources are best recorded using Synapse's `provenance <Activity.html>`_ tools.
~~~~~~~~~~~~~~~~~~~~~~
Implementation details
~~~~~~~~~~~~~~~~~~~~~~
In Synapse, entities have both properties and annotations. Properties are used by
the system, whereas annotations are completely user defined. In the Python client,
we try to present this situation as a normal object, with one set of properties.
For more on the implementation and a few gotchas, see the documentation on
:py:mod:`synapseclient.entity`.
See also:
- :py:class:`synapseclient.entity.Entity`
- :py:mod:`synapseclient.entity`
"""
import collections
import warnings
from utils import to_unix_epoch_time, from_unix_epoch_time, _is_date, _to_list
from exceptions import SynapseError
def is_synapse_annotations(annotations):
"""Tests if the given object is a Synapse-style Annotations object."""
keys=['id', 'etag', 'creationDate', 'uri', 'stringAnnotations','longAnnotations','doubleAnnotations','dateAnnotations', 'blobAnnotations']
if not isinstance(annotations, collections.Mapping): return False
return all([key in keys for key in annotations.keys()])
def to_synapse_annotations(annotations):
"""Transforms a simple flat dictionary to a Synapse-style Annotation object."""
if is_synapse_annotations(annotations):
return annotations
synapseAnnos = {}
for key, value in annotations.iteritems():
if key in ['id', 'etag', 'blobAnnotations', 'creationDate', 'uri']:
synapseAnnos[key] = value
elif key in ['stringAnnotations','longAnnotations','doubleAnnotations','dateAnnotations'] and isinstance(value, collections.Mapping):
synapseAnnos.setdefault(key, {}).update({k:_to_list(v) for k,v in value.iteritems()})
else:
elements = _to_list(value)
if all((isinstance(elem, basestring) for elem in elements)):
synapseAnnos.setdefault('stringAnnotations', {})[key] = elements
elif all((isinstance(elem, bool) for elem in elements)):
synapseAnnos.setdefault('stringAnnotations', {})[key] = [str(element).lower() for element in elements]
elif all((isinstance(elem, int) or isinstance(elem, long) for elem in elements)):
synapseAnnos.setdefault('longAnnotations', {})[key] = elements
elif all((isinstance(elem, float) for elem in elements)):
synapseAnnos.setdefault('doubleAnnotations', {})[key] = elements
elif all((_is_date(elem) for elem in elements)):
synapseAnnos.setdefault('dateAnnotations', {})[key] = [to_unix_epoch_time(elem) for elem in elements]
## TODO: support blob annotations
# elif all((isinstance(elem, ???) for elem in elements)):
# synapseAnnos.setdefault('blobAnnotations', {})[key] = [???(elem) for elem in elements]
else:
synapseAnnos.setdefault('stringAnnotations', {})[key] = [str(elem) for elem in elements]
return synapseAnnos
def from_synapse_annotations(annotations):
"""Transforms a Synapse-style Annotation object to a simple flat dictionary."""
def process_user_defined_annotations(kvps, annos, func):
"""
for each annotation of a given class (date, string, double, ...), process the
annotation with the given function and add it to the dict 'annos'.
"""
for k,v in kvps.iteritems():
## don't overwrite system keys which won't be lists
if k in ['id', 'etag', 'creationDate', 'uri'] or (k in annos and not isinstance(annos[k], list)):
warnings.warn('A user defined annotation, "%s", has the same name as a system defined annotation and will be dropped. Try syn._getRawAnnotations to get annotations in native Synapse format.' % k)
else:
annos.setdefault(k,[]).extend([func(elem) for elem in v])
# Flatten the raw annotations to consolidate doubleAnnotations, longAnnotations,
# stringAnnotations and dateAnnotations into one dictionary
annos = dict()
for key, value in annotations.iteritems():
if key=='dateAnnotations':
process_user_defined_annotations(value, annos, lambda x: from_unix_epoch_time(float(x)))
elif key in ['stringAnnotations','longAnnotations']:
process_user_defined_annotations(value, annos, lambda x: x)
elif key == 'doubleAnnotations':
process_user_defined_annotations(value, annos, lambda x: float(x))
elif key=='blobAnnotations':
pass ## TODO: blob annotations not supported
else:
annos[key] = value
return annos
def is_submission_status_annotations(annotations):
"""Tests if the given dictionary is in the form of annotations to submission status"""
keys = ['objectId', 'scopeId', 'stringAnnos','longAnnos','doubleAnnos']
if not isinstance(annotations, collections.Mapping): return False
return all([key in keys for key in annotations.keys()])
def to_submission_status_annotations(annotations, is_private=True):
"""
Converts a normal dictionary to the format used to annotate submission
statuses, which is different from the format used to annotate entities.
:param annotations: A normal Python dictionary whose values are strings, floats, ints or doubles
:param isPrivate: Set privacy on all annotations at once. These can be set individually using :py:func:`set_privacy`.
Example::
from synapseclient.annotations import to_submission_status_annotations, from_submission_status_annotations
from datetime import datetime as Datetime
## create a submission and get its status
submission = syn.submit(evaluation, 'syn11111111')
submission_status = syn.getSubmissionStatus(submission)
## add annotations
submission_status.annotations = {'foo':'bar', 'shoe_size':12, 'IQ':12, 'timestamp':Datetime.now()}
## convert annotations
submission_status.annotations = to_submission_status_annotations(submission_status.annotations)
submission_status = syn.store(submission_status)
Synapse categorizes these annotations by: stringAnnos, doubleAnnos,
longAnnos. If date or blob annotations are supported, they are not
`documented <http://rest.synapse.org/org/sagebionetworks/repo/model/annotation/Annotations.html>`_
"""
if is_submission_status_annotations(annotations):
return annotations
synapseAnnos = {}
for key, value in annotations.iteritems():
if key in ['objectId', 'scopeId', 'stringAnnos','longAnnos','doubleAnnos']:
synapseAnnos[key] = value
elif isinstance(value, bool):
synapseAnnos.setdefault('stringAnnos', []).append({ 'key':key, 'value':unicode(value).lower(), 'isPrivate':is_private })
elif isinstance(value, int) or isinstance(value, long):
synapseAnnos.setdefault('longAnnos', []).append({ 'key':key, 'value':value, 'isPrivate':is_private })
elif isinstance(value, float):
synapseAnnos.setdefault('doubleAnnos', []).append({ 'key':key, 'value':value, 'isPrivate':is_private })
elif isinstance(value, basestring):
synapseAnnos.setdefault('stringAnnos', []).append({ 'key':key, 'value':value, 'isPrivate':is_private })
elif _is_date(value):
synapseAnnos.setdefault('longAnnos', []).append({ 'key':key, 'value':to_unix_epoch_time(value), 'isPrivate':is_private })
else:
synapseAnnos.setdefault('stringAnnos', []).append({ 'key':key, 'value':unicode(value), 'isPrivate':is_private })
return synapseAnnos
## TODO: this should accept a status object and return its annotations or an empty dict if there are none
def from_submission_status_annotations(annotations):
"""
Convert back from submission status annotation format to a normal dictionary.
Example::
submission_status.annotations = from_submission_status_annotations(submission_status.annotations)
"""
dictionary = {}
for key, value in annotations.iteritems():
if key in ['stringAnnos','longAnnos']:
dictionary.update( { kvp['key']:kvp['value'] for kvp in value } )
elif key == 'doubleAnnos':
dictionary.update( { kvp['key']:float(kvp['value']) for kvp in value } )
else:
dictionary[key] = value
return dictionary
def set_privacy(annotations, key, is_private=True, value_types=['longAnnos', 'doubleAnnos', 'stringAnnos']):
"""
Set privacy of individual annotations, where annotations are in the format used by Synapse
SubmissionStatus objects. See the `Annotations documentation <http://rest.synapse.org/org/sagebionetworks/repo/model/annotation/Annotations.html>`_
and the docs regarding `querying annotations <http://rest.synapse.org/GET/evaluation/submission/query.html>`_.
:param annotations: Annotations that have already been converted to Synapse format using
:py:func:`to_submission_status_annotations`.
:param key: The key of the annotation whose privacy we're setting.
:param is_private: If False, the annotation will be visible to users with READ permission on the evaluation.
If True, the it will be visible only to users with READ_PRIVATE_SUBMISSION on the evaluation.
Note: Is this really correct???
:param value_types: A list of the value types in which to search for the key. Defaults to all types
['longAnnos', 'doubleAnnos', 'stringAnnos'].
"""
for value_type in value_types:
kvps = annotations.get(value_type, None)
if kvps:
for kvp in kvps:
if kvp['key'] == key:
kvp['isPrivate'] = is_private
return kvp
raise KeyError('The key "%s" couldn\'t be found in the annotations.' % key)
| 2.953125 | 3 |
src/view/sprites/founder.py | Matimed/Barbarism | 2 | 12768946 | <filename>src/view/sprites/founder.py
import pygame as pg
from src.events import Tick
from src.references.images import JOB
from src.view.sprites.charactor import CharactorSprite
class FounderSprite(CharactorSprite):
    """Sprite for the 'founder' charactor job."""

    # Unscaled source surface for the founder job image.
    native_job = JOB['founder']
    # Working copy; replaced with a rescaled surface by update_size().
    job = native_job.copy()

    @classmethod
    def update_size(cls):
        """ Scales the class job image (from the unscaled native copy)
        to the sprite's current actual size.
        """
        # get_actual_size() / update_chip_size() are presumably inherited
        # from CharactorSprite -- TODO confirm against the base class.
        height = cls.get_actual_size()
        surface = cls.native_job
        # square scaling: width equals height
        new_surface = pg.transform.scale(surface,(height,height))
        cls.job = new_surface
        cls.update_chip_size()

    @classmethod
    def get_job(cls): return cls.job

    def __init__(self, chip_image):
        """Create the sprite and subscribe routine_update to Tick events."""
        super().__init__(chip_image)
        self.rect = self.image.get_rect()
        FounderSprite.get_event_dispatcher().add(Tick, self.routine_update)
        self.update_size()

    def routine_update(self, event):
        # Per-Tick hook; intentionally a no-op for this sprite.
        pass

    def refresh(self):
        """ Replace its Image and Rect with new ones
        in order to update its information (e.g. size).
        """
        self.image = self.get_image()
        self.rect = self.image.get_rect()
src/strategies/reinforce/NoReinforceStrategy.py | jgc128/crabada.py | 34 | 12768947 | from typing import Any, List
from src.libs.CrabadaWeb2Client.types import CrabForLending, Game, TeamStatus
from src.strategies.reinforce.ReinforceStrategy import ReinforceStrategy
class NoReinforceStrategy(ReinforceStrategy):
    """
    Strategy to adopt if you do not wish to reinforce
    at all; useful for auto-lose teams
    """

    def query(self, game: Game) -> "dict[str, Any] | None":
        """No need to make a query at all; always returns None."""
        return None

    def handleNoSuitableCrabFound(self) -> None:
        """No need to alert the user, because finding no crab is
        the aim of the strategy :-)"""
        pass

    def mayReturnNone(self) -> bool:
        """Do not worry if this strategy returns no result"""
        return True
| 2.90625 | 3 |
pymedphys/labs/pedromartinez/qc-lightrad.py | pymedphys/pymedphys-archive-2019 | 1 | 12768948 | #############################START LICENSE##########################################
# Copyright (C) 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#############################END LICENSE##########################################
###########################################################################################
#
# Script name: qc-lightrad
#
# Description: This script performs automated EPID QC of the QC-3 phantom developed in Manitoba.
# There are other tools out there that do this but generally the ROI are fixed whereas this script
# aims to dynamically identify them using machine vision and the bibs in the phantom.
#
# Example usage: python qc-lightrad "/file/"
#
# Using MED-TEC MT-IAD-1 phantom
#
# Author: <NAME>
# <EMAIL>
# 5877000722
# Date:2019-04-09
#
###########################################################################################
import argparse
import os
from datetime import datetime
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from PIL import Image
from skimage.feature import blob_log
import pydicom
from pymedphys.labs.pedromartinez.utils import utils as u
def point_detect(imcirclist):
    """Locate the darkest bib (blob) in each image of *imcirclist*.

    Runs a Laplacian-of-Gaussian blob detector on each image and keeps the
    blob whose centre pixel has the lowest grey value -- the bibs appear
    dark against a brighter background.

    Arguments:
    imcirclist -- iterable of 2-D images (convertible with np.array).

    Returns:
    (xs, ys) -- two parallel lists with the integer centre coordinates of
    the detected bib in each image.
    """
    detCenterXRegion = []
    detCenterYRegion = []
    print("Finding bibs in phantom...")
    for img in tqdm(imcirclist):
        grey_img = np.array(img, dtype=np.uint8)  # work in 8-bit greyscale
        blobs_log = blob_log(
            grey_img, min_sigma=15, max_sigma=40, num_sigma=10, threshold=0.05
        )
        centerXRegion = []
        centerYRegion = []
        grey_ampRegion = []
        # blob_log rows are (y, x, sigma); the radius is unused here
        for y, x, _r in blobs_log:
            centerXRegion.append(x)
            centerYRegion.append(y)
            grey_ampRegion.append(grey_img[int(y), int(x)])
        # keep the blob with the darkest centre pixel (the bib)
        # NOTE(review): raises if blob_log finds no blobs at all, as the
        # original did -- TODO decide on a fallback for empty detections.
        darkest = np.argmin(grey_ampRegion)
        detCenterXRegion.append(int(centerXRegion[darkest]))
        detCenterYRegion.append(int(centerYRegion[darkest]))
    return detCenterXRegion, detCenterYRegion
def read_dicom(filenm, ioptn):
    """Run the light/radiation coincidence analysis on one EPID image.

    Reads the DICOM RT image, locates the eight phantom bibs, measures the
    offset of each bib's 50%-amplitude field-edge crossing, computes the
    field size from one horizontal and one vertical profile, and writes a
    multi-page PDF report next to the input file.

    Parameters
    ----------
    filenm : str
        Path to the DICOM RT image file.
    ioptn : str
        User answer to the "is this from a clinac" prompt; a "yes"-like
        value selects the clinac ROI/profile locations, anything else
        the TrueBeam ones.
    """
    dataset = pydicom.dcmread(filenm)
    now = datetime.now()

    ArrayDicom = dataset.pixel_array
    SID = dataset.RTImageSID
    print("array_shape=", np.shape(ArrayDicom))
    height = np.shape(ArrayDicom)[0]
    width = np.shape(ArrayDicom)[1]

    # physical pixel size [mm] back-projected through the source-image distance
    dx = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[0]) / 1000)
    dy = 1 / (SID * (1 / dataset.ImagePlanePixelSpacing[1]) / 1000)
    print("pixel spacing row [mm]=", dx)
    print("pixel spacing col [mm]=", dy)

    # figure extent in cm based on the image dimensions (divide mm by 10)
    extent = (
        0,
        0 + (ArrayDicom.shape[1] * dx / 10),
        0 + (ArrayDicom.shape[0] * dy / 10),
        0,
    )

    if ioptn.startswith(("y", "yeah", "yes")):
        # clinac images are wider than tall; analyse a centred square crop
        height, width = ArrayDicom.shape
        ArrayDicom_mod = ArrayDicom[
            :, width // 2 - height // 2 : width // 2 + height // 2
        ]
    else:
        ArrayDicom_mod = ArrayDicom

    # sanity transform: bibs must be high amplitude against a dark background,
    # so invert when the field centre is brighter than the corner
    ctr_pixel = ArrayDicom_mod[height // 2, width // 2]
    corner_pixel = ArrayDicom_mod[0, 0]
    if ctr_pixel > corner_pixel:
        ArrayDicom = u.range_invert(ArrayDicom)
    ArrayDicom = u.norm01(ArrayDicom)

    # (edge_top, edge_bottom, edge_left, edge_right) pixel bounds of the
    # eight bib ROIs, in bib order 1..8
    if ioptn.startswith(("y", "yeah", "yes")):
        rois = [
            (70, 130, 270, 350),
            (70, 130, 680, 760),
            (150, 210, 760, 830),
            (560, 620, 760, 830),
            (640, 700, 680, 760),
            (640, 700, 270, 350),
            (560, 620, 200, 270),
            (150, 210, 200, 270),
        ]
    else:
        rois = [
            (280, 360, 360, 440),
            (280, 360, 830, 910),
            (360, 440, 940, 1020),
            (840, 920, 940, 1020),
            (930, 1000, 830, 910),
            (930, 1000, 360, 440),
            (840, 920, 280, 360),
            (360, 440, 280, 360),
        ]

    # crop every ROI, upsample x10 for sub-pixel bib detection, and record
    # its physical extent [cm] for plotting
    imcirclist = []
    list_extent = []
    for top, bottom, left, right in rois:
        imcirc = Image.fromarray(255 * ArrayDicom[top:bottom, left:right])
        imcirc = imcirc.resize(
            (imcirc.width * 10, imcirc.height * 10), Image.LANCZOS
        )
        imcirclist.append(imcirc)
        list_extent.append(
            (left * dx / 10, right * dx / 10, bottom * dy / 10, top * dy / 10)
        )

    xdet, ydet = point_detect(imcirclist)

    # bibs at indices 0, 1, 4, 5 sit on the horizontal field edges, so we take
    # a vertical profile through them; the remaining bibs get a horizontal one
    VERTICAL_PROFILE_BIBS = (0, 1, 4, 5)
    profiles = []
    for k, imcirc in enumerate(imcirclist):
        arr = np.array(imcirc, dtype=np.uint8)
        if k in VERTICAL_PROFILE_BIBS:
            profiles.append(arr[:, xdet[k]] / 255)
        else:
            profiles.append(arr[ydet[k], :] / 255)

    fig = plt.figure(figsize=(8, 12))  # this figure will hold the bibs
    plt.subplots_adjust(hspace=0.35)

    # the report is written next to the input file
    dirname = os.path.dirname(filenm)

    # tolerance levels to change at will
    tol = 1.0  # tolerance level [mm]
    act = 2.0  # action level [mm]
    phantom_distance = 3.0  # distance from the bib to the edge of the phantom [mm]

    with PdfPages(
        dirname
        + "/"
        + now.strftime("%d-%m-%Y_%H:%M_")
        + dataset[0x0008, 0x1010].value
        + "_Lightrad_report.pdf"
    ) as pdf:
        Page = plt.figure(figsize=(4, 5))
        Page.text(0.45, 0.9, "Report", size=18)
        for k, profile in enumerate(profiles):
            _, index = u.find_nearest(profile, 0.5)  # 50% amplitude point
            # offset = |bib -> field-edge distance| minus the known
            # bib-to-phantom-edge distance (the /10 converts upsampled
            # pixels back to mm)
            if k in VERTICAL_PROFILE_BIBS:
                offset_value = round(
                    abs((ydet[k] - index) * (dy / 10)) - phantom_distance, 2
                )
            else:
                offset_value = round(
                    abs((xdet[k] - index) * (dx / 10)) - phantom_distance, 2
                )
            txt = str(offset_value)
            if abs(offset_value) <= tol:
                result_color = "g"
            elif abs(offset_value) <= act:
                result_color = "y"
            else:
                result_color = "r"
            Page.text(
                0.1,
                0.8 - k / 10,
                "Point" + str(k + 1) + " offset=" + txt + " mm",
                color=result_color,
            )

            ax = fig.add_subplot(4, 2, k + 1)  # all bibs in a single figure
            ax.imshow(
                np.array(imcirclist[k], dtype=np.uint8) / 255,
                extent=list_extent[k],
                origin="upper",
            )
            # detected bib position (the /100 converts upsampled px to cm)
            ax.scatter(
                list_extent[k][0] + xdet[k] * dx / 100,
                list_extent[k][3] + ydet[k] * dy / 100,
                s=30,
                marker="P",
                color="y",
            )
            ax.set_title("Bib=" + str(k + 1))
            # dashed line marks the detected 50% field edge
            if k in VERTICAL_PROFILE_BIBS:
                ax.axhline(
                    list_extent[k][3] + index * dy / 100, color="r", linestyle="--"
                )
            else:
                ax.axvline(
                    list_extent[k][0] + index * dx / 100, color="r", linestyle="--"
                )
            ax.set_xlabel("x distance [cm]")
            ax.set_ylabel("y distance [cm]")

        pdf.savefig()
        pdf.savefig(fig)

        # field-size calculation: one horizontal and one vertical profile
        # through the full image
        im = Image.fromarray(255 * ArrayDicom)
        if ioptn.startswith(("y", "yeah", "yes")):
            # profile locations for a clinac image
            PROFILE = {"horizontal": 270, "vertical": 430}
        else:
            # profile locations for a TrueBeam image
            PROFILE = {"horizontal": 470, "vertical": 510}
        profilehorz = (
            np.array(im, dtype=np.uint8)[PROFILE["horizontal"], :] / 255
        )  # we need to change these limits on a less specific criteria
        profilevert = np.array(im, dtype=np.uint8)[:, PROFILE["vertical"]] / 255

        # 50% field edges on top/bottom and left/right halves of the profiles
        _, index_top = u.find_nearest(profilevert[0 : height // 2], 0.5)
        _, index_bot = u.find_nearest(profilevert[height // 2 : height], 0.5)
        _, index_l = u.find_nearest(profilehorz[0 : width // 2], 0.5)
        _, index_r = u.find_nearest(profilehorz[width // 2 : width], 0.5)

        fig2 = plt.figure(
            figsize=(7, 5)
        )  # shows the vertical and horizontal calculated field size
        ax = fig2.subplots()
        ax.imshow(ArrayDicom, extent=extent, origin="upper")
        ax.set_xlabel("x distance [cm]")
        ax.set_ylabel("y distance [cm]")

        # vertical double-headed arrow spanning the field.
        # NOTE: annotate's text argument is passed positionally because the
        # old s= keyword was removed in matplotlib >= 3.5.
        ax.annotate(
            "",
            xy=(PROFILE["vertical"] * dx / 10, index_top * dy / 10),
            xytext=(PROFILE["vertical"] * dx / 10, (height // 2 + index_bot) * dy / 10),
            arrowprops=dict(arrowstyle="<->", color="r"),
        )
        ax.text(
            (PROFILE["vertical"] + 10) * dx / 10,
            (height // 1.25) * dy / 10,
            "Vfs="
            + str(round((height // 2 + index_bot - index_top) * dy / 10, 2))
            + "cm",
            rotation=90,
            fontsize=14,
            color="r",
        )

        # horizontal double-headed arrow spanning the field
        ax.annotate(
            "",
            xy=(index_l * dx / 10, PROFILE["horizontal"] * dy / 10),
            xytext=((width // 2 + index_r) * dx / 10, PROFILE["horizontal"] * dy / 10),
            arrowprops=dict(arrowstyle="<->", color="r"),
        )
        ax.text(
            (width // 2) * dx / 10,
            (PROFILE["horizontal"] - 10) * dy / 10,
            "Hfs=" + str(round((width // 2 + index_r - index_l) * dx / 10, 2)) + "cm",
            rotation=0,
            fontsize=14,
            color="r",
        )

        pdf.savefig(fig2)
if __name__ == "__main__":
    # Prompt until the user gives a recognizable yes/no answer.
    # The original try/except here was dead code: str(line.lower()) cannot
    # raise, and the bare except swallowed EOFError, turning a closed stdin
    # into an infinite loop while never printing the validation message.
    while True:
        line = input("Are these files from a clinac [yes(y)/no(n)]> ")
        ioption = line.lower()
        if ioption.startswith(("y", "yeah", "yes", "n", "no", "nope")):
            break
        print("Please enter a valid option:")

    parser = argparse.ArgumentParser()
    parser.add_argument("file", type=str, help="Input the Light/Rad file")
    args = parser.parse_args()

    read_dicom(args.file, ioption)
| 1.445313 | 1 |
cli/endpoints/online/triton/ensemble/models/triton/bidaf-preprocess/1/model.py | denniseik/azureml-examples | 331 | 12768949 | import nltk
import json
import numpy as np
from nltk import word_tokenize
import triton_python_backend_utils as pb_utils
class TritonPythonModel:
    """Preprocessing model for the BiDAF ensemble.

    Tokenizes the incoming context and query strings into the word and
    character tensors expected by the downstream BiDAF model. Every Python
    backend model must use "TritonPythonModel" as the class name.
    """

    def initialize(self, args):
        """`initialize` is called only once when the model is being loaded.

        Implementing `initialize` is optional; it allows the model to
        set up any state associated with this model.

        Parameters
        ----------
        args : dict
            Both keys and values are strings. The dictionary keys and values are:
            * model_config: A JSON string containing the model configuration
            * model_instance_kind: A string containing model instance kind
            * model_instance_device_id: A string containing model instance device ID
            * model_repository: Model repository path
            * model_version: Model version
            * model_name: Model name
        """
        # model_config arrives as a JSON string and must be parsed here
        self.model_config = model_config = json.loads(args["model_config"])

        # Convert each declared output's Triton dtype to a numpy dtype.
        # (Loop replaces four copy-pasted config/convert stanzas.)
        output_dtypes = []
        for output_name in ("OUTPUT0", "OUTPUT1", "OUTPUT2", "OUTPUT3"):
            output_config = pb_utils.get_output_config_by_name(
                model_config, output_name
            )
            output_dtypes.append(
                pb_utils.triton_string_to_numpy(output_config["data_type"])
            )
        (
            self.output0_dtype,
            self.output1_dtype,
            self.output2_dtype,
            self.output3_dtype,
        ) = output_dtypes

        # Get model repository path to read labels
        self.model_repository = model_repository = args["model_repository"]
        print(model_repository)

        # Initialize tokenizer (downloads the punkt data used by word_tokenize)
        nltk.download("punkt")

    def tokenize(self, text):
        """Split *text* into the word and character arrays BiDAF expects.

        Returns
        -------
        tuple of (np.ndarray, np.ndarray)
            * words: lower-cased tokens, shape (seq, 1)
            * chars: per-token characters truncated/padded to 16,
              shape (seq, 1, 1, 16)
        """
        tokens = word_tokenize(text)
        # split into lower-case word tokens, in numpy array with shape of (seq, 1)
        words = np.array([w.lower() for w in tokens], dtype=np.object_).reshape(-1, 1)
        # split words into chars, truncated to 16 and padded with ""
        chars = [list(t)[:16] for t in tokens]
        chars = [cs + [""] * (16 - len(cs)) for cs in chars]
        chars = np.array(chars, dtype=np.object_).reshape(-1, 1, 1, 16)
        return words, chars

    def execute(self, requests):
        """Tokenize context/query inputs for every request.

        Parameters
        ----------
        requests : list
            A list of pb_utils.InferenceRequest

        Returns
        -------
        list
            A list of pb_utils.InferenceResponse. The length of this list must
            be the same as `requests`
        """
        output0_dtype = self.output0_dtype
        output1_dtype = self.output1_dtype
        output2_dtype = self.output2_dtype
        output3_dtype = self.output3_dtype

        responses = []
        # Every Python backend must iterate over everyone of the requests
        # and create a pb_utils.InferenceResponse for each of them.
        for request in requests:
            # INPUT0 carries the context paragraph
            in_0 = pb_utils.get_input_tensor_by_name(request, "INPUT0")
            context = in_0.as_numpy().astype(str)
            print(context)

            # INPUT1 carries the question (was confusingly also bound to in_0)
            in_1 = pb_utils.get_input_tensor_by_name(request, "INPUT1")
            query = in_1.as_numpy().astype(str)
            print(query)

            cw, cc = self.tokenize(context[0])
            qw, qc = self.tokenize(query[0])

            out_0 = np.array(qw, dtype=output0_dtype)
            out_1 = np.array(cc, dtype=output1_dtype)
            out_2 = np.array(qc, dtype=output2_dtype)
            out_3 = np.array(cw, dtype=output3_dtype)

            # pb_utils.Tensor objects are required to build an InferenceResponse
            out_tensor_0 = pb_utils.Tensor("OUTPUT0", out_0)
            out_tensor_1 = pb_utils.Tensor("OUTPUT1", out_1)
            out_tensor_2 = pb_utils.Tensor("OUTPUT2", out_2)
            out_tensor_3 = pb_utils.Tensor("OUTPUT3", out_3)

            inference_response = pb_utils.InferenceResponse(
                output_tensors=[out_tensor_0, out_tensor_1, out_tensor_2, out_tensor_3]
            )
            responses.append(inference_response)
        return responses

    def finalize(self):
        """`finalize` is called only once when the model is being unloaded.

        Implementing `finalize` is OPTIONAL; it allows the model to perform
        any necessary clean ups before exit.
        """
        print("Cleaning up...")
| 3.203125 | 3 |
run.py | neoocean/arlo | 0 | 12768950 | from arlo import Arlo
from datetime import timedelta, date
import datetime
import sys
import platform
import os.path
from os import path
# Arlo credentials -- must be filled in before running.
USERNAME = ''
PASSWORD = ''

try:
    # Instantiating the Arlo object automatically calls Login(), which returns
    # an oAuth token that gets cached. Subsequent successful calls to login
    # will update the oAuth token.
    arlo = Arlo(USERNAME, PASSWORD)
    # At this point you're logged into Arlo.

    # Date range: the last seven days (inclusive of today).
    today = date.today().strftime("%Y%m%d")
    seven_days_ago = (date.today() - timedelta(days=6)).strftime("%Y%m%d")

    # Get all of the recordings for the date range.
    library = arlo.GetLibrary(seven_days_ago, today)

    # Pick the OneDrive sync location according to the machine we run on.
    node = platform.node()
    if node in ('home', 'DESKTOP-F4EOHEL'):
        storage = 'D:/OneDrive/Video/arlo/Video/'
    else:
        # 'surface' and any unknown machine use the E: drive
        storage = 'E:/OneDrive/Video/arlo/Video/'
    print('platform.node() = ' + node)

    # Iterate through the recordings in the library.
    for recording in library:
        # recording['name'] is a millisecond epoch timestamp
        videofilename = (
            datetime.datetime.fromtimestamp(int(recording['name']) // 1000).strftime('%Y-%m-%d %H-%M-%S')
            + ' ' + recording['uniqueId'] + '.mp4'
        )
        target = storage + videofilename

        # Download when the file is missing, or when a previous run left a
        # zero-byte file behind (interrupted download).
        if path.exists(target) and os.path.getsize(target) > 0:
            continue

        stream = arlo.StreamRecording(recording['presignedContentUrl'])
        with open(target, 'wb') as f:
            for chunk in stream:
                f.write(chunk)
        print('Downloaded: ' + videofilename + ' from ' + recording['createdDate'] + '.')

    # Delete all of the videos you just downloaded from the Arlo library.
    # Notice that you can pass the "library" object we got back from the GetLibrary() call.
    # result = arlo.BatchDeleteRecordings(library)

    # If we made it here without an exception, then the videos were successfully deleted.
    # print('Batch deletion of videos completed successfully.')
except Exception as e:
    # Top-level boundary: report the failure instead of a raw traceback.
    print(e)