hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 | count_classes int64 0 1.6M | score_classes float64 0 1 | count_generators int64 0 651k | score_generators float64 0 1 | count_decorators int64 0 990k | score_decorators float64 0 1 | count_async_functions int64 0 235k | score_async_functions float64 0 1 | count_documentation int64 0 1.04M | score_documentation float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
7ee0ad4229cbb8785cc665b7f1d02577367e5533 | 6,358 | py | Python | venv/lib/python3.6/site-packages/xero_python/accounting/models/branding_theme.py | 6enno/FarmXero | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/xero_python/accounting/models/branding_theme.py | 6enno/FarmXero | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | [
"MIT"
] | null | null | null | venv/lib/python3.6/site-packages/xero_python/accounting/models/branding_theme.py | 6enno/FarmXero | 881b1e6648e927631b276e66a4c5287e4de2cbc1 | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Accounting API
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
Contact: api@xero.com
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
from xero_python.models import BaseModel
class BrandingTheme(BaseModel):
    """NOTE: This class is auto generated by OpenAPI Generator.
    Ref: https://openapi-generator.tech

    Do not edit the class manually.
    """

    """
    Attributes:
      openapi_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> OpenAPI type string used for (de)serialisation.
    openapi_types = {
        "branding_theme_id": "str",
        "name": "str",
        "logo_url": "str",
        "type": "str",
        "sort_order": "int",
        "created_date_utc": "datetime[ms-format]",
    }

    # Maps python attribute name -> JSON key in the Xero API payload.
    attribute_map = {
        "branding_theme_id": "BrandingThemeID",
        "name": "Name",
        "logo_url": "LogoUrl",
        "type": "Type",
        "sort_order": "SortOrder",
        "created_date_utc": "CreatedDateUTC",
    }

    def __init__(
        self,
        branding_theme_id=None,
        name=None,
        logo_url=None,
        type=None,
        sort_order=None,
        created_date_utc=None,
    ):  # noqa: E501
        """BrandingTheme - a model defined in OpenAPI"""  # noqa: E501
        self._branding_theme_id = None
        self._name = None
        self._logo_url = None
        self._type = None
        self._sort_order = None
        self._created_date_utc = None
        self.discriminator = None
        # Only assign attributes that were explicitly provided, so unset
        # fields stay None (the property setters perform any validation).
        if branding_theme_id is not None:
            self.branding_theme_id = branding_theme_id
        if name is not None:
            self.name = name
        if logo_url is not None:
            self.logo_url = logo_url
        if type is not None:
            self.type = type
        if sort_order is not None:
            self.sort_order = sort_order
        if created_date_utc is not None:
            self.created_date_utc = created_date_utc

    @property
    def branding_theme_id(self):
        """Gets the branding_theme_id of this BrandingTheme.  # noqa: E501

        Xero identifier  # noqa: E501

        :return: The branding_theme_id of this BrandingTheme.  # noqa: E501
        :rtype: str
        """
        return self._branding_theme_id

    @branding_theme_id.setter
    def branding_theme_id(self, branding_theme_id):
        """Sets the branding_theme_id of this BrandingTheme.

        Xero identifier  # noqa: E501

        :param branding_theme_id: The branding_theme_id of this BrandingTheme.  # noqa: E501
        :type: str
        """
        self._branding_theme_id = branding_theme_id

    @property
    def name(self):
        """Gets the name of this BrandingTheme.  # noqa: E501

        Name of branding theme  # noqa: E501

        :return: The name of this BrandingTheme.  # noqa: E501
        :rtype: str
        """
        return self._name

    @name.setter
    def name(self, name):
        """Sets the name of this BrandingTheme.

        Name of branding theme  # noqa: E501

        :param name: The name of this BrandingTheme.  # noqa: E501
        :type: str
        """
        self._name = name

    @property
    def logo_url(self):
        """Gets the logo_url of this BrandingTheme.  # noqa: E501

        The location of the image file used as the logo on this branding theme  # noqa: E501

        :return: The logo_url of this BrandingTheme.  # noqa: E501
        :rtype: str
        """
        return self._logo_url

    @logo_url.setter
    def logo_url(self, logo_url):
        """Sets the logo_url of this BrandingTheme.

        The location of the image file used as the logo on this branding theme  # noqa: E501

        :param logo_url: The logo_url of this BrandingTheme.  # noqa: E501
        :type: str
        """
        self._logo_url = logo_url

    @property
    def type(self):
        """Gets the type of this BrandingTheme.  # noqa: E501

        Always INVOICE  # noqa: E501

        :return: The type of this BrandingTheme.  # noqa: E501
        :rtype: str
        """
        return self._type

    @type.setter
    def type(self, type):
        """Sets the type of this BrandingTheme.

        Always INVOICE  # noqa: E501

        :param type: The type of this BrandingTheme.  # noqa: E501
        :type: str
        """
        # NOTE: the literal string "None" (not the None singleton) is part of
        # the generated enum, and falsy values bypass validation entirely.
        allowed_values = ["INVOICE", "None"]  # noqa: E501
        if type:
            if type not in allowed_values:
                raise ValueError(
                    "Invalid value for `type` ({0}), must be one of {1}".format(  # noqa: E501
                        type, allowed_values
                    )
                )
        self._type = type

    @property
    def sort_order(self):
        """Gets the sort_order of this BrandingTheme.  # noqa: E501

        Integer – ranked order of branding theme. The default branding theme has a value of 0  # noqa: E501

        :return: The sort_order of this BrandingTheme.  # noqa: E501
        :rtype: int
        """
        return self._sort_order

    @sort_order.setter
    def sort_order(self, sort_order):
        """Sets the sort_order of this BrandingTheme.

        Integer – ranked order of branding theme. The default branding theme has a value of 0  # noqa: E501

        :param sort_order: The sort_order of this BrandingTheme.  # noqa: E501
        :type: int
        """
        self._sort_order = sort_order

    @property
    def created_date_utc(self):
        """Gets the created_date_utc of this BrandingTheme.  # noqa: E501

        UTC timestamp of creation date of branding theme  # noqa: E501

        :return: The created_date_utc of this BrandingTheme.  # noqa: E501
        :rtype: datetime
        """
        return self._created_date_utc

    @created_date_utc.setter
    def created_date_utc(self, created_date_utc):
        """Sets the created_date_utc of this BrandingTheme.

        UTC timestamp of creation date of branding theme  # noqa: E501

        :param created_date_utc: The created_date_utc of this BrandingTheme.  # noqa: E501
        :type: datetime
        """
        self._created_date_utc = created_date_utc
| 27.885965 | 124 | 0.596886 | 6,045 | 0.950173 | 0 | 0 | 4,114 | 0.646652 | 0 | 0 | 3,787 | 0.595253 |
7ee0d66cd69f9cc4f31be7268f2139f698f3ce65 | 1,568 | py | Python | setup.py | idex-biometrics/fusesoc | 58bbb864723112e9bfd7e02a17749800225815e9 | [
"BSD-2-Clause"
] | 829 | 2015-03-10T12:28:42.000Z | 2022-03-28T02:44:12.000Z | setup.py | idex-biometrics/fusesoc | 58bbb864723112e9bfd7e02a17749800225815e9 | [
"BSD-2-Clause"
] | 460 | 2015-01-26T18:03:19.000Z | 2022-03-30T08:30:41.000Z | setup.py | idex-biometrics/fusesoc | 58bbb864723112e9bfd7e02a17749800225815e9 | [
"BSD-2-Clause"
] | 177 | 2015-02-02T13:58:12.000Z | 2022-03-30T20:56:21.000Z | # Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
import os
from setuptools import setup
def read(fname):
    """Return the text contents of *fname*, resolved relative to this setup.py.

    Used below to embed README.md as the package's long description.
    The file is opened in a ``with`` block so the handle is always closed,
    and UTF-8 is specified explicitly so the result does not depend on the
    build machine's locale.
    """
    with open(os.path.join(os.path.dirname(__file__), fname), encoding="utf-8") as f:
        return f.read()
# Packaging metadata for the `fusesoc` distribution.
setup(
    name="fusesoc",
    packages=["fusesoc", "fusesoc.capi2", "fusesoc.provider"],
    # The version is derived from git tags by setuptools_scm at build time and
    # written to fusesoc/version.py; no version string is hard-coded here.
    use_scm_version={
        "relative_to": __file__,
        "write_to": "fusesoc/version.py",
    },
    author="Olof Kindgren",
    author_email="olof.kindgren@gmail.com",
    description=(
        "FuseSoC is a package manager and a set of build tools for HDL "
        "(Hardware Description Language) code."
    ),
    license="BSD-2-Clause",
    keywords=[
        "VHDL",
        "verilog",
        "hdl",
        "rtl",
        "synthesis",
        "FPGA",
        "simulation",
        "Xilinx",
        "Altera",
    ],
    url="https://github.com/olofk/fusesoc",
    # README.md is embedded as the PyPI long description.
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    classifiers=[
        "Development Status :: 5 - Production/Stable",
        "Topic :: Utilities",
        "Topic :: Software Development :: Build Tools",
        "License :: OSI Approved :: BSD License",
    ],
    # Installs the `fusesoc` console command, dispatching to fusesoc.main:main.
    entry_points={"console_scripts": ["fusesoc = fusesoc.main:main"]},
    setup_requires=[
        "setuptools_scm",
    ],
    install_requires=[
        "edalize>=0.2.3",
        "pyparsing",
        "pyyaml",
        "simplesat>=0.8.0",
    ],
    # Supported Python versions: 3.6+
    python_requires=">=3.6, <4",
)
| 25.704918 | 72 | 0.595026 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 829 | 0.528699 |
7ee162a59b2d4c88fd31a1e5da83b93341c5641c | 2,980 | py | Python | planner/migrations/0020_auto_20171028_1709.py | zhajio1988/Vplanner | 2b84bac7c8e36fde5eecc73682fde561613273d1 | [
"Apache-2.0"
] | 4 | 2019-08-26T01:20:35.000Z | 2022-01-26T09:18:27.000Z | planner/migrations/0020_auto_20171028_1709.py | zhajio1988/Vplanner | 2b84bac7c8e36fde5eecc73682fde561613273d1 | [
"Apache-2.0"
] | null | null | null | planner/migrations/0020_auto_20171028_1709.py | zhajio1988/Vplanner | 2b84bac7c8e36fde5eecc73682fde561613273d1 | [
"Apache-2.0"
] | 1 | 2020-07-27T16:14:01.000Z | 2020-07-27T16:14:01.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-10-28 09:09
from __future__ import unicode_literals
import django.core.validators
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated schema migration. Once applied to any database this file
    # must not be edited: Django tracks applied migrations by name/content.

    dependencies = [
        ('planner', '0019_auto_20171028_1706'),
    ]

    operations = [
        # New table holding per-Feature verification metrics; the OneToOneField
        # at the end ties each FeatureDetail row to exactly one Feature.
        migrations.CreateModel(
            name='FeatureDetail',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('priority', models.CharField(choices=[('p1', 'P1'), ('p2', 'P2'), ('p3', 'P3')], default='p1', max_length=10)),
                ('sim_req', models.CharField(max_length=128, verbose_name='Simulation Requirements')),
                ('seq_req', models.CharField(max_length=128, verbose_name='Sequence Requirements')),
                ('check_desp', models.CharField(max_length=128, verbose_name='Checking Description')),
                ('func_cov_req', models.CharField(max_length=128, verbose_name='Func Cov Requirements')),
                ('measure_src', models.TextField(verbose_name='Measure Source')),
                # Coverage figures, each capped at 100 by MaxValueValidator.
                ('test_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Testcase Coverage')),
                ('line_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Line Coverage')),
                ('con_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Conditional Coverage')),
                ('toggle_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Toggle Coverage')),
                ('fsm_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='FSM Coverage')),
                ('branch_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Branch Coverage')),
                ('assert_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Assertion Coverage')),
                ('func_cov', models.PositiveSmallIntegerField(default=0, validators=[django.core.validators.MaxValueValidator(100)], verbose_name='Functional Coverage')),
                ('feature', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to='planner.Feature')),
            ],
        ),
        # OperationLogs is renamed to ChangeList (data preserved).
        migrations.RenameModel(
            old_name='OperationLogs',
            new_name='ChangeList',
        ),
        # FeatureItem is superseded by FeatureDetail: drop its FK first,
        # then delete the model.
        migrations.RemoveField(
            model_name='featureitem',
            name='feature',
        ),
        migrations.DeleteModel(
            name='FeatureItem',
        ),
    ]
| 59.6 | 171 | 0.667785 | 2,759 | 0.925839 | 0 | 0 | 0 | 0 | 0 | 0 | 649 | 0.217785 |
7ee1d07b4bc413cb7389c911457d3de03b101227 | 2,460 | py | Python | check_mana.py | CheapskateProjects/MtgManaRecognition | 9119a843f5c235ca09c695a46611bb46fea37573 | [
"MIT"
] | 7 | 2020-01-24T13:15:51.000Z | 2021-11-18T00:59:14.000Z | check_mana.py | CheapskateProjects/MtgManaRecognition | 9119a843f5c235ca09c695a46611bb46fea37573 | [
"MIT"
] | null | null | null | check_mana.py | CheapskateProjects/MtgManaRecognition | 9119a843f5c235ca09c695a46611bb46fea37573 | [
"MIT"
] | 3 | 2017-12-11T08:42:20.000Z | 2021-05-23T22:16:37.000Z | """
This code will read file given as parameter and list what mana symbols it contains.
created Apr 2017
by CheapskateProjects
---------------------------
The MIT License (MIT)
Copyright (c) 2017 CheapskateProjects
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
import cv2
import numpy as np
from os import listdir
import sys
# Python 2 script: require the image to analyse as the first CLI argument.
if ( len(sys.argv) <= 1 ):
    print "Usage: <file to check>"
    sys.exit()
# Config
# Minimum normalized cross-correlation score for a template hit (0..1).
matchingThreshold = 0.7
# Read templates at the begining. Only once
# Flag 0 loads each template as grayscale, matching the grayscale conversion
# applied to the input image before template matching below.
green = cv2.imread('mana_icons/green_mana.jpg',0)
blue = cv2.imread('mana_icons/blue_mana.jpg',0)
red = cv2.imread('mana_icons/red_mana.jpg',0)
white = cv2.imread('mana_icons/white_mana.jpg',0)
black = cv2.imread('mana_icons/black_mana.jpg',0)
# All the mana logos are about the same size
w, h = green.shape[::-1]
def colorcheck( color_template, draw_color, img_gray ):
    """Return "Yes" if color_template matches anywhere in img_gray, else "No".

    A match is any location whose normalized cross-correlation score is at
    least the module-level matchingThreshold.
    draw_color is unused; it is kept so existing call sites remain valid.
    """
    results = cv2.matchTemplate(img_gray,color_template,cv2.TM_CCOEFF_NORMED)
    locations = np.where( results >= matchingThreshold)
    # np.where returns a tuple of index arrays; checking the first array's size
    # avoids materialising the zipped coordinate list (and also works on
    # Python 3, where len() cannot be applied to a zip iterator).
    if locations[0].size > 0:
        return "Yes"
    else:
        return "No"
# Load the image named on the command line and convert it to grayscale so it
# can be compared against the grayscale templates loaded above.
filename=sys.argv[1]
img_to_check = cv2.imread(filename)
img_gray = cv2.cvtColor(img_to_check, cv2.COLOR_BGR2GRAY)
# Report, per colour, whether that mana symbol appears in the image
# (the second argument of colorcheck is unused by the function).
print "Green: " + colorcheck(green, (0,255,0), img_gray)
print "Red: " + colorcheck(red, (0,0,255), img_gray)
print "Black: " + colorcheck(black, (0,0,0), img_gray)
print "Blue: " + colorcheck(blue, (255,0,0), img_gray)
print "White: " + colorcheck(white, (255,255,255), img_gray)
| 46.415094 | 460 | 0.742683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,546 | 0.628455 |
7ee1d369253b685eb3c0ced8fa15abb487773d16 | 2,922 | py | Python | src/estimator.py | Dru94/scovid | 637a56d95014d785f55c3253e424874520850257 | [
"MIT"
] | null | null | null | src/estimator.py | Dru94/scovid | 637a56d95014d785f55c3253e424874520850257 | [
"MIT"
] | null | null | null | src/estimator.py | Dru94/scovid | 637a56d95014d785f55c3253e424874520850257 | [
"MIT"
] | null | null | null | import pprint
# Module-level containers retained for backward compatibility with code that
# imports them; estimator() no longer mutates them (it previously wrote its
# results into these shared dicts, so successive calls aliased each other).
data = dict()
impactDict = dict()
severeImpactDict = dict()
estimate = dict()
# Output keys, in the order the per-projection values are computed below.
keylist = ["currentlyInfected", "infectionsByRequestedTime", "severeCasesByRequestedTime", "hospitalBedsByRequestedTime",
           "casesForICUByRequestedTime", "casesForVentilatorsByRequestedTime", "dollarsInFlight"]


def estimator(x):
    """Estimate COVID-19 impact figures from the input dict *x*.

    :param x: dict with keys periodType ("days"/"weeks"/"months"),
        timeToElapse, reportedCases, totalHospitalBeds and a nested
        region dict (avgDailyIncomeInUSD, avgDailyIncomePopulation).
    :return: ``{"data": [x], "estimated": {"impact": {...}, "severeImpact": {...}}}``
        where each projection dict uses the keys in ``keylist``.
    :raises ValueError: if reportedCases is None (the original code only
        printed a warning and then crashed with a TypeError).
    """
    period = x["periodType"]
    reported_cases = x["reportedCases"]
    time = x["timeToElapse"]
    beds = x["totalHospitalBeds"]
    avg_daily_income = x["region"]["avgDailyIncomeInUSD"]
    avg_income_population = x["region"]["avgDailyIncomePopulation"]

    if reported_cases is None:
        raise ValueError("No value for reported Cases")

    # Normalise the requested period to days.
    if period == "weeks":
        days = time * 7
    elif period == "months":
        days = time * 30
    else:
        days = time

    # Infections are assumed to double every 3 days.
    growth = pow(2, int(days / 3))
    available_beds = 0.35 * beds

    def _projection(multiplier):
        # One projection row: multiplier is 10 for "impact", 50 for "severeImpact".
        currently_infected = int(reported_cases * multiplier)
        infections = int(currently_infected * growth)
        severe_cases = int(0.15 * infections)
        remaining_beds = int(available_beds - severe_cases)
        icu_cases = int(0.05 * infections)
        ventilator_cases = int(0.02 * infections)
        # NOTE: divides by the raw timeToElapse (not the normalised days),
        # preserving the original behaviour.
        dollars = int((infections * avg_income_population * avg_daily_income) / time)
        return dict(zip(keylist, [currently_infected, infections, severe_cases,
                                  remaining_beds, icu_cases, ventilator_cases, dollars]))

    # Build fresh dicts on every call instead of mutating module-level state.
    return {
        "data": [x],
        "estimated": {
            "impact": _projection(10),
            "severeImpact": _projection(50),
        },
    }
7ee23d44668fd487a80e9214c2ad17038e0da11f | 3,739 | py | Python | src/tests/control/test_auth.py | abrock/pretix | cd9c048458afce1198276e5936bf583578855a4f | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-06-23T07:44:54.000Z | 2021-06-23T07:44:54.000Z | src/tests/control/test_auth.py | awg24/pretix | b1d67a48601838bac0d4e498cbe8bdcd16013d60 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/tests/control/test_auth.py | awg24/pretix | b1d67a48601838bac0d4e498cbe8bdcd16013d60 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | from django.test import Client, TestCase
from tests.base import BrowserTest
from pretix.base.models import User
class LoginFormBrowserTest(BrowserTest):
def setUp(self):
super().setUp()
self.user = User.objects.create_user('dummy@dummy.dummy', 'dummy@dummy.dummy', 'dummy')
def test_login(self):
self.driver.implicitly_wait(10)
self.driver.get('%s%s' % (self.live_server_url, '/control/login'))
username_input = self.driver.find_element_by_name("email")
username_input.send_keys('dummy@dummy.dummy')
password_input = self.driver.find_element_by_name("password")
password_input.send_keys('dummy')
self.driver.find_element_by_css_selector('button[type="submit"]').click()
self.driver.find_element_by_class_name("navbar-right")
def test_login_fail(self):
self.driver.implicitly_wait(10)
self.driver.get('%s%s' % (self.live_server_url, '/control/login'))
username_input = self.driver.find_element_by_name("email")
username_input.send_keys('dummy@dummy.dummy')
password_input = self.driver.find_element_by_name("password")
password_input.send_keys('wrong')
self.driver.find_element_by_css_selector('button[type="submit"]').click()
self.driver.find_element_by_class_name("alert-danger")
class LoginFormTest(TestCase):
"""
This test case tests various methods around the properties /
variations concept.
"""
def setUp(self):
self.user = User.objects.create_user('dummy@dummy.dummy', 'dummy@dummy.dummy', 'dummy')
def test_wrong_credentials(self):
c = Client()
response = c.post('/control/login', {
'email': 'dummy@dummy.dummy',
'password': 'foo',
})
self.assertEqual(response.status_code, 200)
def test_correct_credentials(self):
c = Client()
response = c.post('/control/login', {
'email': 'dummy@dummy.dummy',
'password': 'dummy',
})
self.assertEqual(response.status_code, 302)
def test_inactive_account(self):
self.user.is_active = False
self.user.save()
c = Client()
response = c.post('/control/login', {
'email': 'dummy@dummy.dummy',
'password': 'dummy',
})
self.assertEqual(response.status_code, 200)
def test_redirect(self):
c = Client()
response = c.post('/control/login?next=/control/events/', {
'email': 'dummy@dummy.dummy',
'password': 'dummy',
})
self.assertEqual(response.status_code, 302)
self.assertIn('/control/events/', response['Location'])
def test_logged_in(self):
c = Client()
response = c.post('/control/login?next=/control/events/', {
'email': 'dummy@dummy.dummy',
'password': 'dummy',
})
self.assertEqual(response.status_code, 302)
self.assertIn('/control/events/', response['Location'])
response = c.get('/control/login')
self.assertEqual(response.status_code, 302)
response = c.get('/control/login?next=/control/events/')
self.assertEqual(response.status_code, 302)
self.assertIn('/control/events/', response['Location'])
def test_logout(self):
c = Client()
response = c.post('/control/login', {
'email': 'dummy@dummy.dummy',
'password': 'dummy',
})
self.assertEqual(response.status_code, 302)
response = c.get('/control/logout')
self.assertEqual(response.status_code, 302)
response = c.get('/control/login')
self.assertEqual(response.status_code, 200)
| 34.62037 | 95 | 0.622626 | 3,620 | 0.968173 | 0 | 0 | 0 | 0 | 0 | 0 | 961 | 0.257021 |
7ee657d8672c3d1e01a4a553eebf1a0a57578480 | 1,871 | py | Python | python/example_code/iam/create_policy.py | Ciul/aws-doc-sdk-examples | 0ee496eb93b0404e214d387c1933ca4e231503cb | [
"Apache-2.0"
] | 1 | 2019-01-09T01:32:02.000Z | 2019-01-09T01:32:02.000Z | python/example_code/iam/create_policy.py | cloudcansee/aws-doc-sdk-examples | 571fe9a546ab24ccac8e865190dce127f457f587 | [
"Apache-2.0"
] | null | null | null | python/example_code/iam/create_policy.py | cloudcansee/aws-doc-sdk-examples | 571fe9a546ab24ccac8e865190dce127f457f587 | [
"Apache-2.0"
] | null | null | null | # Copyright 2010-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import json
import boto3
# Create IAM client
iam = boto3.client('iam')
# Create a policy
# NOTE(sample): "RESOURCE_ARN" is a placeholder -- replace it with the ARN of
# the CloudWatch log group / DynamoDB table this policy should apply to.
my_managed_policy = {
    "Version": "2012-10-17",
    "Statement": [
        {
            "Effect": "Allow",
            "Action": "logs:CreateLogGroup",
            "Resource": "RESOURCE_ARN"
        },
        {
            "Effect": "Allow",
            "Action": [
                "dynamodb:DeleteItem",
                "dynamodb:GetItem",
                "dynamodb:PutItem",
                "dynamodb:Scan",
                "dynamodb:UpdateItem"
            ],
            "Resource": "RESOURCE_ARN"
        }
    ]
}
# Register the managed policy with IAM; the response includes the new
# policy's ARN and metadata.
response = iam.create_policy(
    PolicyName='myDynamoDBPolicy',
    PolicyDocument=json.dumps(my_managed_policy)
)
print(response)
#snippet-comment:[These are tags for the AWS doc team's sample catalog. Do not remove.]
#snippet-sourcedescription:[create_policy.py demonstrates how to create a new managed policy for your AWS account.]
#snippet-keyword:[Python]
#snippet-keyword:[AWS SDK for Python (Boto3)]
#snippet-keyword:[Code Sample]
#snippet-keyword:[AWS Identity and Access Management (IAM)]
#snippet-service:[iam]
#snippet-sourcetype:[full-example]
#snippet-sourcedate:[]
#snippet-sourceauthor:[jschwarzwalder (AWS)]
| 30.177419 | 116 | 0.638696 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,360 | 0.726884 |
7ee6a44163dee19b7f4b3895d2366e18514d868b | 1,451 | py | Python | Part-2-Answers/2-Dictionaries/1-Word2SMS.py | Spigot-Dev/Grok-Intro_To_Programming_Python1-2 | 69c64019c0424f6cc8eb326b4456a510baab7ea7 | [
"MIT"
] | 2 | 2021-11-20T11:28:22.000Z | 2022-02-07T21:56:46.000Z | Part-2-Answers/2-Dictionaries/1-Word2SMS.py | Spigot-Dev/Grok-Intro_To_Programming_Python1-2 | 69c64019c0424f6cc8eb326b4456a510baab7ea7 | [
"MIT"
] | null | null | null | Part-2-Answers/2-Dictionaries/1-Word2SMS.py | Spigot-Dev/Grok-Intro_To_Programming_Python1-2 | 69c64019c0424f6cc8eb326b4456a510baab7ea7 | [
"MIT"
] | 4 | 2021-11-20T11:28:25.000Z | 2022-03-12T04:10:54.000Z | # 11/03/21
# What does this code do?
# This code introduces a new idea, Dictionaries. The codes purpose is to take an input, and convert it into the numbers you'd need to press
# on an alphanumeric keypad, as shown in the picture.
# How do Dictionaries work?
# To use our dictionary, we first need to initialise it. We can do this as follows:
# Syntax: <DICTNAME> = {'Key1':'Value1'}
# Example: MyPetSounds = {"Cat":"Meow", "Dog":"Woof"}
# To explain further, dictionaries work in a Key and Value paired system. To create an entry, you need to define 2 things,
# The key (or how the entry will be called), and then the value (What will be referenced when the key is called.) They are seperated by a colon.
# A dictionary containing the letter to digit phone keypad mappings.
# Maps each (uppercase) letter to the digit on a phone keypad that carries it.
KEYPAD = {
    'A': '2', 'B': '2', 'C': '2', 'D': '3', 'E': '3',
    'F': '3', 'G': '4', 'H': '4', 'I': '4', 'J': '5',
    'K': '5', 'L': '5', 'M': '6', 'N': '6', 'O': '6',
    'P': '7', 'Q': '7', 'R': '7', 'S': '7', 'T': '8',
    'U': '8', 'V': '8', 'W': '9', 'X': '9', 'Y': '9',
    'Z': '9',
}

word = input("Enter word: ")
# .upper() lets the user type lower-case letters too, and .get() skips
# characters with no keypad mapping (spaces, digits, punctuation) -- the
# original lookup raised a KeyError for anything that was not an
# uppercase letter.
for key in word.upper():
    print(KEYPAD.get(key, ''), end='')
print()
print("This code was created by $pigot.")
# The loop above prints, on a single line, the keypad digit you would press
# for each letter of the entered word.
7ee771d4ce34d997b16dc36b66c0cfae9cd23bd9 | 1,177 | py | Python | typhon/core/type_system/constraints/member_constraint.py | strongrex2001/typhon | 7a8ad7e0252768844009ab331fc8aa61350f23a9 | [
"Apache-2.0"
] | 4 | 2021-03-03T12:44:34.000Z | 2021-07-03T10:15:43.000Z | typhon/core/type_system/constraints/member_constraint.py | eliphatfs/typhon | 7a8ad7e0252768844009ab331fc8aa61350f23a9 | [
"Apache-2.0"
] | null | null | null | typhon/core/type_system/constraints/member_constraint.py | eliphatfs/typhon | 7a8ad7e0252768844009ab331fc8aa61350f23a9 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Mar 14 09:40:01 2021
@author: eliphat
"""
from ..type_var import TypeVar
from ..type_repr import RecordType, BottomType
from ..system import TypeSystem
from .base_constraint import BaseConstraint
from .equality_constraint import EqualityConstraint
class MemberConstraint(BaseConstraint):
    """Constrains ``v_dst`` to the type of member ``record_label`` of ``v_src``.

    Once the source variable resolves to a record type, the destination
    variable receives (or is unified with) the type of the named member.
    """

    def __init__(self, v_dst: TypeVar, v_src: TypeVar, record_label: str):
        self.dst = v_dst
        self.src = v_src
        self.k = record_label

    def cause_vars(self):
        return [self.src]

    def effect_vars(self):
        return [self.dst]

    def fix(self, ts: TypeSystem):
        src_type = self.src.T
        if isinstance(src_type, BottomType):
            # Source not resolved yet; nothing to propagate.
            return
        if not isinstance(src_type, RecordType) or self.k not in src_type.members:
            raise TypeError("Type %s does not have member %s" % (src_type, self.k))
        member = src_type.members[self.k]
        if isinstance(member, TypeVar):
            # The member itself is still a variable: tie it to dst via equality.
            ts.add_constraint(EqualityConstraint(self.dst, member))
        else:
            self.dst.T = member

    def is_resolved(self):
        return isinstance(self.src.T, RecordType)
7ee7f3867098c176134098cbe45484ecb33e7f96 | 3,592 | py | Python | conflowgen/posthoc_analyses/inbound_and_outbound_vehicle_capacity_analysis_report.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | null | null | null | conflowgen/posthoc_analyses/inbound_and_outbound_vehicle_capacity_analysis_report.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | null | null | null | conflowgen/posthoc_analyses/inbound_and_outbound_vehicle_capacity_analysis_report.py | 1grasse/conflowgen | 142330ab6427254109af3b86102a30a13144ba0c | [
"MIT"
] | null | null | null | from __future__ import annotations
from conflowgen.posthoc_analyses.inbound_and_outbound_vehicle_capacity_analysis import \
InboundAndOutboundVehicleCapacityAnalysis
from conflowgen.reporting import AbstractReportWithMatplotlib
class InboundAndOutboundVehicleCapacityAnalysisReport(AbstractReportWithMatplotlib):
    """
    This analysis report takes the data structure as generated by :class:`.InboundAndOutboundVehicleCapacityAnalysis`
    and creates a comprehensible representation for the user, either as text or as a graph.
    """
    report_description = """
    Analyze the vehicle capacity by vehicle type for the inbound and outbound journeys and check for the maximum
    capacity of each vehicle type.
    If e.g. for the vehicle type 'feeder' the maximum outbound capacity is used up, most likely there are more vehicles
    that deliver containers destined for feeder vessels than there are feeder vessels planned during the period of data
    generation (between `start_date` and `end_date`).
    """

    def __init__(self):
        super().__init__()
        # transportation_buffer is inherited from the report base class.
        self.analysis = InboundAndOutboundVehicleCapacityAnalysis(
            transportation_buffer=self.transportation_buffer
        )

    def get_report_as_text(self) -> str:
        # Renders one fixed-width row per vehicle type; the literal spaces in
        # the header strings align the columns with the f-string widths below.
        inbound_capacities, outbound_actual_capacities, outbound_maximum_capacities = self._get_capacities()

        # create string representation
        report = "\n"
        report += "vehicle type "
        report += "inbound capacity "
        report += "outbound actual capacity "
        report += "outbound max capacity"
        report += "\n"
        for vehicle_type in self.order_of_vehicle_types_in_report:
            vehicle_type_as_text = str(vehicle_type).replace("_", " ")
            report += f"{vehicle_type_as_text:<15} "
            report += f"{inbound_capacities[vehicle_type]:>16.1f} "
            report += f"{outbound_actual_capacities[vehicle_type]:>24.1f} "
            report += f"{outbound_maximum_capacities[vehicle_type]:>21.1f}"
            report += "\n"
        report += "(rounding errors might exist)\n"
        return report

    def get_report_as_graph(self) -> object:
        """
        The report as a graph is represented as a bar chart using pandas.

        Returns:
             The matplotlib axis of the bar chart.
        """
        import pandas as pd  # pylint: disable=import-outside-toplevel
        import seaborn as sns  # pylint: disable=import-outside-toplevel
        sns.set_palette(sns.color_palette())

        inbound_capacities, outbound_actual_capacities, outbound_maximum_capacities = self._get_capacities()

        df = pd.DataFrame({
            "inbound capacities": inbound_capacities,
            "outbound actual capacities": outbound_actual_capacities,
            "outbound maximum capacities": outbound_maximum_capacities
        })
        # Replace underscores in the vehicle-type labels for nicer axis ticks.
        df.index = [str(i).replace("_", " ") for i in df.index]
        ax = df.plot.barh()
        ax.set_xlabel("Capacity (in TEU)")
        ax.set_title("Inbound and outbound vehicle capacity analysis")
        return ax

    def _get_capacities(self):
        # Refresh the analysis with the current transportation buffer, then
        # return (inbound, outbound_actual, outbound_maximum) capacity dicts.
        assert self.transportation_buffer is not None
        self.analysis.update(
            transportation_buffer=self.transportation_buffer
        )
        # gather data
        inbound_capacities = self.analysis.get_inbound_capacity_of_vehicles()
        outbound_actual_capacities, outbound_maximum_capacities = self.analysis.get_outbound_capacity_of_vehicles()
        return inbound_capacities, outbound_actual_capacities, outbound_maximum_capacities
| 44.345679 | 119 | 0.69794 | 3,356 | 0.934298 | 0 | 0 | 0 | 0 | 0 | 0 | 1,426 | 0.396993 |
7ee8718c462fcdfabadd0e929c133270742ee1d3 | 15,799 | py | Python | src/ehrudite/core/dnn/transformer.py | ClaudioBorges/ehrudite | 8633995d3bf795fffeccabd7d20be522241f3bb5 | [
"Apache-2.0"
] | null | null | null | src/ehrudite/core/dnn/transformer.py | ClaudioBorges/ehrudite | 8633995d3bf795fffeccabd7d20be522241f3bb5 | [
"Apache-2.0"
] | null | null | null | src/ehrudite/core/dnn/transformer.py | ClaudioBorges/ehrudite | 8633995d3bf795fffeccabd7d20be522241f3bb5 | [
"Apache-2.0"
] | 1 | 2022-03-18T09:26:05.000Z | 2022-03-18T09:26:05.000Z | """Transformer from 'Attention is all you need' (Vaswani et al., 2017)"""
# Reference: https://www.tensorflow.org/text/tutorials/transformer
# Reference: https://keras.io/examples/nlp/text_classification_with_transformer/
import numpy as np
import tensorflow as tf
class Transformer(tf.keras.Model):
    """Full encoder-decoder Transformer (Vaswani et al., 2017).

    Wires together an ``Encoder`` stack, a ``Decoder`` stack and a final
    dense projection onto the target vocabulary.
    """

    def __init__(
        self,
        num_layers,
        d_model,
        num_heads,
        dff,
        input_vocab_size,
        target_vocab_size,
        pe_input,
        pe_target,
        rate=0.1,
    ):
        """Build the encoder/decoder stacks and the output projection.

        Args:
            num_layers: number of encoder and of decoder layers.
            d_model: model dimension used on all sub-layers and embeddings.
            num_heads: number of attention heads.
            dff: hidden size of the position-wise feed-forward networks.
            input_vocab_size: size of the source-side vocabulary.
            target_vocab_size: size of the target-side vocabulary.
            pe_input: maximum position for the encoder positional encoding.
            pe_target: maximum position for the decoder positional encoding.
            rate: dropout rate.
        """
        super().__init__()
        self.encoder = Encoder(
            num_layers, d_model, num_heads, dff, input_vocab_size, pe_input, rate
        )
        self.decoder = Decoder(
            num_layers, d_model, num_heads, dff, target_vocab_size, pe_target, rate
        )
        # Projects decoder output onto per-token target-vocabulary logits.
        self.final_layer = tf.keras.layers.Dense(target_vocab_size)

    def call(self, inputs, training):
        """Run a forward pass.

        Args:
            inputs: a ``(source, target)`` pair of token-id tensors.
            training: whether dropout is active.

        Returns:
            ``(logits, attention_weights)`` where logits has shape
            (batch_size, tar_seq_len, target_vocab_size).
        """
        # Keras models prefer if you pass all your inputs in the first argument
        inp, tar = inputs

        enc_padding_mask, look_ahead_mask, dec_padding_mask = self.create_masks(
            inp, tar
        )

        enc_output = self.encoder(
            inp, training, enc_padding_mask
        )  # (batch_size, inp_seq_len, d_model)

        # dec_output.shape == (batch_size, tar_seq_len, d_model)
        dec_output, attention_weights = self.decoder(
            tar, enc_output, training, look_ahead_mask, dec_padding_mask
        )

        final_output = self.final_layer(
            dec_output
        )  # (batch_size, tar_seq_len, target_vocab_size)

        return final_output, attention_weights

    def create_masks(self, inp, tar):
        """Build the three attention masks for one (source, target) batch.

        Returns:
            ``(enc_padding_mask, look_ahead_mask, dec_padding_mask)``.
        """
        # Encoder padding mask
        enc_padding_mask = _create_padding_mask(inp)

        # Used in the 2nd attention block in the decoder
        # This padding mask is used to mask the encoder outputs.
        dec_padding_mask = _create_padding_mask(inp)

        # Used in the 1st attention block in the decoder.
        # It is used to pad and mask future tokens in the input received by
        # the decoder.
        look_ahead_mask = _create_look_ahead_mask(tf.shape(tar)[1])
        dec_target_padding_mask = _create_padding_mask(tar)
        # Combine causal and padding constraints into a single mask.
        look_ahead_mask = tf.maximum(dec_target_padding_mask, look_ahead_mask)

        return enc_padding_mask, look_ahead_mask, dec_padding_mask
class Encoder(tf.keras.layers.Layer):
    """Transformer encoder from 'Attention is all you need' (Vaswani et al., 2017)
    Contains:
    1. Input Embedding
    2. Positional Encoding
    3. N encoder layers
    """

    def __init__(
        self,
        num_layers,
        d_model,
        num_heads,
        dff,
        input_vocab_size,
        maximum_position_encoding,
        rate=0.1,
    ):
        """Build the embedding, positional encoding and encoder-layer stack.

        Args:
            num_layers: number of stacked ``EncoderLayer`` instances.
            d_model: model/embedding dimension.
            num_heads: number of attention heads per layer.
            dff: feed-forward hidden size per layer.
            input_vocab_size: vocabulary size for the input embedding.
            maximum_position_encoding: longest position covered by the
                precomputed positional encoding table.
            rate: dropout rate.
        """
        super(Encoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = tf.keras.layers.Embedding(input_vocab_size, d_model)
        self.pos_encoding = _positional_encoding(
            maximum_position_encoding, self.d_model
        )

        self.enc_layers = [
            EncoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)
        ]

        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        """Embed, add positional encoding, then run the layer stack.

        Args:
            x: (batch_size, input_seq_len) token ids.
            training: whether dropout is active.
            mask: padding mask broadcastable to the attention logits.

        Returns:
            (batch_size, input_seq_len, d_model) encoded sequence.
        """
        seq_len = tf.shape(x)[1]

        # adding embedding and position encoding
        x = self.embedding(x)  # (batch_size, input_seq_len, d_model)
        # Scale embeddings by sqrt(d_model) before adding the positional signal.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x, training=training)

        for i in range(self.num_layers):
            x = self.enc_layers[i](x, training, mask)

        return x  # (batch_size, input_seq_len, d_model)
class Decoder(tf.keras.layers.Layer):
    """Transformer decoder stack: embedding + positional encoding + N decoder layers."""

    def __init__(
        self,
        num_layers,
        d_model,
        num_heads,
        dff,
        target_vocab_size,
        maximum_position_encoding,
        rate=0.1,
    ):
        """Build the embedding, positional encoding and decoder-layer stack.

        Args mirror ``Encoder.__init__`` with ``target_vocab_size`` used for
        the target-side embedding.
        """
        super(Decoder, self).__init__()

        self.d_model = d_model
        self.num_layers = num_layers

        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = _positional_encoding(maximum_position_encoding, d_model)

        self.dec_layers = [
            DecoderLayer(d_model, num_heads, dff, rate) for _ in range(num_layers)
        ]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        """Decode target tokens attending over the encoder output.

        Args:
            x: (batch_size, target_seq_len) target token ids.
            enc_output: encoder output, (batch_size, input_seq_len, d_model).
            training: whether dropout is active.
            look_ahead_mask: causal (+padding) mask for decoder self-attention.
            padding_mask: padding mask for attention over the encoder output.

        Returns:
            ``(x, attention_weights)`` where attention_weights maps
            ``decoder_layer{i}_block{1,2}`` to each layer's attention maps.
        """
        seq_len = tf.shape(x)[1]
        attention_weights = {}

        x = self.embedding(x)  # (batch_size, target_seq_len, d_model)
        # Scale embeddings by sqrt(d_model) before adding the positional signal.
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]

        x = self.dropout(x, training=training)

        for i in range(self.num_layers):
            x, block1, block2 = self.dec_layers[i](
                x, enc_output, training, look_ahead_mask, padding_mask
            )

            # Collect per-layer attention maps for inspection/visualization.
            attention_weights[f"decoder_layer{i+1}_block1"] = block1
            attention_weights[f"decoder_layer{i+1}_block2"] = block2

        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights
class EncoderLayer(tf.keras.layers.Layer):
    """Transformer encoder layer from 'Attention is all you need' (Vaswani et al., 2017)
    One of the main difference between the transformer encoder from decoder is
    the self-attention. The reasons for it is detailed in the Section 4 and can
    be summarized as a way to reduce the path length between long-range depencies
    in the network.
    """

    def __init__(self, d_model=512, num_heads=8, dff=2048, rate=0.1):
        """Initializer a Transformer Encoder Layer
        Attributes
        ----------
        d_model : int
            Model dimension used on all sub-layers and embedding.
        num_heads : int
            Number of heads. Vaswani et al., 2017 describes as $h$
        dff : int
            FeedForward dimension.
        rate : float
            Dropout rate parameter applied after self-attention and
            FeedForward.
        """
        super(EncoderLayer, self).__init__()

        self.mha = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
        self.ffn = _point_wise_feed_forward_network(d_model, dff)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)

    def call(self, x, training, mask):
        """Self-attention followed by feed-forward, each with dropout,
        a residual connection and layer normalization.
        """
        attn_output, _ = self.mha(x, x, x, mask)  # (batch_size, input_seq_len, d_model)
        attn_output = self.dropout1(attn_output, training=training)
        # Residual connection + layer norm around the attention sub-layer.
        out1 = self.layernorm1(x + attn_output)  # (batch_size, input_seq_len, d_model)

        ffn_output = self.ffn(out1)  # (batch_size, input_seq_len, d_model)
        ffn_output = self.dropout2(ffn_output, training=training)
        # Residual connection + layer norm around the feed-forward sub-layer.
        return self.layernorm2(
            out1 + ffn_output
        )  # (batch_size, input_seq_len, d_model)
class DecoderLayer(tf.keras.layers.Layer):
    """Transformer decoder layer from 'Attention is all you need' (Vaswani et al., 2017)
    Decoder layer is similar to encoder but have a third sub-layer performing
    multi-head attention over the encoder stack. The self-attention sub-layer
    is modified preventing positions from attending to subsequent positions.
    Embeddings are also offset by one position, forcing predictions of
    position i to depend on the known outputs at positions less than i.
    """

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        """Build the two attention sub-layers, the feed-forward network,
        and their layer norms / dropouts.
        """
        super(DecoderLayer, self).__init__()

        # mha1: masked self-attention; mha2: attention over encoder output.
        self.mha1 = MultiHeadAttention(num_heads=num_heads, d_model=d_model)
        self.mha2 = MultiHeadAttention(num_heads=num_heads, d_model=d_model)

        self.ffn = _point_wise_feed_forward_network(d_model, dff)

        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)

        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        """Masked self-attention, then encoder-decoder attention, then
        feed-forward; each sub-layer has dropout, a residual connection and
        layer normalization.

        Returns:
            ``(out3, attn_weights_block1, attn_weights_block2)``.
        """
        # enc_output.shape == (batch_size, input_seq_len, d_model)

        attn1, attn_weights_block1 = self.mha1(
            x, x, x, look_ahead_mask
        )  # (batch_size, target_seq_len, d_model)
        attn1 = self.dropout1(attn1, training=training)
        out1 = self.layernorm1(attn1 + x)

        # Queries come from the decoder (out1); keys/values from the encoder.
        attn2, attn_weights_block2 = self.mha2(
            enc_output, enc_output, out1, padding_mask
        )  # (batch_size, target_seq_len, d_model)
        attn2 = self.dropout2(attn2, training=training)
        out2 = self.layernorm2(attn2 + out1)  # (batch_size, target_seq_len, d_model)

        ffn_output = self.ffn(out2)  # (batch_size, target_seq_len, d_model)
        ffn_output = self.dropout3(ffn_output, training=training)
        out3 = self.layernorm3(
            ffn_output + out2
        )  # (batch_size, target_seq_len, d_model)

        return out3, attn_weights_block1, attn_weights_block2
def _point_wise_feed_forward_network(d_model, dff):
    """Position-wise Feed-Forward Network
    It's a fully connnected feed-forward network applied to each position
    separately and identically represented by:
    ```
    FFN(x) = max(0, xW_1 + b_1)W_2 + b2$
    ```
    It contains two linear transformation with a ReLU activation in between.
    """
    hidden = tf.keras.layers.Dense(dff, activation="relu")  # (batch_size, seq_len, dff)
    projection = tf.keras.layers.Dense(d_model)  # (batch_size, seq_len, d_model)
    return tf.keras.Sequential([hidden, projection])
def _create_padding_mask(seq):
    """Mark every pad (zero) token in the batch with a 1.0 in the mask."""
    mask = tf.cast(tf.math.equal(seq, 0), tf.float32)

    # Insert broadcast dimensions so the mask can be added directly to the
    # attention logits.
    return mask[:, tf.newaxis, tf.newaxis, :]  # (batch_size, 1, 1, seq_len)
def _create_look_ahead_mask(size):
    """Build a (size, size) mask that hides future tokens in a sequence."""
    lower_triangular = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    # Zeros on and below the diagonal (visible), ones above it (masked).
    return 1 - lower_triangular  # (seq_len, seq_len)
def _positional_encoding(position, d_model):
    """Fixed sinusoidal Position Encoding (PE).

    Because the model contains no recurrence and convolution, positional
    encoding is injected to add information about the absolute position of the
    tokens in the sequence. The fixed variant used here derives the encoding
    from sine and cosine functions of different frequencies:

        PE(pos, 2i)     = sin(pos / 10000^(2i / d_model))
        PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_model))

    where ``pos`` is the absolute position of a token and ``i`` the dimension.
    """
    positions = np.arange(position)[:, np.newaxis]
    dimensions = np.arange(d_model)[np.newaxis, :]
    inverse_frequencies = 1 / np.power(
        10000, (2 * (dimensions // 2)) / np.float32(d_model)
    )
    angles = positions * inverse_frequencies

    # Even dimensions (2i) get sine, odd dimensions (2i + 1) get cosine.
    angles[:, 0::2] = np.sin(angles[:, 0::2])
    angles[:, 1::2] = np.cos(angles[:, 1::2])

    # Add a leading batch dimension for broadcasting onto embeddings.
    encoding = angles[np.newaxis, ...]
    return tf.cast(encoding, dtype=tf.float32)
def scaled_dot_product_attention(q, k, v, mask):
    """Calculate the attention weights.
    q, k, v must have matching leading dimensions.
    k, v must have matching penultimate dimension, i.e.: seq_len_k = seq_len_v.
    The mask has different shapes depending on its type(padding or look ahead)
    but it must be broadcastable for addition.
    Args:
        q: query shape == (..., seq_len_q, depth)
        k: key shape == (..., seq_len_k, depth)
        v: value shape == (..., seq_len_v, depth_v)
        mask: Float tensor with shape broadcastable
              to (..., seq_len_q, seq_len_k). Defaults to None.
    Returns:
        output, attention_weights
    """
    # Raw similarity scores: (..., seq_len_q, seq_len_k).
    similarity = tf.matmul(q, k, transpose_b=True)

    # Scale by sqrt(depth of the keys) to stabilize softmax gradients.
    key_depth = tf.cast(tf.shape(k)[-1], tf.float32)
    scaled_logits = similarity / tf.math.sqrt(key_depth)

    # Masked positions get a huge negative logit so softmax zeroes them out.
    if mask is not None:
        scaled_logits += mask * -1e9

    # Normalize over the key axis so each query's weights sum to 1.
    attention_weights = tf.nn.softmax(
        scaled_logits, axis=-1
    )  # (..., seq_len_q, seq_len_k)

    output = tf.matmul(attention_weights, v)  # (..., seq_len_q, depth_v)

    return output, attention_weights
class MultiHeadAttention(tf.keras.layers.Layer):
    """Multi-head attention: project q/k/v, attend per head, then recombine."""

    def __init__(self, d_model, num_heads):
        """Create the q/k/v projections and the output projection.

        ``d_model`` must be divisible by ``num_heads``; each head works in a
        subspace of size ``d_model // num_heads``.
        """
        super(MultiHeadAttention, self).__init__()
        self.num_heads = num_heads
        self.d_model = d_model

        assert d_model % self.num_heads == 0

        self.depth = d_model // self.num_heads

        self.wq = tf.keras.layers.Dense(d_model)
        self.wk = tf.keras.layers.Dense(d_model)
        self.wv = tf.keras.layers.Dense(d_model)

        self.dense = tf.keras.layers.Dense(d_model)

    def split_heads(self, x, batch_size):
        """Split the last dimension into (num_heads, depth).
        Transpose the result such that the shape is (batch_size, num_heads, seq_len, depth)
        """
        x = tf.reshape(x, (batch_size, -1, self.num_heads, self.depth))
        return tf.transpose(x, perm=[0, 2, 1, 3])

    def call(self, v, k, q, mask):
        """Apply multi-head scaled dot-product attention.

        Note the argument order is (values, keys, queries, mask).

        Returns:
            ``(output, attention_weights)`` with output of shape
            (batch_size, seq_len_q, d_model).
        """
        batch_size = tf.shape(q)[0]

        q = self.wq(q)  # (batch_size, seq_len, d_model)
        k = self.wk(k)  # (batch_size, seq_len, d_model)
        v = self.wv(v)  # (batch_size, seq_len, d_model)

        q = self.split_heads(q, batch_size)  # (batch_size, num_heads, seq_len_q, depth)
        k = self.split_heads(k, batch_size)  # (batch_size, num_heads, seq_len_k, depth)
        v = self.split_heads(v, batch_size)  # (batch_size, num_heads, seq_len_v, depth)

        # scaled_attention.shape == (batch_size, num_heads, seq_len_q, depth)
        # attention_weights.shape == (batch_size, num_heads, seq_len_q, seq_len_k)
        scaled_attention, attention_weights = scaled_dot_product_attention(
            q, k, v, mask
        )

        # Undo the head split: move heads back next to depth, then flatten.
        scaled_attention = tf.transpose(
            scaled_attention, perm=[0, 2, 1, 3]
        )  # (batch_size, seq_len_q, num_heads, depth)

        concat_attention = tf.reshape(
            scaled_attention, (batch_size, -1, self.d_model)
        )  # (batch_size, seq_len_q, d_model)

        output = self.dense(concat_attention)  # (batch_size, seq_len_q, d_model)

        return output, attention_weights
def optimizer(d_model):
    """Adam optimizer with the warmup learning-rate schedule of Section 5.3.

    lrate = d_model^-0.5 * min(step^-0.5, step * warmup_steps^-1.5)

    Args:
        d_model: model dimension used to scale the learning rate.

    Returns:
        A ``tf.keras.optimizers.Adam`` instance driven by the schedule.
    """

    class CustomSchedule(tf.keras.optimizers.schedules.LearningRateSchedule):
        """Linear warmup followed by inverse-square-root decay."""

        def __init__(self, d_model, warmup_steps=4000):
            super(CustomSchedule, self).__init__()
            self.d_model = tf.cast(d_model, tf.float32)
            self.warmup_steps = warmup_steps

        def __call__(self, step):
            # Keras may pass `step` as an integer tensor; tf.math.rsqrt only
            # accepts floating-point inputs, so cast before using it.
            step = tf.cast(step, tf.float32)
            arg1 = tf.math.rsqrt(step)
            arg2 = step * (self.warmup_steps ** -1.5)

            return tf.math.rsqrt(self.d_model) * tf.math.minimum(arg1, arg2)

    learning_rate = CustomSchedule(d_model)
    return tf.keras.optimizers.Adam(
        learning_rate, beta_1=0.9, beta_2=0.98, epsilon=1e-9
    )
| 35.344519 | 91 | 0.648269 | 11,635 | 0.736439 | 0 | 0 | 0 | 0 | 0 | 0 | 5,606 | 0.354833 |
7eea074d109ec1681ca547e782a6c5293f0db45e | 43,464 | py | Python | backend/tests/baserow/contrib/database/field/test_formula_field_type.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/tests/baserow/contrib/database/field/test_formula_field_type.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | backend/tests/baserow/contrib/database/field/test_formula_field_type.py | ashishdhngr/baserow | b098678d2165eb7c42930ee24dc6753a3cb520c3 | [
"MIT"
] | null | null | null | import inspect
import pytest
from django.db.models import TextField
from django.urls import reverse
from rest_framework.status import HTTP_200_OK, HTTP_204_NO_CONTENT
from baserow.contrib.database.table.cache import (
generated_models_cache,
)
from baserow.contrib.database.fields.dependencies.handler import FieldDependencyHandler
from baserow.contrib.database.fields.dependencies.update_collector import (
CachingFieldUpdateCollector,
)
from baserow.contrib.database.fields.field_cache import FieldCache
from baserow.contrib.database.fields.field_types import FormulaFieldType
from baserow.contrib.database.fields.fields import BaserowExpressionField
from baserow.contrib.database.fields.handler import FieldHandler
from baserow.contrib.database.fields.models import FormulaField, LookupField
from baserow.contrib.database.fields.registries import field_type_registry
from baserow.contrib.database.formula import (
BaserowFormulaInvalidType,
FormulaHandler,
BaserowFormulaTextType,
BaserowFormulaNumberType,
)
from baserow.contrib.database.formula.ast.tree import BaserowFunctionDefinition
from baserow.contrib.database.formula.registries import formula_function_registry
from baserow.contrib.database.rows.handler import RowHandler
from baserow.contrib.database.views.exceptions import (
ViewFilterTypeNotAllowedForField,
ViewSortFieldNotSupported,
)
from baserow.contrib.database.views.handler import ViewHandler
from baserow.contrib.database.views.models import SORT_ORDER_ASC, SORT_ORDER_DESC
from baserow.contrib.database.views.registries import view_filter_type_registry
@pytest.mark.django_db
def test_creating_a_model_with_formula_field_immediately_populates_it(data_fixture):
    """A generated model computes a formula field's value on row creation."""
    table = data_fixture.create_database_table()
    field = data_fixture.create_formula_field(
        table=table, formula="'test'", formula_type="text"
    )
    db_column = f"field_{field.id}"

    created_row = table.get_model().objects.create()
    assert getattr(created_row, db_column) == "test"
@pytest.mark.django_db
def test_adding_a_formula_field_to_an_existing_table_populates_it_for_all_rows(
    data_fixture,
):
    """Creating a formula field back-fills its value into pre-existing rows."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    pre_existing_row = table.get_model().objects.create()

    field = FieldHandler().create_field(
        user, table, "formula", name="formula", formula="'test'"
    )
    db_column = f"field_{field.id}"

    model = table.get_model()
    fresh_row = model.objects.create()
    assert getattr(fresh_row, db_column) == "test"
    # The row that existed before the field was added must also be populated.
    assert getattr(model.objects.get(id=pre_existing_row.id), db_column) == "test"
@pytest.mark.django_db
def test_cant_change_the_value_of_a_formula_field_directly(data_fixture):
    """Writes to a formula column are ignored; the computed value persists."""
    table = data_fixture.create_database_table()
    data_fixture.create_formula_field(
        name="formula", table=table, formula="'test'", formula_type="text"
    )
    data_fixture.create_text_field(name="text", table=table)
    model = table.get_model(attribute_names=True)

    # Attempting to override the value at creation time has no effect.
    row = model.objects.create(formula="not test")
    assert row.formula == "test"

    # Neither does assigning and saving, even alongside a legitimate update.
    row.text = "update other field"
    row.save()
    row.formula = "not test"
    row.save()

    row.refresh_from_db()
    assert row.formula == "test"
@pytest.mark.django_db
def test_get_set_export_serialized_value_formula_field(data_fixture):
    """Round-tripping a formula value through export/import leaves it unchanged."""
    table = data_fixture.create_database_table()
    formula_field = data_fixture.create_formula_field(
        table=table, formula="'test'", formula_type="text"
    )
    formula_field_name = f"field_{formula_field.id}"
    formula_field_type = field_type_registry.get_by_model(formula_field)

    model = table.get_model()
    row_1 = model.objects.create()
    row_2 = model.objects.create()

    old_row_1_value = getattr(row_1, formula_field_name)
    old_row_2_value = getattr(row_2, formula_field_name)
    assert old_row_1_value == "test"
    assert old_row_2_value == "test"

    # Export each row's value and immediately import it back onto the same
    # row. (The extra {} / None arguments are presumably the id-mapping,
    # files-zip and storage parameters of the field-type API — TODO confirm.)
    formula_field_type.set_import_serialized_value(
        row_1,
        formula_field_name,
        formula_field_type.get_export_serialized_value(
            row_1, formula_field_name, {}, None, None
        ),
        {},
        None,
        None,
    )
    formula_field_type.set_import_serialized_value(
        row_2,
        formula_field_name,
        formula_field_type.get_export_serialized_value(
            row_2, formula_field_name, {}, None, None
        ),
        {},
        None,
        None,
    )

    row_1.save()
    row_2.save()

    row_1.refresh_from_db()
    row_2.refresh_from_db()

    # The computed values must survive the export/import round trip.
    assert old_row_1_value == getattr(row_1, formula_field_name)
    assert old_row_2_value == getattr(row_2, formula_field_name)
@pytest.mark.django_db
def test_changing_type_of_other_field_still_results_in_working_filter(data_fixture):
    """Invalidated view filters on a formula field are removed, not left broken."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    grid_view = data_fixture.create_grid_view(user, table=table)
    first_formula_field = data_fixture.create_formula_field(
        table=table, formula="'test'", formula_type="text", name="source"
    )
    formula_field_referencing_first_field = data_fixture.create_formula_field(
        table=table, formula="field('source')", formula_type="text"
    )
    data_fixture.create_view_filter(
        user=user,
        view=grid_view,
        field=formula_field_referencing_first_field,
        type="equal",
        value="t",
    )

    # Change the first formula field to a number formula, meaning that the
    # text view filter on the referencing formula field is now invalid and
    # should be deleted.
    FieldHandler().update_field(user, first_formula_field, formula="1")

    # The view must still be queryable without raising.
    queryset = ViewHandler().get_queryset(grid_view)
    assert not queryset.exists()
    assert queryset.count() == 0
@pytest.mark.django_db
def test_can_use_complex_date_filters_on_formula_field(data_fixture):
    """A date-typed formula field supports timezone-aware date view filters."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    grid_view = data_fixture.create_grid_view(user, table=table)
    data_fixture.create_date_field(user=user, table=table, name="date_field")
    date_formula = data_fixture.create_formula_field(
        table=table, formula="field('date_field')", formula_type="date", name="formula"
    )

    # A timezone-aware "date equals today" filter on the formula field.
    data_fixture.create_view_filter(
        user=user,
        view=grid_view,
        field=date_formula,
        type="date_equals_today",
        value="Europe/London",
    )

    filtered_rows = ViewHandler().get_queryset(grid_view)
    assert not filtered_rows.exists()
    assert filtered_rows.count() == 0
@pytest.mark.django_db
def test_can_use_complex_contains_filters_on_formula_field(data_fixture):
    """A date formula with US formatting supports a text 'contains' filter."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    grid_view = data_fixture.create_grid_view(user, table=table)
    data_fixture.create_date_field(
        user=user, table=table, name="date_field", date_format="US"
    )
    date_formula = data_fixture.create_formula_field(
        table=table,
        formula="field('date_field')",
        formula_type="date",
        name="formula",
        date_format="US",
        date_time_format="24",
    )

    # Text-style "contains" filter applied to the formatted date value.
    data_fixture.create_view_filter(
        user=user,
        view=grid_view,
        field=date_formula,
        type="contains",
        value="23",
    )

    filtered_rows = ViewHandler().get_queryset(grid_view)
    assert not filtered_rows.exists()
    assert filtered_rows.count() == 0
@pytest.mark.django_db
def test_can_change_formula_type_breaking_other_fields(data_fixture):
    """Changing a formula's type marks dependant formulas as invalid with an error."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    handler = FieldHandler()
    first_formula_field = handler.create_field(
        user=user, table=table, name="1", type_name="formula", formula="1+1"
    )
    second_formula_field = handler.create_field(
        user=user, table=table, type_name="formula", name="2", formula="field('1')+1"
    )
    # The dependency graph must link the two fields in both directions.
    assert list(
        second_formula_field.field_dependencies.values_list("id", flat=True)
    ) == [first_formula_field.id]
    assert list(first_formula_field.dependant_fields.values_list("id", flat=True)) == [
        second_formula_field.id
    ]
    assert (
        second_formula_field.dependencies.first().dependency.specific
        == first_formula_field
    )

    # Turning the first formula into text makes "field('1')+1" type-invalid.
    handler.update_field(
        user=user, field=first_formula_field, new_type_name="formula", formula="'a'"
    )

    second_formula_field.refresh_from_db()
    assert second_formula_field.formula_type == BaserowFormulaInvalidType.type
    assert "argument number 2" in second_formula_field.error
@pytest.mark.django_db
def test_can_still_insert_rows_with_an_invalid_but_previously_date_formula_field(
    data_fixture,
):
    """Rows can still be created after a date formula becomes invalid."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    handler = FieldHandler()
    date_field = handler.create_field(
        user=user, table=table, name="1", type_name="date"
    )
    formula_field = handler.create_field(
        user=user, table=table, type_name="formula", name="2", formula="field('1')"
    )

    # Converting the referenced field away from date invalidates the formula.
    handler.update_field(user=user, field=date_field, new_type_name="single_select")

    inserted_row = RowHandler().create_row(user=user, table=table)
    assert getattr(inserted_row, f"field_{formula_field.id}") is None
@pytest.mark.django_db
def test_formula_with_row_id_is_populated_after_creating_row(
    data_fixture,
):
    """A row_id() formula is filled with the row's own id right after insert."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    row_id_formula = FieldHandler().create_field(
        user=user, table=table, type_name="formula", name="2", formula="row_id()"
    )

    created_row = RowHandler().create_row(user=user, table=table)
    assert getattr(created_row, f"field_{row_id_formula.id}") == created_row.id
@pytest.mark.django_db
def test_can_rename_field_preserving_whitespace(
    data_fixture,
):
    """Renaming a referenced field rewrites formulas without touching whitespace."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    handler = FieldHandler()
    test_field = handler.create_field(
        user=user, table=table, type_name="text", name="a"
    )
    formula_field = handler.create_field(
        user=user, table=table, type_name="formula", name="2", formula=" field('a') \n"
    )
    # The surrounding whitespace of the formula is stored verbatim.
    # (Plain strings here: the originals were f-strings with no placeholders.)
    assert formula_field.formula == " field('a') \n"

    handler.update_field(user=user, field=test_field, name="b")

    formula_field.refresh_from_db()
    # Only the referenced field name changed; the leading space and trailing
    # newline are preserved.
    assert formula_field.formula == " field('b') \n"
@pytest.mark.django_db
def test_recalculate_formulas_according_to_version(
    data_fixture,
):
    """Only formulas below the current formula version are recalculated.

    Creates fields at version 1 and at the current version, rebuilds the
    dependency graph, runs the migration-style recalculation and checks which
    internal formulas / refresh flags were (or were not) rewritten.
    """
    # Version-1 formula with a still-empty internal formula.
    formula_with_default_internal_field = data_fixture.create_formula_field(
        formula="1",
        internal_formula="",
        requires_refresh_after_insert=False,
        name="a",
        version=1,
        recalculate=False,
        create_field=False,
    )
    # row_id() needs a refresh after insert; the stored flag is stale (False).
    formula_that_needs_refresh = data_fixture.create_formula_field(
        formula="row_id()",
        internal_formula="",
        formula_type="number",
        requires_refresh_after_insert=False,
        name="b",
        version=1,
        recalculate=False,
        create_field=False,
    )
    # References a field that does not exist, so it must become invalid.
    broken_reference_formula = data_fixture.create_formula_field(
        formula="field('unknown')",
        internal_formula="",
        requires_refresh_after_insert=False,
        name="c",
        version=1,
        recalculate=False,
        create_field=False,
    )
    # Depends on the row_id() formula above (same table).
    dependant_formula = data_fixture.create_formula_field(
        table=formula_that_needs_refresh.table,
        formula="field('b')",
        internal_formula="",
        requires_refresh_after_insert=False,
        name="d",
        version=1,
        recalculate=False,
        create_field=False,
    )
    # Already at the current version: must be left untouched.
    formula_already_at_correct_version = data_fixture.create_formula_field(
        formula="'a'",
        internal_formula="",
        requires_refresh_after_insert=False,
        name="e",
        version=FormulaHandler.BASEROW_FORMULA_VERSION,
        recalculate=False,
        create_field=False,
    )
    # Current-version formula depending on an old-version one; it is still
    # recalculated because its dependency changes.
    upto_date_formula_depending_on_old_version = data_fixture.create_formula_field(
        table=dependant_formula.table,
        formula=f"field('{dependant_formula.name}')",
        internal_formula="",
        requires_refresh_after_insert=False,
        name="f",
        version=FormulaHandler.BASEROW_FORMULA_VERSION,
        recalculate=False,
        create_field=False,
    )
    assert (
        formula_already_at_correct_version.version
        == FormulaHandler.BASEROW_FORMULA_VERSION
    )
    assert dependant_formula.version == 1

    # Rebuild the dependency graph before running the recalculation.
    field_cache = FieldCache()
    for formula_field in FormulaField.objects.all():
        FieldDependencyHandler().rebuild_dependencies(formula_field, field_cache)
    FormulaHandler().recalculate_formulas_according_to_version()

    formula_with_default_internal_field.refresh_from_db()
    assert formula_with_default_internal_field.internal_formula == "error_to_nan(1)"
    assert not formula_with_default_internal_field.requires_refresh_after_insert

    formula_that_needs_refresh.refresh_from_db()
    assert formula_that_needs_refresh.internal_formula == "error_to_nan(row_id())"
    assert formula_that_needs_refresh.requires_refresh_after_insert

    broken_reference_formula.refresh_from_db()
    assert broken_reference_formula.internal_formula == "field('unknown')"
    assert broken_reference_formula.formula_type == "invalid"
    assert not broken_reference_formula.requires_refresh_after_insert

    dependant_formula.refresh_from_db()
    assert dependant_formula.internal_formula == "error_to_nan(row_id())"
    assert dependant_formula.requires_refresh_after_insert

    # The update is not done for this formula and hence the values are left alone
    formula_already_at_correct_version.refresh_from_db()
    assert formula_already_at_correct_version.internal_formula == ""
    assert not formula_already_at_correct_version.requires_refresh_after_insert

    upto_date_formula_depending_on_old_version.refresh_from_db()
    assert (
        upto_date_formula_depending_on_old_version.field_dependencies.get().specific
        == dependant_formula
    )
    assert (
        upto_date_formula_depending_on_old_version.internal_formula
        == "error_to_nan(row_id())"
    )
    assert upto_date_formula_depending_on_old_version.requires_refresh_after_insert
@pytest.mark.django_db
def test_can_update_lookup_field_value(
    data_fixture, api_client, django_assert_num_queries
):
    """Updating a looked-up row recomputes dependant lookup formulas via the API."""
    user, token = data_fixture.create_user_and_token()
    table = data_fixture.create_database_table(user=user)
    table2 = data_fixture.create_database_table(user=user, database=table.database)
    table_primary_field = data_fixture.create_text_field(
        name="p", table=table, primary=True
    )
    data_fixture.create_text_field(name="primaryfield", table=table2, primary=True)
    looked_up_field = data_fixture.create_date_field(
        name="lookupfield",
        table=table2,
        date_include_time=False,
        date_format="US",
    )
    # Link from table -> table2 so table can look up table2's date field.
    linkrowfield = FieldHandler().create_field(
        user,
        table,
        "link_row",
        name="linkrowfield",
        link_row_table=table2,
    )
    table2_model = table2.get_model(attribute_names=True)
    a = table2_model.objects.create(lookupfield=f"2021-02-01", primaryfield="primary a")
    b = table2_model.objects.create(lookupfield=f"2022-02-03", primaryfield="primary b")
    table_model = table.get_model(attribute_names=True)
    table_row = table_model.objects.create()
    table_row.linkrowfield.add(a.id)
    table_row.linkrowfield.add(b.id)
    table_row.save()

    # Per linked row: "yes" when the looked-up date's year is 2021.
    formulafield = FieldHandler().create_field(
        user,
        table,
        "formula",
        name="formulafield",
        formula=f"IF(datetime_format(lookup('{linkrowfield.name}',"
        f"'{looked_up_field.name}'), "
        f"'YYYY')='2021', 'yes', 'no')",
    )

    # Initially row `a` (year 2021) evaluates to "yes", row `b` to "no".
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": None,
                f"field_{linkrowfield.id}": [
                    {"id": a.id, "value": "primary a"},
                    {"id": b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [
                    {"value": "yes", "id": a.id},
                    {"value": "no", "id": b.id},
                ],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }

    # Move row `a`'s date out of 2021 through the row-update endpoint.
    response = api_client.patch(
        reverse(
            "api:database:rows:item",
            kwargs={"table_id": table2.id, "row_id": a.id},
        ),
        {f"field_{looked_up_field.id}": "2000-02-01"},
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_200_OK

    # The dependant formula in the first table must now report "no" for `a`.
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": None,
                f"field_{linkrowfield.id}": [
                    {"id": a.id, "value": "primary a"},
                    {"id": b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [
                    {"value": "no", "id": a.id},
                    {"value": "no", "id": b.id},
                ],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }
@pytest.mark.django_db
def test_nested_lookup_with_formula(
    data_fixture, api_client, django_assert_num_queries
):
    """A lookup formula spanning table -> table2 -> table3 must return the
    linked table3 primary values, each tagged with the ids of the intermediate
    (table2) and final (table3) rows it was resolved through."""
    user, token = data_fixture.create_user_and_token()
    # Three tables in one database: table links to table2, table2 to table3.
    table = data_fixture.create_database_table(user=user)
    table2 = data_fixture.create_database_table(user=user, database=table.database)
    table3 = data_fixture.create_database_table(user=user, database=table.database)
    table_primary_field = data_fixture.create_text_field(
        name="p", table=table, primary=True
    )
    data_fixture.create_text_field(name="p", table=table3, primary=True)
    data_fixture.create_text_field(name="p", table=table2, primary=True)
    data_fixture.create_text_field(name="lookupfield", table=table2)
    linkrowfield = FieldHandler().create_field(
        user,
        table,
        "link_row",
        name="table_linkrowfield",
        link_row_table=table2,
    )
    linkrowfield2 = FieldHandler().create_field(
        user,
        table2,
        "link_row",
        name="table2_linkrowfield",
        link_row_table=table3,
    )
    # table3 rows: `a` linked from table2_1, `c`/`d` from table2_3, `b` unlinked.
    table3_model = table3.get_model(attribute_names=True)
    table3_a = table3_model.objects.create(p="table3 a")
    table3_model.objects.create(p="table3 b")
    table3_c = table3_model.objects.create(p="table3 c")
    table3_d = table3_model.objects.create(p="table3 d")
    table2_model = table2.get_model(attribute_names=True)
    table2_1 = table2_model.objects.create(lookupfield=f"lookup 1", p=f"primary 1")
    table2_1.table2linkrowfield.add(table3_a.id)
    table2_1.save()
    table2_2 = table2_model.objects.create(lookupfield=f"lookup 2", p=f"primary 2")
    table2_3 = table2_model.objects.create(lookupfield=f"lookup 3", p=f"primary 3")
    table2_3.table2linkrowfield.add(table3_c.id)
    table2_3.table2linkrowfield.add(table3_d.id)
    table2_3.save()
    # First-level rows: x links to table2_1 and table2_2, y links to table2_3.
    table_model = table.get_model(attribute_names=True)
    table1_x = table_model.objects.create(p="table1 x")
    table1_x.tablelinkrowfield.add(table2_1.id)
    table1_x.tablelinkrowfield.add(table2_2.id)
    table1_x.save()
    table1_y = table_model.objects.create(p="table1 y")
    table1_y.tablelinkrowfield.add(table2_3.id)
    table1_y.save()
    # with django_assert_num_queries(1):
    lookup_field = FieldHandler().create_field(
        user,
        table,
        type_name="formula",
        name="formula",
        formula=f"lookup('{linkrowfield.name}','{linkrowfield2.name}')",
    )
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Each nested lookup entry serializes as {"value", "ids": {per-table row id}}.
    assert response.json() == {
        "count": 2,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": table1_x.p,
                f"field_{linkrowfield.id}": [
                    {"id": table2_1.id, "value": table2_1.p},
                    {"id": table2_2.id, "value": table2_2.p},
                ],
                f"field_{lookup_field.id}": [
                    {
                        "value": table3_a.p,
                        "ids": {
                            f"database_table_{table2.id}": table2_1.id,
                            f"database_table_{table3.id}": table3_a.id,
                        },
                    },
                ],
                "id": table1_x.id,
                "order": "1.00000000000000000000",
            },
            {
                f"field_{table_primary_field.id}": table1_y.p,
                f"field_{linkrowfield.id}": [{"id": table2_3.id, "value": table2_3.p}],
                f"field_{lookup_field.id}": [
                    {
                        "value": table3_c.p,
                        "ids": {
                            f"database_table_{table2.id}": table2_3.id,
                            f"database_table_{table3.id}": table3_c.id,
                        },
                    },
                    {
                        "value": table3_d.p,
                        "ids": {
                            f"database_table_{table2.id}": table2_3.id,
                            f"database_table_{table3.id}": table3_d.id,
                        },
                    },
                ],
                "id": table1_y.id,
                "order": "1.00000000000000000000",
            },
        ],
    }
@pytest.mark.django_db
def test_can_delete_lookup_field_value(
    data_fixture, api_client, django_assert_num_queries
):
    """Deleting a row in the looked-up table must remove both its link row
    entry and its contribution to the dependent lookup formula."""
    user, token = data_fixture.create_user_and_token()
    table = data_fixture.create_database_table(user=user)
    table2 = data_fixture.create_database_table(user=user, database=table.database)
    table_primary_field = data_fixture.create_text_field(
        name="p", table=table, primary=True
    )
    data_fixture.create_text_field(name="primaryfield", table=table2, primary=True)
    looked_up_field = data_fixture.create_date_field(
        name="lookupfield",
        table=table2,
        date_include_time=False,
        date_format="US",
    )
    linkrowfield = FieldHandler().create_field(
        user,
        table,
        "link_row",
        name="linkrowfield",
        link_row_table=table2,
    )
    # Two rows in table2; only row `a` has a date in 2021.
    table2_model = table2.get_model(attribute_names=True)
    a = table2_model.objects.create(lookupfield=f"2021-02-01", primaryfield="primary a")
    b = table2_model.objects.create(lookupfield=f"2022-02-03", primaryfield="primary b")
    table_model = table.get_model(attribute_names=True)
    table_row = table_model.objects.create(p="table row 1")
    table_row.linkrowfield.add(a.id)
    table_row.linkrowfield.add(b.id)
    table_row.save()
    # Formula answers "yes" for looked-up 2021 dates and "no" otherwise.
    formulafield = FieldHandler().create_field(
        user,
        table,
        "formula",
        name="formulafield",
        formula=f"IF(datetime_format(lookup('{linkrowfield.name}',"
        f"'{looked_up_field.name}'), "
        f"'YYYY')='2021', 'yes', 'no')",
    )
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": "table row 1",
                f"field_{linkrowfield.id}": [
                    {"id": a.id, "value": "primary a"},
                    {"id": b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [
                    {"value": "yes", "id": a.id},
                    {"value": "no", "id": b.id},
                ],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }
    # Delete row `a` from the looked-up table through the API.
    response = api_client.delete(
        reverse(
            "api:database:rows:item",
            kwargs={"table_id": table2.id, "row_id": a.id},
        ),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_204_NO_CONTENT
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Both the link row list and the formula result now only reference `b`.
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": "table row 1",
                f"field_{linkrowfield.id}": [
                    {"id": b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [
                    {"value": "no", "id": b.id},
                ],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }
@pytest.mark.django_db
def test_can_delete_double_link_lookup_field_value(
    data_fixture, api_client, django_assert_num_queries
):
    """Deleting rows at either hop of a two-level lookup chain
    (table -> table2 -> table3) must remove the corresponding entries from
    the dependent lookup formula on the first table."""
    user, token = data_fixture.create_user_and_token()
    table = data_fixture.create_database_table(user=user)
    table2 = data_fixture.create_database_table(user=user, database=table.database)
    table3 = data_fixture.create_database_table(user=user, database=table.database)
    table_primary_field = data_fixture.create_text_field(
        name="p", table=table, primary=True
    )
    data_fixture.create_text_field(name="primaryfield", table=table2, primary=True)
    data_fixture.create_text_field(name="primaryfield", table=table3, primary=True)
    table2_linkrowfield = FieldHandler().create_field(
        user,
        table2,
        "link_row",
        name="linkrowfield",
        link_row_table=table3,
    )
    table3_model = table3.get_model(attribute_names=True)
    table3_1 = table3_model.objects.create(primaryfield="table 3 row 1")
    table3_2 = table3_model.objects.create(primaryfield="table 3 row 2")
    linkrowfield = FieldHandler().create_field(
        user,
        table,
        "link_row",
        name="linkrowfield",
        link_row_table=table2,
    )
    # table2_a -> table3_1 and table2_b -> table3_2; the single table row
    # links to both table2 rows.
    table2_model = table2.get_model(attribute_names=True)
    table2_a = table2_model.objects.create(primaryfield="primary a")
    table2_a.linkrowfield.add(table3_1.id)
    table2_a.save()
    table2_b = table2_model.objects.create(primaryfield="primary b")
    table2_b.linkrowfield.add(table3_2.id)
    table2_b.save()
    table_model = table.get_model(attribute_names=True)
    table_row = table_model.objects.create(p="table row 1")
    table_row.linkrowfield.add(table2_a.id)
    table_row.linkrowfield.add(table2_b.id)
    table_row.save()
    formulafield = FieldHandler().create_field(
        user,
        table,
        "formula",
        name="formulafield",
        formula=f"lookup('{linkrowfield.name}','{table2_linkrowfield.name}')",
    )
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    # Initially both chains are present in the lookup result.
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": "table row 1",
                f"field_{linkrowfield.id}": [
                    {"id": table2_a.id, "value": "primary a"},
                    {"id": table2_b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [
                    {
                        "value": table3_1.primaryfield,
                        "ids": {
                            f"database_table_{table2.id}": table2_a.id,
                            f"database_table_{table3.id}": table3_1.id,
                        },
                    },
                    {
                        "value": table3_2.primaryfield,
                        "ids": {
                            f"database_table_{table2.id}": table2_b.id,
                            f"database_table_{table3.id}": table3_2.id,
                        },
                    },
                ],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }
    # Delete the intermediate row table2_a: its whole chain must disappear.
    response = api_client.delete(
        reverse(
            "api:database:rows:item",
            kwargs={"table_id": table2.id, "row_id": table2_a.id},
        ),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_204_NO_CONTENT
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": "table row 1",
                f"field_{linkrowfield.id}": [
                    {"id": table2_b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [
                    {
                        "value": table3_2.primaryfield,
                        "ids": {
                            f"database_table_{table2.id}": table2_b.id,
                            f"database_table_{table3.id}": table3_2.id,
                        },
                    },
                ],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }
    # Delete the final row table3_2: the lookup list becomes empty while the
    # intermediate link to table2_b remains.
    response = api_client.delete(
        reverse(
            "api:database:rows:item",
            kwargs={"table_id": table3.id, "row_id": table3_2.id},
        ),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.status_code == HTTP_204_NO_CONTENT
    response = api_client.get(
        reverse("api:database:rows:list", kwargs={"table_id": table.id}),
        format="json",
        HTTP_AUTHORIZATION=f"JWT {token}",
    )
    assert response.json() == {
        "count": 1,
        "next": None,
        "previous": None,
        "results": [
            {
                f"field_{table_primary_field.id}": "table row 1",
                f"field_{linkrowfield.id}": [
                    {"id": table2_b.id, "value": "primary b"},
                ],
                f"field_{formulafield.id}": [],
                "id": table_row.id,
                "order": "1.00000000000000000000",
            }
        ],
    }
@pytest.mark.django_db
def test_all_functions_are_registered():
    """Every concrete BaserowFunctionDefinition subclass must be registered."""

    def _concrete_subclasses(base):
        # Walk the subclass tree depth-first, keeping only non-abstract classes.
        found = []
        for sub in base.__subclasses__():
            if not inspect.isabstract(sub):
                found.append(sub)
            found.extend(_concrete_subclasses(sub))
        return found

    registered_types = [func.type for func in formula_function_registry.get_all()]
    assert len(registered_types) == len(
        _concrete_subclasses(BaserowFunctionDefinition)
    )
@pytest.mark.django_db
def test_row_dependency_update_functions_do_no_row_updates_for_same_table(
    data_fixture, django_assert_num_queries
):
    """Row-dependency hooks for a same-table formula must queue no row updates."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    field_handler.create_field(user=user, table=table, type_name="text", name="a")
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="formula",
        formula="field('a')",
    )
    model = table.get_model()
    row = model.objects.create()
    field_type = FormulaFieldType()
    collector = CachingFieldUpdateCollector(table, existing_model=model)
    # Fire every dependency hook with both a missing (None) and an empty path,
    # in the same order as each hook would normally be exercised.
    hooks = (
        field_type.row_of_dependency_updated,
        field_type.row_of_dependency_created,
        field_type.row_of_dependency_deleted,
    )
    for hook in hooks:
        for path in (None, []):
            hook(formula_field, row, collector, path)
    # Applying whatever was collected must not hit the database at all.
    with django_assert_num_queries(0):
        collector.apply_updates_and_get_updated_fields()
@pytest.mark.django_db
def test_recalculated_internal_type_with_incorrect_syntax_formula_sets_to_invalid(
    data_fixture,
):
    """Saving a syntactically broken formula must flag the field as invalid."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    field_handler.create_field(user=user, table=table, type_name="text", name="a")
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="formula",
        formula="field('a')",
    )
    # Overwrite the formula with something the parser cannot accept and re-save.
    formula_field.formula = "invalid"
    formula_field.save()
    assert formula_field.formula_type == BaserowFormulaInvalidType.type
    assert "Invalid syntax" in formula_field.error
@pytest.mark.django_db
def test_accessing_cached_internal_formula_second_time_does_no_queries(
    data_fixture, django_assert_num_queries
):
    """Cached formula properties must be served from memory after creation."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    text_field = field_handler.create_field(
        user=user, table=table, type_name="text", name="a"
    )
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="formula",
        formula="field('a')",
    )
    expected_internal = f"error_to_null(field('{text_field.db_column}'))"
    # All three cached properties must be readable with zero queries.
    with django_assert_num_queries(0):
        assert str(formula_field.cached_untyped_expression) == formula_field.formula
        assert (
            str(formula_field.cached_typed_internal_expression) == expected_internal
        )
        assert formula_field.cached_formula_type.type == BaserowFormulaTextType.type
@pytest.mark.django_db
def test_saving_after_properties_have_been_cached_does_recaclulation(data_fixture):
    """Changing and saving the formula must refresh all cached properties."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    text_field = field_handler.create_field(
        user=user, table=table, type_name="text", name="a"
    )
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="formula",
        formula="field('a')",
    )
    # Warm the caches by reading every cached property once.
    assert str(formula_field.cached_untyped_expression) == formula_field.formula
    assert (
        str(formula_field.cached_typed_internal_expression)
        == f"error_to_null(field('{text_field.db_column}'))"
    )
    assert formula_field.cached_formula_type.type == BaserowFormulaTextType.type
    # Saving a new formula should invalidate and recalculate those caches.
    formula_field.formula = "1"
    formula_field.save()
    assert str(formula_field.cached_untyped_expression) == "1"
    assert str(formula_field.cached_typed_internal_expression) == "error_to_nan(1)"
    assert formula_field.cached_formula_type.type == BaserowFormulaNumberType.type
@pytest.mark.django_db
def test_renaming_dependency_maintains_dependency_link(data_fixture):
    """Renaming a referenced field keeps the same dependency row intact."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    text_field = field_handler.create_field(
        user=user, table=table, type_name="text", name="a"
    )
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="formula",
        formula="field('a')",
    )
    original_dep = formula_field.dependencies.get()
    assert formula_field.field_dependencies.get().id == text_field.id
    assert original_dep.broken_reference_field_name is None
    assert original_dep.dependency_id == text_field.id
    field_handler.update_field(user, text_field, name="other")
    formula_field.refresh_from_db()
    # The dependency row survives the rename and the formula text follows it.
    assert formula_field.dependencies.get().id == original_dep.id
    assert formula_field.field_dependencies.get().id == text_field.id
    assert formula_field.formula == "field('other')"
@pytest.mark.django_db
def test_can_insert_and_update_rows_with_formula_referencing_single_select(
    data_fixture,
):
    """A formula mirroring a single select must track row inserts and updates."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    option_field = data_fixture.create_single_select_field(
        table=table, name="option_field", order=1
    )
    option_a = data_fixture.create_select_option(
        field=option_field, value="A", color="blue"
    )
    option_b = data_fixture.create_select_option(
        field=option_field, value="B", color="red"
    )
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="2",
        formula="field('option_field')",
    )

    def formula_value_of(target_row):
        # Re-read the row so the database-computed formula column is fresh.
        target_row.refresh_from_db()
        return getattr(target_row, f"field_{formula_field.id}")

    def serialized(option):
        return {"id": option.id, "color": option.color, "value": option.value}

    row = RowHandler().create_row(
        user=user, table=table, values={f"field_{option_field.id}": option_a.id}
    )
    assert formula_value_of(row) == serialized(option_a)
    row = RowHandler().update_row(
        user=user,
        table=table,
        row_id=row.id,
        values={f"field_{option_field.id}": option_b.id},
    )
    assert formula_value_of(row) == serialized(option_b)
    # A row created without choosing an option yields a null formula value.
    row = RowHandler().create_row(user=user, table=table, values={})
    assert formula_value_of(row) is None
@pytest.mark.django_db
def test_cannot_create_view_filter_or_sort_on_invalid_field(data_fixture):
    """Broken, single-select and array formula fields must reject every view
    filter type and both sort directions."""
    user = data_fixture.create_user()
    table, other_table, link = data_fixture.create_two_linked_tables(user=user)
    grid_view = data_fixture.create_grid_view(user, table=table)
    # Create a formula referencing `source`, then delete `source` so the
    # referencing formula becomes invalid.
    first_formula_field = FieldHandler().create_field(
        user, table, "formula", formula="1", name="source"
    )
    broken_formula_field = FieldHandler().create_field(
        user, table, "formula", formula="field('source')", name="a"
    )
    FieldHandler().delete_field(user, first_formula_field)
    option_field = data_fixture.create_single_select_field(
        table=table, name="option_field", order=1
    )
    data_fixture.create_select_option(field=option_field, value="A", color="blue")
    data_fixture.create_select_option(field=option_field, value="B", color="red")
    single_select_formula_field = FieldHandler().create_field(
        user=user,
        table=table,
        type_name="formula",
        name="2",
        formula="field('option_field')",
    )
    lookup_field = FieldHandler().create_field(
        user=user,
        table=table,
        type_name="lookup",
        name="lookup",
        through_field_name=link.name,
        target_field_name="primary",
    )
    # Re-fetch from the database so the recalculated formula_type is visible.
    broken_formula_field = FormulaField.objects.get(id=broken_formula_field.id)
    single_select_formula_field = FormulaField.objects.get(
        id=single_select_formula_field.id
    )
    lookup_field = LookupField.objects.get(id=lookup_field.id)
    assert broken_formula_field.formula_type == "invalid"
    assert single_select_formula_field.formula_type == "single_select"
    assert lookup_field.formula_type == "array"
    fields_which_cant_yet_be_sorted_or_filtered = [
        broken_formula_field,
        single_select_formula_field,
        lookup_field,
    ]
    # Every registered filter type must be rejected for each of these fields.
    for field in fields_which_cant_yet_be_sorted_or_filtered:
        for view_filter_type in view_filter_type_registry.get_all():
            with pytest.raises(ViewFilterTypeNotAllowedForField):
                ViewHandler().create_filter(
                    user,
                    grid_view,
                    field,
                    view_filter_type.type,
                    "",
                )
    # Sorting must be rejected in both directions as well.
    for field in fields_which_cant_yet_be_sorted_or_filtered:
        with pytest.raises(ViewSortFieldNotSupported):
            ViewHandler().create_sort(user, grid_view, field, SORT_ORDER_ASC)
        with pytest.raises(ViewSortFieldNotSupported):
            ViewHandler().create_sort(user, grid_view, field, SORT_ORDER_DESC)
@pytest.mark.django_db
def test_can_cache_and_uncache_formula_model_field(
    data_fixture,
):
    """Round-tripping a formula model field through the cache preserves it."""
    user = data_fixture.create_user()
    table = data_fixture.create_database_table(user=user)
    field_handler = FieldHandler()
    formula_field = field_handler.create_field(
        user=user,
        table=table,
        type_name="formula",
        name="2",
        formula="'a'",
    )
    field_type = field_type_registry.get_by_model(formula_field)
    model_field = field_type.get_model_field(formula_field)
    generated_models_cache.set("test_formula_key", model_field)
    restored = generated_models_cache.get("test_formula_key")
    # The cached copy must compare equal, keep its Baserow expression mixin,
    # retain its concrete Django field class and carry the same expression.
    assert restored == model_field
    assert isinstance(restored, BaserowExpressionField)
    assert restored.__class__ == TextField
    assert str(restored.expression) == str(model_field.expression)
@pytest.mark.django_db
def test_inserting_a_row_with_lookup_field_immediately_populates_it_with_empty_list(
    data_fixture,
):
    """A freshly inserted row must get "[]" (not NULL) in its lookup column."""
    user = data_fixture.create_user()
    table_a, table_b, _link_field = data_fixture.create_two_linked_tables(user=user)
    data_fixture.create_text_field(name="target", table=table_b)
    model_a = table_a.get_model(attribute_names=True)
    model_b = table_b.get_model(attribute_names=True)
    target_row_1 = model_b.objects.create(primary="1", target="target 1")
    target_row_2 = model_b.objects.create(primary="2", target="target 2")
    linking_row = model_a.objects.create(primary="a")
    linking_row.link.add(target_row_1.id)
    linking_row.link.add(target_row_2.id)
    linking_row.save()
    lookup_field = FieldHandler().create_field(
        user,
        table_a,
        "lookup",
        name="lookup",
        through_field_name="link",
        target_field_name="target",
    )
    # A brand new row with no links should still be populated with an empty
    # JSON list rather than a database NULL.
    fresh_row = table_a.get_model().objects.create()
    lookup_value = getattr(fresh_row, f"field_{lookup_field.id}")
    assert lookup_value is not None
    assert lookup_value == "[]"
| 35.108239 | 88 | 0.649802 | 0 | 0 | 0 | 0 | 41,773 | 0.961094 | 0 | 0 | 6,133 | 0.141105 |
7eec16b6d1d11372ab91abbcfdb3714c1f54cf45 | 2,647 | py | Python | src/animation.py | ngruver/decon-hnn | 6e6c7e9962568214e1708fb933b715a39328fc7b | [
"Apache-2.0"
] | 6 | 2022-02-14T04:52:59.000Z | 2022-03-08T05:11:34.000Z | src/animation.py | ngruver/decon-hnn | 6e6c7e9962568214e1708fb933b715a39328fc7b | [
"Apache-2.0"
] | null | null | null | src/animation.py | ngruver/decon-hnn | 6e6c7e9962568214e1708fb933b715a39328fc7b | [
"Apache-2.0"
] | null | null | null | from oil.utils.utils import export
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.animation as animation
import numpy as np
@export
class Animation(object):
    """Matplotlib animation of ``n`` moving bodies, each drawn as a marker
    plus a fading trajectory trail, in 2D or 3D depending on the data."""

    def __init__(self, qt,body=None):
        """qt: trajectory tensor of shape (T, n, d), d in {2, 3}.

        NOTE(review): ``qt.data.numpy()`` implies qt is a torch tensor —
        confirm. ``body`` is accepted but never used in this class.
        """
        self.qt = qt.data.numpy()
        T,n,d = qt.shape
        assert d in (2,3), "too many dimensions for animation"
        self.fig = plt.figure()
        # Full-figure axes; only use a 3D projection when the data is 3D.
        self.ax = self.fig.add_axes([0, 0, 1, 1],projection='3d') if d==3 else self.fig.add_axes([0, 0, 1, 1])
        #self.ax.axis('equal')
        # Axis limits: per-coordinate min/max over time and bodies, padded by
        # 10% of the range so trajectories never touch the frame edge.
        xyzmin = self.qt.min(0).min(0)#.min(dim=0)[0].min(dim=0)[0]
        xyzmax = self.qt.max(0).max(0)#.max(dim=0)[0].max(dim=0)[0]
        delta = xyzmax-xyzmin
        lower = xyzmin-.1*delta; upper = xyzmax+.1*delta
        self.ax.set_xlim((min(lower),max(upper)))
        self.ax.set_ylim((min(lower),max(upper)))
        if d==3: self.ax.set_zlim((min(lower),max(upper)))
        if d!=3: self.ax.set_aspect("equal")
        #self.ax.auto_scale_xyz()
        # One marker artist and one trail-line artist per body, each drawn in
        # a distinct default-cycle colour (replace=False implies n <= 10).
        empty = d*[[]]
        self.colors = np.random.choice([f"C{i}" for i in range(10)],size=n,replace=False)
        self.objects = {
            'pts':sum([self.ax.plot(*empty, "o", ms=6,color=self.colors[i]) for i in range(n)], []),
            'traj_lines':sum([self.ax.plot(*empty, "-",color=self.colors[i]) for i in range(n)], []),
        }

    def init(self):
        """Blitting init: clear every artist's data and return all artists."""
        empty = 2*[[]]
        for obj in self.objects.values():
            for elem in obj:
                elem.set_data(*empty)
                if self.qt.shape[-1]==3: elem.set_3d_properties([])
        return sum(self.objects.values(),[])

    def update(self, i=0):
        """Draw frame ``i``: current position marker plus a trail of at most
        the last 150 timesteps per body. Returns the updated artists."""
        T,n,d = self.qt.shape
        trail_len = 150
        for j in range(n):
            # trails
            xyz = self.qt[max(i - trail_len,0): i + 1,j,:]
            #chunks = xyz.shape[0]//10
            #xyz_chunks = torch.chunk(xyz,chunks)
            #for i,xyz in enumerate(xyz_chunks):
            self.objects['traj_lines'][j].set_data(*xyz[...,:2].T)
            if d==3: self.objects['traj_lines'][j].set_3d_properties(xyz[...,2].T)
            self.objects['pts'][j].set_data(*xyz[-1:,...,:2].T)
            if d==3: self.objects['pts'][j].set_3d_properties(xyz[-1:,...,2].T)
        #self.fig.canvas.draw()
        return sum(self.objects.values(),[])

    def animate(self):
        """Render the full animation and return it as an HTML5 video snippet."""
        return self._animate().to_html5_video()

    def _animate(self):
        # ~30 fps (33 ms per frame); blitting enabled for speed.
        return animation.FuncAnimation(self.fig,self.update,frames=self.qt.shape[0],
                        interval=33,init_func=self.init,blit=True,)
| 41.359375 | 111 | 0.548546 | 2,462 | 0.93011 | 0 | 0 | 2,471 | 0.93351 | 0 | 0 | 371 | 0.140159 |
7eedd09fb3c8d92730036810d13bd71098a0604a | 1,791 | py | Python | asyncpg_opentracing/tracing.py | condorcet/asyncpg_opentracing | 7e8342c2ab9d360507695f802b9a74803f76675e | [
"MIT"
] | 3 | 2021-02-07T02:55:46.000Z | 2021-11-25T21:32:19.000Z | asyncpg_opentracing/tracing.py | condorcet/asyncpg_opentracing | 7e8342c2ab9d360507695f802b9a74803f76675e | [
"MIT"
] | null | null | null | asyncpg_opentracing/tracing.py | condorcet/asyncpg_opentracing | 7e8342c2ab9d360507695f802b9a74803f76675e | [
"MIT"
] | null | null | null | from functools import wraps
from opentracing import global_tracer, tags, logs
from contextlib import contextmanager
def operation_name(query: str) -> str:
    """Build a tracing span name from the query's leading SQL keyword.

    Splits on any whitespace (not just single spaces) so queries formatted
    with newlines or tabs still produce e.g. ``"asyncpg SELECT"``, and
    tolerates an empty query instead of raising.
    """
    # TODO: some statements consist of two words, e.g. CREATE TABLE.
    tokens = query.split()
    statement = tokens[0].strip(';').upper() if tokens else ''
    return 'asyncpg ' + statement
@contextmanager
def con_context(handler, query, query_args):
    """Open a client span around one query executed on *handler*.

    Any exception raised inside the block is logged onto the span and
    re-raised unchanged.
    """
    span_tags = {
        tags.DATABASE_TYPE: 'SQL',
        tags.DATABASE_STATEMENT: query,
        tags.DATABASE_USER: handler._params.user,
        tags.DATABASE_INSTANCE: handler._params.database,
        'db.params': query_args,
        tags.SPAN_KIND: tags.SPAN_KIND_RPC_CLIENT,
    }
    with global_tracer().start_active_span(
        operation_name=operation_name(query), tags=span_tags
    ) as scope:
        try:
            yield
        except Exception as exc:
            # Record the failure on the span before letting it propagate.
            scope.span.log_kv({
                logs.EVENT: 'error',
                logs.ERROR_KIND: type(exc).__name__,
                logs.ERROR_OBJECT: exc,
                logs.MESSAGE: str(exc),
            })
            raise
def wrap(coro):
    """Wrap a connection coroutine of shape (self, query, *args) with tracing."""
    @wraps(coro)
    async def traced(self, query, *args, **kwargs):
        with con_context(self, query, args):
            return await coro(self, query, *args, **kwargs)
    return traced
def wrap_executemany(coro):
    """Wrap executemany-style coroutines, whose second positional parameter
    is the batch of argument tuples, with tracing."""
    @wraps(coro)
    async def traced(self, query, args, *_args, **kwargs):
        with con_context(self, query, args):
            return await coro(self, query, args, *_args, **kwargs)
    return traced
def tracing_connection(cls):
    """Class decorator instrumenting asyncpg's query methods with tracing."""
    for method_name in ('fetch', 'fetchval', 'fetchrow', 'execute'):
        setattr(cls, method_name, wrap(getattr(cls, method_name)))
    # executemany has a different signature, so it needs its own wrapper.
    cls.executemany = wrap_executemany(cls.executemany)
    return cls
| 27.553846 | 78 | 0.627024 | 0 | 0 | 754 | 0.420994 | 1,124 | 0.627582 | 320 | 0.178671 | 113 | 0.063093 |
7eee18f21f85e2ef6c713447b04ed57350a47292 | 3,281 | py | Python | pysnmp-with-texts/Juniper-V35-CONF.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 8 | 2019-05-09T17:04:00.000Z | 2021-06-09T06:50:51.000Z | pysnmp-with-texts/Juniper-V35-CONF.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 4 | 2019-05-31T16:42:59.000Z | 2020-01-31T21:57:17.000Z | pysnmp-with-texts/Juniper-V35-CONF.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module Juniper-V35-CONF (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/Juniper-V35-CONF
# Produced by pysmi-0.3.4 at Wed May 1 14:04:44 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
# NOTE: auto-generated pysmi output — `mibBuilder` is injected by the pysnmp
# MIB loading machinery when this module is executed. Do not edit by hand.
# Pull the symbol factories this module needs out of already-loaded MIBs.
OctetString, ObjectIdentifier, Integer = mibBuilder.importSymbols("ASN1", "OctetString", "ObjectIdentifier", "Integer")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ValueRangeConstraint, ConstraintsUnion, SingleValueConstraint, ValueSizeConstraint, ConstraintsIntersection = mibBuilder.importSymbols("ASN1-REFINEMENT", "ValueRangeConstraint", "ConstraintsUnion", "SingleValueConstraint", "ValueSizeConstraint", "ConstraintsIntersection")
juniAgents, = mibBuilder.importSymbols("Juniper-Agents", "juniAgents")
ModuleCompliance, AgentCapabilities, NotificationGroup = mibBuilder.importSymbols("SNMPv2-CONF", "ModuleCompliance", "AgentCapabilities", "NotificationGroup")
Bits, Integer32, MibIdentifier, Counter32, Gauge32, NotificationType, IpAddress, ModuleIdentity, iso, ObjectIdentity, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, TimeTicks, Unsigned32 = mibBuilder.importSymbols("SNMPv2-SMI", "Bits", "Integer32", "MibIdentifier", "Counter32", "Gauge32", "NotificationType", "IpAddress", "ModuleIdentity", "iso", "ObjectIdentity", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "TimeTicks", "Unsigned32")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# Module identity for the X.21/V.35 agent (OID 1.3.6.1.4.1.4874.5.2.54).
juniV35Agent = ModuleIdentity((1, 3, 6, 1, 4, 1, 4874, 5, 2, 54))
juniV35Agent.setRevisions(('2002-09-06 16:54', '2002-01-25 21:43',))
# Descriptive texts are only attached on builder versions that support them.
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    if mibBuilder.loadTexts: juniV35Agent.setRevisionsDescriptions(('Replaced Unisphere names with Juniper names.', 'The initial release of this management information module.',))
if mibBuilder.loadTexts: juniV35Agent.setLastUpdated('200209061654Z')
if mibBuilder.loadTexts: juniV35Agent.setOrganization('Juniper Networks, Inc.')
if mibBuilder.loadTexts: juniV35Agent.setContactInfo(' Juniper Networks, Inc. Postal: 10 Technology Park Drive Westford, MA 01886-3146 USA Tel: +1 978 589 5800 E-mail: mib@Juniper.net')
if mibBuilder.loadTexts: juniV35Agent.setDescription('The agent capabilities definitions for the X.21/V.35 server component of the SNMP agent in the Juniper E-series family of products.')
# Agent capabilities statement for version 1 of the X.21/V.35 component.
juniV35AgentV1 = AgentCapabilities((1, 3, 6, 1, 4, 1, 4874, 5, 2, 54, 1))
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniV35AgentV1 = juniV35AgentV1.setProductRelease('Version 1 of the X.21/V.35 component of the JUNOSe SNMP agent. This\n version of the X.21/V.35 component is supported in JUNOSe 4.0 and\n subsequent system releases.')
if getattr(mibBuilder, 'version', (0, 0, 0)) > (4, 4, 0):
    juniV35AgentV1 = juniV35AgentV1.setStatus('current')
if mibBuilder.loadTexts: juniV35AgentV1.setDescription('The MIB supported by the SNMP agent for the X.21/V.35 application in JUNOSe.')
mibBuilder.exportSymbols("Juniper-V35-CONF", juniV35AgentV1=juniV35AgentV1, PYSNMP_MODULE_ID=juniV35Agent, juniV35Agent=juniV35Agent)
| 105.83871 | 477 | 0.772021 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,653 | 0.50381 |
7eeeeddb273daae8d265bebeae3ae172280c5d3f | 3,588 | py | Python | src/sqlfluff/core/rules/functional/segment_predicates.py | r0fls/sqlfluff | 3bc658e26758d1eb1ce35dade2e2cf064a4d6675 | [
"MIT"
] | null | null | null | src/sqlfluff/core/rules/functional/segment_predicates.py | r0fls/sqlfluff | 3bc658e26758d1eb1ce35dade2e2cf064a4d6675 | [
"MIT"
] | null | null | null | src/sqlfluff/core/rules/functional/segment_predicates.py | r0fls/sqlfluff | 3bc658e26758d1eb1ce35dade2e2cf064a4d6675 | [
"MIT"
] | null | null | null | """Defines commonly used segment predicates for rule writers.
For consistency, all the predicates in this module are implemented as functions
returning functions. This avoids rule writers having to remember the
distinction between normal functions and functions returning functions.
This is not necessarily a complete set of predicates covering all possible
requirements. Rule authors can define their own predicates as needed, either
as regular functions, `lambda`, etc.
"""
from typing import Callable
from sqlfluff.core.parser import BaseSegment
def is_type(*seg_type: str) -> Callable[[BaseSegment], bool]:
    """Return a predicate testing whether a segment has one of the types."""

    def _predicate(segment: BaseSegment):
        return segment.is_type(*seg_type)

    return _predicate
def is_name(*seg_name: str) -> Callable[[BaseSegment], bool]:
    """Return a predicate testing whether a segment has one of the names."""

    def _predicate(segment: BaseSegment):
        return segment.is_name(*seg_name)

    return _predicate
def is_keyword(*keyword_name) -> Callable[[BaseSegment], bool]:
    """Return a predicate matching keyword segments with one of the names."""
    type_check = is_type("keyword")
    name_check = is_name(*keyword_name)
    return and_(type_check, name_check)
def is_code() -> Callable[[BaseSegment], bool]:
    """Return a predicate that is true for code segments."""

    def _predicate(segment: BaseSegment) -> bool:
        return segment.is_code

    return _predicate
def is_comment() -> Callable[[BaseSegment], bool]:
    """Return a predicate that is true for comment segments."""

    def _predicate(segment: BaseSegment) -> bool:
        return segment.is_comment

    return _predicate
def is_expandable() -> Callable[[BaseSegment], bool]:
    """Return a predicate that is true for expandable segments."""

    def _predicate(segment: BaseSegment) -> bool:
        return segment.is_expandable

    return _predicate
def is_meta() -> Callable[[BaseSegment], bool]:
    """Return a predicate that is true for meta segments."""

    def _predicate(segment: BaseSegment) -> bool:
        return segment.is_meta

    return _predicate
def is_raw() -> Callable[[BaseSegment], bool]:
    """Return a predicate that is true for raw segments."""

    def _predicate(segment: BaseSegment) -> bool:
        return segment.is_raw()

    return _predicate
def is_whitespace() -> Callable[[BaseSegment], bool]:
    """Return a predicate that is true for whitespace segments."""

    def _predicate(segment: BaseSegment) -> bool:
        return segment.is_whitespace

    return _predicate
def get_type() -> Callable[[BaseSegment], str]:
    """Return a function that extracts a segment's type."""

    def _getter(segment: BaseSegment) -> str:
        return segment.get_type()

    return _getter
def get_name() -> Callable[[BaseSegment], str]:
    """Return a function that extracts a segment's name."""

    def _getter(segment: BaseSegment) -> str:
        return segment.get_name()

    return _getter
def and_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
    """Combine predicates so the result is True only when all of them are."""
    def combined(segment: BaseSegment):
        # all() short-circuits in the order the predicates were supplied.
        return all(fn(segment) for fn in functions)
    return combined
def or_(*functions: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
    """Combine predicates so the result is True when any of them is."""
    def combined(segment: BaseSegment):
        # any() short-circuits in the order the predicates were supplied.
        return any(fn(segment) for fn in functions)
    return combined
def not_(fn: Callable[[BaseSegment], bool]) -> Callable[[BaseSegment], bool]:
    """Invert a predicate."""
    return lambda segment: not fn(segment)
| 26.382353 | 85 | 0.687291 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,330 | 0.37068 |
7eef35921fa0ede03616e146e1295177bb83c0f6 | 28,152 | py | Python | acro/train_uncond_dcgan.py | udibr/dcgan_code | b80e8b97193ef57ea86ecb3be684b452655fe2ac | [
"MIT"
] | 9 | 2015-12-18T09:55:35.000Z | 2018-12-02T07:04:07.000Z | acro/train_uncond_dcgan.py | udibr/dcgan_code | b80e8b97193ef57ea86ecb3be684b452655fe2ac | [
"MIT"
] | null | null | null | acro/train_uncond_dcgan.py | udibr/dcgan_code | b80e8b97193ef57ea86ecb3be684b452655fe2ac | [
"MIT"
] | 4 | 2016-01-18T08:16:38.000Z | 2019-02-12T02:29:47.000Z | """
uncond_dcgan1 made with 64x64 images from https://s3.amazonaws.com/udipublic/acro.images.tgz for train.tar.gz
"""
import argparse
parser = argparse.ArgumentParser(description='train uncoditional dcgan')
parser.add_argument('--desc',
default='uncond_dcgan',
help='name to uniquely describe this run')
parser.add_argument('--path',
default='data/jpg.hdf5',
help='where to read fuel hdf5 data file with training')
parser.add_argument('--val', type=float,
default=0.,
help="what part of the training data to use for validation")
parser.add_argument('--model',
help='start from a pre-existing model.'
' The suffixes _gen_params.jl'
' and _discrim_params.jl'
' are added to the path you supply')
parser.add_argument('--batch', type=int,
default=128,
help='batch size')
parser.add_argument('-k', type=int,
default=0,
help='# of discrim updates for each gen update.'
' 0 - alternate > 0 more d, < 0 more g')
parser.add_argument('--maxk', type=int,
default=1,
help='max value for k')
parser.add_argument('--mink', type=int,
default=-1,
help='min value for k')
parser.add_argument('--l2d', type=float,
default=1.e-5,
help="discriminator l2")
parser.add_argument('--l2decay', type=float,
default=0.,
help="reduce l2d by 1-l2decay")
parser.add_argument('--l2step', type=float,
default=0.,
help="increase(decrease) discriminator's l2"
" when generator cost is above 1.3(below 0.9)")
parser.add_argument('--dropout', type=float,
default=0.,
help="discriminator dropout")
parser.add_argument('--lr', type=float,
default=0.0002,
help="initial learning rate for adam")
parser.add_argument('--lrstep', type=float,
default=1.,
help="increa/decrease g/d learning rate")
parser.add_argument('--dbn', action='store_false',
help='dont perfrom batch normalization on discriminator')
parser.add_argument('--db1', action='store_true',
help='add bias to first layer of discriminator')
parser.add_argument('--ngf', type=int,
default=128,
help='# of gen filters')
parser.add_argument('--ndf', type=int,
default=128,
help='# of discriminator filters')
parser.add_argument('--updates', type=int,
default=100,
help='compute score every n_updates')
parser.add_argument('-z', type=int,
default=100,
help='number of hidden variables')
parser.add_argument('--znorm', action='store_true',
help='normalize z values to unit sphere')
parser.add_argument('--generate', action='store_true',
help='generate sample png and gif')
parser.add_argument('--ngif', type=int, default=1,
help='# of png images to generate. If 1 then no gif')
parser.add_argument('--nvis2', type=int,
default=14,
help='number of rows/cols of sub-images to generate')
parser.add_argument('--generate_d', type=float, default=0.,
help="minimal discrimation score when generating samples")
parser.add_argument('--generate_c', type=float, default=0.,
help="minimal classification score when generating samples")
parser.add_argument('--generate_v', type=float,
help='generate sample along a random direction with this step size')
parser.add_argument('--classify', action='store_true',
help='classify target')
parser.add_argument('--onlyclassify', action='store_true',
help='just do classify target')
parser.add_argument('--seed', type=int,
default=123,
help='seed all random generators')
parser.add_argument('--filter_label', type=int,
help='take only training data with this label (does not work with classify')
parser.add_argument('--nepochs', type=int,
default=25,
help='total number of epochs')
parser.add_argument('--niter', type=int,
default=25,
help='# of iter at starting learning rate')
parser.add_argument('--start', type=int,
default=0,
help='If not 0 then start from this epoch after loading the last model')
args = parser.parse_args()
if args.onlyclassify:
args.classify = True
if args.classify:
assert args.filter_label is None, "you can't classify and limit your data to one lable"
if args.model is None and args.start > 0:
args.model = 'models/%s/%d'%(args.desc, args.start)
import random
random.seed(args.seed)
import numpy as np
np.random.seed(args.seed)
import sys
sys.path.append('..')
import os
import json
from time import sleep
from time import time
from tqdm import tqdm, trange
from matplotlib import pyplot as plt
from sklearn.externals import joblib
import theano
import theano.tensor as T
from theano.sandbox.cuda.dnn import dnn_conv
from lib import activations
from lib import updates
from lib import inits
from lib.vis import color_grid_vis
from lib.rng import py_rng, np_rng
from lib.ops import batchnorm, conv_cond_concat, deconv, dropout, l2normalize
from lib.metrics import nnc_score, nnd_score
from lib.theano_utils import floatX, sharedX
from lib.data_utils import OneHot, shuffle, iter_data, center_crop, patch
from load import streams
def transform(X):
    """Convert a uint8 image batch to NCHW floats scaled into [-1, 1].

    Accepts batches in either HWC or CHW layout (HWC is transposed).
    NOTE(review): a center_crop preprocessing step existed here once but
    only handled (H, W, 3) input and was disabled.
    """
    sample_shape = X[0].shape
    assert sample_shape == (npx, npx, 3) or sample_shape == (3, npx, npx)
    if sample_shape == (npx, npx, 3):
        # channels-last -> channels-first
        X = X.transpose(0, 3, 1, 2)
    return floatX(X / 127.5 - 1.)
def inverse_transform(X):
    """Map generator output in [-1, 1] back to HWC images in [0, 1]."""
    images = X.reshape(-1, nc, npx, npx)
    images = images.transpose(0, 2, 3, 1)
    return (images + 1.) / 2.
k = 0 # # of discrim updates for each gen update. 0 - alternate > 0 more d, < 0 more g
l2 = 1e-5 # l2 weight decay
l2d = args.l2d # discriminator l2
l2step = args.l2step # increase(decrease) discriminator l2 when generator cost is above 1.3(below 0.9)
margin = 0.3 # Dont optimize discriminator(generator) when classification error below margin(above 1-margin)
nvis2 = args.nvis2
nvis = nvis2*nvis2 # # of samples to visualize during training
b1 = 0.5 # momentum term of adam
nc = 3 # # of channels in image
nbatch = args.batch # # of examples in batch
npx = 64 # # of pixels width/height of images
nz = args.z # # of dim for Z
ngf = args.ngf # # of gen filters in first conv layer
ndf = args.ndf # # of discrim filters in first conv layer
nx = npx*npx*nc # # of dimensions in X
niter = args.niter # # of iter at starting learning rate
niter_decay = args.nepochs - niter # # of iter to linearly decay learning rate to zero
lr = args.lr # initial learning rate for adam
ntrain = None # # of examples to train on. None take all
ngif = args.ngif # # of images in a gif
desc = args.desc
model_dir = 'models/%s'%desc
samples_dir = 'samples/%s'%desc
if not os.path.exists('logs/'):
os.makedirs('logs/')
if not os.path.exists(model_dir):
os.makedirs(model_dir)
if not os.path.exists(samples_dir):
os.makedirs(samples_dir)
###########################################
# data
if not args.generate:
tr_data, tr_stream, val_stream, ntrain_s, nval_s = streams(ntrain=ntrain,
batch_size=args.batch,
path=args.path,
val = args.val,
filter_label=args.filter_label)
if ntrain is None:
ntrain = tr_data.num_examples
print '# examples', tr_data.num_examples
print '# training examples', ntrain_s
print '# validation examples', nval_s
tr_handle = tr_data.open()
vaX,labels = tr_data.get_data(tr_handle, slice(0, 10000))
vaX = transform(vaX)
means = labels.mean(axis=0)
print('labels ',labels.shape,means,means[0]/means[1])
vaY,labels = tr_data.get_data(tr_handle, slice(10000, min(ntrain, 20000)))
vaY = transform(vaY)
va_nnd_1k = nnd_score(vaY.reshape((len(vaY),-1)), vaX.reshape((len(vaX),-1)), metric='euclidean')
print 'va_nnd_1k = %.2f'%(va_nnd_1k)
means = labels.mean(axis=0)
print('labels ',labels.shape,means,means[0]/means[1])
#####################################
# shared variables
gifn = inits.Normal(scale=0.02)
difn = inits.Normal(scale=0.02)
gain_ifn = inits.Normal(loc=1., scale=0.02)
bias_ifn = inits.Constant(c=0.)
gw = gifn((nz, ngf*8*4*4), 'gw')
gg = gain_ifn((ngf*8*4*4), 'gg')
gb = bias_ifn((ngf*8*4*4), 'gb')
gw2 = gifn((ngf*8, ngf*4, 5, 5), 'gw2')
gg2 = gain_ifn((ngf*4), 'gg2')
gb2 = bias_ifn((ngf*4), 'gb2')
gw3 = gifn((ngf*4, ngf*2, 5, 5), 'gw3')
gg3 = gain_ifn((ngf*2), 'gg3')
gb3 = bias_ifn((ngf*2), 'gb3')
gw4 = gifn((ngf*2, ngf, 5, 5), 'gw4')
gg4 = gain_ifn((ngf), 'gg4')
gb4 = bias_ifn((ngf), 'gb4')
gwx = gifn((ngf, nc, 5, 5), 'gwx')
# Discriminator parameters: 4 conv layers (weights, batch-norm gain/bias)
# plus two linear output heads (real/fake and auxiliary classification).
dw = difn((ndf, nc, 5, 5), 'dw')
db = bias_ifn((ndf), 'db')
dw2 = difn((ndf*2, ndf, 5, 5), 'dw2')
dg2 = gain_ifn((ndf*2), 'dg2')
db2 = bias_ifn((ndf*2), 'db2')
dw3 = difn((ndf*4, ndf*2, 5, 5), 'dw3')
dg3 = gain_ifn((ndf*4), 'dg3')
db3 = bias_ifn((ndf*4), 'db3')
dw4 = difn((ndf*8, ndf*4, 5, 5), 'dw4')
dg4 = gain_ifn((ndf*8), 'dg4')
db4 = bias_ifn((ndf*8), 'db4')
dwy = difn((ndf*8*4*4, 1), 'dwy')
# Fixed: this shared variable was previously also named 'dwy', colliding with
# the one above. Model (de)serialization is positional, so renaming is safe.
dwy1 = difn((ndf*8*4*4, 1), 'dwy1')
# models
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()
bce = T.nnet.binary_crossentropy
# generator model
gen_params = [gw, gg, gb, gw2, gg2, gb2, gw3, gg3, gb3, gw4, gg4, gb4, gwx]
def gen(Z, w, g, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wx):
    """Generator network: latent Z -> (nc, 64, 64) image with tanh output.

    A dense projection to a 4x4 feature map followed by four stride-2
    deconvolutions, each (except the last) with batch-norm + ReLU.
    """
    hidden = relu(batchnorm(T.dot(Z, w), g=g, b=b))
    hidden = hidden.reshape((hidden.shape[0], ngf*8, 4, 4))
    hidden = relu(batchnorm(deconv(hidden, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
    hidden = relu(batchnorm(deconv(hidden, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
    hidden = relu(batchnorm(deconv(hidden, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
    return tanh(deconv(hidden, wx, subsample=(2, 2), border_mode=(2, 2)))
# discriminator model
"""
#old model
if args.dbn:
if args.db1:
print "Bias on layer 1 + batch normalization"
discrim_params = [dw, db, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))+b.dimshuffle('x', 0, 'x', 'x'))
h = dropout(h, args.dropout)
h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
h2 = dropout(h2, args.dropout)
h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
h3 = dropout(h3, args.dropout)
h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
else:
print "Batch normalization"
discrim_params = [dw, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
def discrim(X, w, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
h = dropout(h, args.dropout)
h2 = lrelu(batchnorm(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2)), g=g2, b=b2))
h2 = dropout(h2, args.dropout)
h3 = lrelu(batchnorm(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2)), g=g3, b=b3))
h3 = dropout(h3, args.dropout)
h4 = lrelu(batchnorm(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2)), g=g4, b=b4))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
else:
if args.db1:
print "Bias on layer 1"
discrim_params = [dw, db, dw2, db2, dw3, db3, dw4, db4, dwy, dwy1]
def discrim(X, w, b, w2, b2, w3, b3, w4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))+b.dimshuffle('x', 0, 'x', 'x'))
h = dropout(h, args.dropout)
h2 = lrelu(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))+b2.dimshuffle('x', 0, 'x', 'x'))
h2 = dropout(h2, args.dropout)
h3 = lrelu(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))+b3.dimshuffle('x', 0, 'x', 'x'))
h3 = dropout(h3, args.dropout)
h4 = lrelu(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))+b4.dimshuffle('x', 0, 'x', 'x'))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
else:
discrim_params = [dw, dw2, db2, dw3, db3, dw4, db4, dwy, dwy1]
def discrim(X, w, w2, b2, w3, b3, w4, b4, wy, wy1):
h = lrelu(dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2)))
h = dropout(h, args.dropout)
h2 = lrelu(dnn_conv(h, w2, subsample=(2, 2), border_mode=(2, 2))+b2.dimshuffle('x', 0, 'x', 'x'))
h2 = dropout(h2, args.dropout)
h3 = lrelu(dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))+b3.dimshuffle('x', 0, 'x', 'x'))
h3 = dropout(h3, args.dropout)
h4 = lrelu(dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))+b4.dimshuffle('x', 0, 'x', 'x'))
h4 = dropout(h4, args.dropout)
h4 = T.flatten(h4, 2)
y = sigmoid(T.dot(h4, wy))
y1 = sigmoid(T.dot(h4, wy1))
return y, y1
"""
#new model
# Parameter list order must match discrim()'s signature (after X).
discrim_params = [dw, db, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
def discrim(X, w, b, w2, g2, b2, w3, g3, b3, w4, g4, b4, wy, wy1):
    """Discriminator: four stride-2 conv layers, then two sigmoid heads.

    Returns (y, y1): y is the real/fake probability and y1 the auxiliary
    classification probability (used with --classify / --onlyclassify).
    Batch-norm on layers 2-4 is toggled by --dbn (plain bias otherwise);
    --db1 adds a bias to the first layer.
    """
    h0 = dnn_conv(X, w, subsample=(2, 2), border_mode=(2, 2))
    if args.db1:
        # optional first-layer bias
        h0 += b.dimshuffle('x', 0, 'x', 'x')
    h1 = lrelu(h0)
    h1 = dropout(h1, args.dropout)
    h1 = dnn_conv(h1, w2, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h1 = batchnorm(h1, g=g2, b=b2)
    else:
        h1 += b2.dimshuffle('x', 0, 'x', 'x')
    h2 = lrelu(h1)
    h2 = dropout(h2, args.dropout)
    h2 = dnn_conv(h2, w3, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h2 = batchnorm(h2, g=g3, b=b3)
    else:
        h2 += b3.dimshuffle('x', 0, 'x', 'x')
    h3 = lrelu(h2)
    h3 = dropout(h3, args.dropout)
    h3 = dnn_conv(h3, w4, subsample=(2, 2), border_mode=(2, 2))
    if args.dbn:
        h3 = batchnorm(h3, g=g4, b=b4)
    else:
        h3 += b4.dimshuffle('x', 0, 'x', 'x')
    h4 = lrelu(h3)
    h4 = dropout(h4, args.dropout)
    h4 = T.flatten(h4, 2)
    # Two independent linear heads share the final feature vector.
    y = sigmoid(T.dot(h4, wy))
    y1 = sigmoid(T.dot(h4, wy1))
    return y, y1
X = T.tensor4()
Z = T.matrix()
Y = T.matrix()
MASK = T.matrix()
gX = gen(Z, *gen_params)
p_gen, p_gen_classify = discrim(gX, *discrim_params)
p_real, p_classify = discrim(X, *discrim_params)
if args.model is not None:
print 'loading',args.model
from itertools import izip
gen_params_values = joblib.load(args.model + '_gen_params.jl')
for p, v in izip(gen_params, gen_params_values):
p.set_value(v)
discrim_params_values = joblib.load(args.model + '_discrim_params.jl')
if len(discrim_params) == len(discrim_params_values):
load_params = discrim_params
else: # support old save format
print 'loading old format',len(discrim_params),len(discrim_params_values)
if args.dbn and args.db1:
raise Exception('impossible')
load_params = [dw, db, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
elif args.dbn:
load_params = [dw, dw2, dg2, db2, dw3, dg3, db3, dw4, dg4, db4, dwy, dwy1]
elif args.db1:
load_params = [dw, db, dw2, db2, dw3, db3, dw4, db4, dwy, dwy1]
else:
load_params = [dw, dw2, db2, dw3, db3, dw4, db4, dwy, dwy1]
assert len(discrim_params_values) == len(load_params), "# params in model does not match"
for p, v in izip(load_params, discrim_params_values):
p.set_value(v)
###############################
# generate
_gen = theano.function([Z], gX)
from sklearn.preprocessing import normalize
def gen_z(n):
    """Draw n latent vectors uniform in [-1, 1]; L2-normalized if --znorm."""
    if args.znorm:
        return floatX(normalize(np_rng.uniform(-1., 1., size=(n, nz))))
    return floatX(np_rng.uniform(-1., 1., size=(n, nz)))
if args.generate:
_genscore = theano.function([Z], [gX, p_gen, p_gen_classify])
t = iter(trange(nvis))
pgs = []
pcs = []
zmbs = []
samples = []
while len(zmbs) < nvis:
zmb = gen_z(args.batch)
xmb, pg, pc = _genscore(zmb)
pgs.append(pg)
pcs.append(pc)
for i in range(args.batch):
if pg[i] >= args.generate_d and pc[i] >= args.generate_c:
zmbs.append(zmb[i])
samples.append(xmb[i])
t.next()
if len(zmbs) >= nvis:
break
pgs = np.concatenate(pgs)
pcs = np.concatenate(pcs)
print 'generate_d',pgs.mean(),pgs.std(),'generate_c',pcs.mean(),pcs.std()
samples = np.asarray(samples)
color_grid_vis(inverse_transform(samples), (nvis2, nvis2),
'%s/Z_%03d.png'%(samples_dir,0))
if args.generate_v is None:
sample_zmb0 = np.array(zmbs)
sample_zmb1 = np.roll(sample_zmb0, 1, axis=0)
for i in tqdm(range(1,ngif)):
z = abs(1.-2.*i/(ngif-1.)) # from 1 to 0 and back to almost 1
sample_zmb = z * sample_zmb0 + (1-z) * sample_zmb1
samples = np.asarray(_gen(sample_zmb))
color_grid_vis(inverse_transform(samples), (nvis2, nvis2),
'%s/Z_%03d.png'%(samples_dir,i))
else:
sample_zmb = np.array(zmbs)
v = gen_z(nvis)
for i in tqdm(range(1,ngif)):
sample_zmb += args.generate_v * v
samples = np.asarray(_gen(sample_zmb))
color_grid_vis(inverse_transform(samples), (nvis2, nvis2),
'%s/Z_%03d.png'%(samples_dir,i))
if ngif > 1:
os.system("convert -delay 15 -loop 0 {0}/Z_*.png {0}/Z.gif".format(samples_dir))
exit(0)
def gen_samples(n, nbatch=128):
    """Generate exactly n samples from the generator, nbatch at a time.

    Full-size batches are drawn first, then one final partial batch for
    the remainder (same gen_z call sequence as before, so RNG use is
    unchanged).
    """
    batches = []
    remaining = n
    while remaining > 0:
        size = min(nbatch, remaining)
        batches.append(_gen(gen_z(size)))
        remaining -= size
    return np.concatenate(batches, axis=0)
####################
d_cost_real = bce(p_real, T.ones(p_real.shape)).mean()
d_classify = (bce(p_classify, Y) * MASK).sum() / MASK.sum()
d_classify_error = (T.neq(p_classify > 0.5, Y) * MASK).sum() / MASK.sum()
d_error_real = 1.-T.mean(p_real)
d_cost_gen = bce(p_gen, T.zeros(p_gen.shape)).mean()
d_error_gen = T.mean(p_gen)
g_cost_d = bce(p_gen, T.ones(p_gen.shape)).mean()
d_cost = d_cost_real + d_cost_gen
if args.onlyclassify:
d_cost = d_classify
elif args.classify:
d_cost += d_classify
g_cost = g_cost_d
cost_target = [g_cost, d_cost, g_cost_d, d_cost_real, d_cost_gen, d_error_real, d_error_gen, d_classify, d_classify_error]
# Learning rates and the discriminator's L2 penalty live in shared variables
# so the training loop can anneal them on the fly.
lrg = sharedX(lr)
lrd = sharedX(lr)
l2t = sharedX(l2d)
d_updater = updates.Adam(lr=lrd, b1=b1, regularizer=updates.Regularizer(l2=l2t))
g_updater = updates.Adam(lr=lrg, b1=b1, regularizer=updates.Regularizer(l2=l2))
# The discriminator always trains all of its parameters (both output heads).
d_updates = d_updater(discrim_params, d_cost)
g_updates = g_updater(gen_params, g_cost)
# Fixed: a dead `updates = d_updates + g_updates` assignment shadowed the
# imported `lib.updates` module; it was never read and has been removed.
_train_g = theano.function([X, Z, Y, MASK], cost_target, updates=g_updates)
_train_d = theano.function([X, Z, Y, MASK], cost_target, updates=d_updates)
if args.onlyclassify:
    _train_classify = theano.function([X, Y, MASK], [d_classify, d_classify_error], updates=d_updates)
if args.classify:
    # Evaluation-only classification graph (no parameter updates).
    _classify_d = theano.function([X, Y, MASK], [d_classify, d_classify_error])
log_fields = [
'n_epochs',
'n_updates',
'n_examples',
'n_seconds',
'1k_va_nnd',
# '10k_va_nnd',
# '100k_va_nnd',
'g_cost',
'd_cost',
'error_r',
'error_g',
'd_cost_real',
'd_cost_gen',
'd_classify',
'd_classify_error',
'lrg','lrd',
'l2d',
]
n_updates = 0
n_epochs = 0
n_examples = 0
do_initial_valid = True
log_lines = []
if args.start > 0:
f_log = open('logs/%s.ndjson'%desc, 'rb')
for l in f_log:
j = json.loads(l.strip())
if 'valid_classify' in j:
do_initial_valid = False
continue
if j['n_epochs'] > args.start:
break
do_initial_valid = True
n_epochs = j['n_epochs']
n_updates = j['n_updates']
n_examples = j['n_examples']
lrg.set_value(floatX(j['lrg']))
lrd.set_value(floatX(j['lrd']))
l2t.set_value(floatX(j['l2d']))
log_lines.append(l)
f_log.close()
f_log = open('logs/%s.ndjson'%desc, 'wb')
for l in log_lines:
f_log.write(l)
vis_idxs = py_rng.sample(np.arange(len(vaX)), nvis)
vaX_vis = inverse_transform(vaX[vis_idxs])
color_grid_vis(vaX_vis, (args.nvis2, args.nvis2), 'samples/%s_etl_test.png'%desc)
sample_zmb = gen_z(nvis)
vaX = vaX.reshape(len(vaX), -1)
print desc.upper()
t = time()
costs = []
label_sums = np.zeros(2)
def validate():
    """Run the auxiliary classifier over the validation stream and log results.

    No-op unless --classify is on and a validation split (--val > 0) exists.
    Writes a {'valid_classify', 'valid_classify_error'} record to the ndjson log.
    """
    if args.classify and args.val > 0.:
        # NOTE(review): sleeps appear to let the async data stream settle
        # before/after a full validation pass -- confirm.
        sleep(5.)
        valid_label_sums = np.zeros(2)
        val_costs = []
        for imb,labels in tqdm(val_stream.get_epoch_iterator(), total=nval_s/nbatch):
            valid_label_sums += labels.sum(axis=0)
            # column 0 is the target label, column 1 a validity mask
            y = labels[:,0].reshape((-1,1))
            mask = labels[:,1].reshape((-1,1))
            imb = transform(imb)
            cost = _classify_d(imb, y, mask)
            val_costs.append(cost)
        print 'valid label sums',valid_label_sums,valid_label_sums[0]/(valid_label_sums[1]+1e-8)
        # Average (classification cost, classification error) over all batches.
        val_cost = np.array(val_costs).mean(axis=0)
        d_cost_class = float(val_cost[0])
        d_error_class = float(val_cost[1])
        print("val_d_classify=%f val_d_classify_error=%f"%(d_cost_class, d_error_class))
        log = [d_cost_class, d_error_class]
        f_log.write(json.dumps(dict(zip(['valid_classify', 'valid_classify_error'], log)))+'\n')
        f_log.flush()
        sleep(5.)
if do_initial_valid:
validate()
# Main training loop: alternate generator/discriminator updates, report and
# rebalance every args.updates minibatches, validate + checkpoint per epoch.
for epoch in range(args.start,args.nepochs):
    for imb,labels in tqdm(tr_stream.get_epoch_iterator(), total=ntrain_s/nbatch):
        label_sums += labels.sum(axis=0)
        # column 0 is the target label, column 1 a validity mask
        y = labels[:,0].reshape((-1,1))
        mask = labels[:,1].reshape((-1,1))
        imb = transform(imb)
        if args.onlyclassify:
            cost = _train_classify(imb, y, mask)
            # pad to the length of cost_target so logging below still works
            cost = [0]*(len(cost_target)-len(cost)) + cost
        else:
            zmb = gen_z(len(imb))
            # k controls the g/d update ratio: k >= 0 favours d, k < 0 favours g
            if k >= 0:
                if n_updates % (k+2) == 0:
                    cost = _train_g(imb, zmb, y, mask)
                else:
                    cost = _train_d(imb, zmb, y, mask)
            else:
                if n_updates % (-k+2) == 0:
                    cost = _train_d(imb, zmb, y, mask)
                else:
                    cost = _train_g(imb, zmb, y, mask)
        n_updates += 1
        n_examples += len(imb)
        costs.append(cost)
        if n_updates % args.updates == 0:
            cost = np.array(costs).mean(axis=0)
            # cost layout: [g_cost, d_cost, g_cost_d, d_cost_real, d_cost_gen,
            #               d_error_real, d_error_gen, d_classify, d_classify_error]
            print 'label sums',label_sums,label_sums[0]/(label_sums[1]+1e-8)
            label_sums = np.zeros(2)
            costs = []
            g_cost = float(cost[0])
            d_cost = float(cost[1])
            d_cost_real = float(cost[3])
            d_cost_gen = float(cost[4])
            d_error_r = float(cost[5])
            d_error_g = float(cost[6])
            d_cost_class = float(cost[7])
            d_error_class = float(cost[8])
            # nearest-neighbour distance of fresh samples vs. held-out images
            gX = gen_samples(10000)
            gX = gX.reshape(len(gX), -1)
            va_nnd_1k = nnd_score(gX[:1000], vaX, metric='euclidean')
            log = [n_epochs, n_updates, n_examples, time()-t,
                    va_nnd_1k, g_cost, d_cost,
                    d_error_r, d_error_g,d_cost_real,d_cost_gen,
                    d_cost_class, d_error_class,
                    float(lrg.get_value()),float(lrd.get_value()),float(l2t.get_value())
                   ]
            print '%d %d %.2f'%(epoch, n_updates, va_nnd_1k)
            print 'gc=%.4f dc=%.4f dcr=%.4f dcg=%.4f er=%.4f eg=%.4f cls=%.4f err=%.4f'%(
                g_cost, d_cost, d_cost_real, d_cost_gen,
                d_error_r,d_error_g, d_cost_class, d_error_class)
            f_log.write(json.dumps(dict(zip(log_fields, log)))+'\n')
            f_log.flush()
            # Rebalance the g/d update ratio and learning rates:
            # k positive means do more d steps, k negative means more g steps.
            if d_error_r < margin or d_error_g < margin: # d is too good
                k += args.k
                lrg.set_value(floatX(lrg.get_value()*args.lrstep))
                lrd.set_value(floatX(lrd.get_value()/args.lrstep))
            elif d_error_r > 1.-margin or d_error_g > 1.-margin: # d is too bad
                k -= args.k
                lrg.set_value(floatX(lrg.get_value()/args.lrstep))
                lrd.set_value(floatX(lrd.get_value()*args.lrstep))
            elif k > 0: # unwind d
                k -= 1
            elif k < 0: # unwind g
                k += 1
            k = max(args.mink,min(args.maxk,k))
            # Adapt the discriminator's L2 penalty from the generator cost,
            # cf. http://torch.ch/blog/2015/11/13/gan.html#balancing-the-gan-game
            if g_cost > 1.3: # g is bad -> increase regularization on d
                l2t.set_value(floatX(l2t.get_value() + l2step))
            elif g_cost < 0.9: # g is good -> decrease regularization on d
                l2t.set_value(floatX(l2t.get_value() - l2step))
            else:
                l2t.set_value(floatX(l2t.get_value() * (1.-args.l2decay)))
            if l2t.get_value() < 0:
                l2t.set_value(floatX(0.))
            print k, l2t.get_value()
    # End of epoch: validate, dump a sample grid, decay LRs, checkpoint.
    validate()
    samples = np.asarray(_gen(sample_zmb))
    color_grid_vis(inverse_transform(samples), (args.nvis2, args.nvis2), 'samples/%s/%d.png'%(desc, n_epochs))
    n_epochs += 1
    if n_epochs > niter:
        # linear learning-rate decay after the initial niter epochs
        lrg.set_value(floatX(lrg.get_value() - lr/niter_decay))
        lrd.set_value(floatX(lrd.get_value() - lr/niter_decay))
    if n_epochs <= 5 or n_epochs % 5 == 0:
        joblib.dump([p.get_value() for p in gen_params], 'models/%s/%d_gen_params.jl'%(desc, n_epochs))
        joblib.dump([p.get_value() for p in discrim_params], 'models/%s/%d_discrim_params.jl'%(desc, n_epochs))
| 39.318436 | 122 | 0.577472 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 8,896 | 0.315999 |
7ef212a3bbd72af3407c75992543ad244f5853aa | 686 | py | Python | tests/test_base_testclass.py | FrNecas/requre | 110ad5c42b6bbb087a28bcaf7d7b7834825ec65a | [
"MIT"
] | 4 | 2019-09-11T10:39:19.000Z | 2020-01-26T14:46:04.000Z | tests/test_base_testclass.py | FrNecas/requre | 110ad5c42b6bbb087a28bcaf7d7b7834825ec65a | [
"MIT"
] | 134 | 2020-08-04T06:56:25.000Z | 2022-03-28T19:59:10.000Z | tests/test_base_testclass.py | FrNecas/requre | 110ad5c42b6bbb087a28bcaf7d7b7834825ec65a | [
"MIT"
] | 8 | 2019-09-11T09:52:01.000Z | 2020-05-15T07:49:20.000Z | # Copyright Contributors to the Packit project.
# SPDX-License-Identifier: MIT
import os
import shutil
from requre import RequreTestCase
from requre.utils import get_datafile_filename
class CheckBaseTestClass(RequreTestCase):
    """Smoke-check that RequreTestCase writes a per-test data file."""

    def tearDown(self):
        super().tearDown()
        storage_file = get_datafile_filename(self)
        self.assertTrue(os.path.exists(storage_file))
        # The file name carries just "<ClassName>.<test_name>", not the full id.
        short_id = ".".join(self.id().split(".")[-2:])
        self.assertIn(short_id, storage_file.name)
        self.assertIn("test_data", str(storage_file))
        # Clean up the generated directory so reruns start fresh.
        shutil.rmtree(os.path.dirname(get_datafile_filename(self)))

    def test(self):
        pass
7ef2a68b302b035e6c0797abb0c9b533ef0cc00f | 1,098 | py | Python | sabueso/tools/sabueso_UniProtKB_XMLDict/get_name.py | dprada/sabueso | 14843cf3522b5b89db5b61c1541a7015f114dd53 | [
"MIT"
] | null | null | null | sabueso/tools/sabueso_UniProtKB_XMLDict/get_name.py | dprada/sabueso | 14843cf3522b5b89db5b61c1541a7015f114dd53 | [
"MIT"
] | 2 | 2022-01-31T21:22:17.000Z | 2022-02-04T20:20:12.000Z | sabueso/tools/sabueso_UniProtKB_XMLDict/get_name.py | dprada/sabueso | 14843cf3522b5b89db5b61c1541a7015f114dd53 | [
"MIT"
] | 1 | 2021-07-20T15:01:14.000Z | 2021-07-20T15:01:14.000Z | from collections import OrderedDict
from evidence import Evidence
def get_name(item, entity='all'):
    """Extract the recommended protein full name from a UniProtKB XML dict.

    Returns an Evidence whose value is the entry's recommended full name,
    referenced with the UniProtKB accession and any evidence entries the
    XML attaches to the name.
    """

    from ._add_reference_to_evidence import _add_reference_to_evidence

    evidence = Evidence()

    fullName = item['uniprot']['entry']['protein']['recommendedName']['fullName']

    # isinstance() instead of type()==...: idiomatic, and isinstance(x, dict)
    # also accepts the plain dicts newer xmltodict versions return (OrderedDict
    # is a dict subclass, so old behavior is preserved).
    if isinstance(fullName, str):
        evidence.value = fullName
    elif isinstance(fullName, dict):
        if '#text' in fullName:
            evidence.value = fullName['#text']
        if '@evidence' in fullName:
            evidence_numbers_in_db = fullName['@evidence'].split()
            for evidence_number_in_db in evidence_numbers_in_db:
                # Evidence numbers are 1-based indices into the entry's evidence list.
                evidence_in_db = item['uniprot']['entry']['evidence'][int(evidence_number_in_db)-1]
                if evidence_in_db['@key']!=evidence_number_in_db:
                    raise ValueError('Evidence number does not match evidence @key')
                _add_reference_to_evidence(evidence, evidence_in_db)

    accession = item['uniprot']['entry']['accession'][0]
    evidence.add_reference({'database':'UniProtKB', 'id':accession})

    return evidence
| 36.6 | 99 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 223 | 0.203097 |
7ef2ed2970e3ddb3c9692d6c9d5d53f3c44e44e9 | 93 | py | Python | amadeus/shopping/availability/__init__.py | minjikarin/amadeus-python | 14a004912ee8c36ee4fd79651ea1b23afe6b2a6e | [
"MIT"
] | 125 | 2018-04-09T07:27:24.000Z | 2022-02-22T11:45:20.000Z | amadeus/shopping/availability/__init__.py | minjikarin/amadeus-python | 14a004912ee8c36ee4fd79651ea1b23afe6b2a6e | [
"MIT"
] | 58 | 2018-03-29T14:58:01.000Z | 2022-03-17T10:18:07.000Z | amadeus/shopping/availability/__init__.py | minjikarin/amadeus-python | 14a004912ee8c36ee4fd79651ea1b23afe6b2a6e | [
"MIT"
] | 58 | 2018-04-06T10:56:20.000Z | 2022-03-04T01:23:24.000Z | from ._flight_availabilities import FlightAvailabilities
__all__ = ['FlightAvailabilities']
| 23.25 | 56 | 0.849462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 22 | 0.236559 |
7ef39b3ab7e84f40561ab285091ba87a5ffe2c50 | 6,523 | py | Python | examples/scripts/quickstart.py | dcslin/rafiki | b617ac2536ac13095c4930d6d3f1f9b3c231b5e7 | [
"Apache-2.0"
] | 35 | 2018-10-07T09:51:42.000Z | 2021-09-08T14:13:38.000Z | examples/scripts/quickstart.py | dcslin/rafiki | b617ac2536ac13095c4930d6d3f1f9b3c231b5e7 | [
"Apache-2.0"
] | 119 | 2018-10-05T14:49:39.000Z | 2022-03-11T23:49:51.000Z | examples/scripts/quickstart.py | dcslin/rafiki | b617ac2536ac13095c4930d6d3f1f9b3c231b5e7 | [
"Apache-2.0"
] | 32 | 2018-10-18T12:02:55.000Z | 2020-03-01T10:27:06.000Z | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from pprint import pprint
import time
import requests
import argparse
import os
from rafiki.client import Client
from rafiki.config import SUPERADMIN_EMAIL
from rafiki.constants import BudgetOption, InferenceBudgetOption, InferenceJobStatus, ModelDependency
from rafiki.model import utils
from examples.scripts.utils import gen_id, wait_until_train_job_has_stopped
from examples.datasets.image_files.load_fashion_mnist import load_fashion_mnist
# Returns `predictor_host` of inference job
def get_predictor_host(client, app):
    """Block until the app's inference job is RUNNING; return its predictor host."""
    while True:
        job = client.get_running_inference_job(app)
        if job.get('status') == InferenceJobStatus.RUNNING:
            return job.get('predictor_host')
        # Not ready yet -- pause before polling again.
        time.sleep(10)
def make_predictions(client, predictor_host, queries):
    """POST each query to the predictor endpoint and collect predictions.

    Raises Exception with the response body on any non-200 status.
    """
    endpoint = 'http://{}/predict'.format(predictor_host)
    results = []
    for query in queries:
        res = requests.post(url=endpoint, json={ 'query': query })
        if res.status_code != 200:
            raise Exception(res.text)
        results.append(res.json()['prediction'])
    return results
def quickstart(client, train_dataset_path, val_dataset_path, gpus, hours, query_paths):
    '''
    Conducts a full train-inference flow on the Fashion MNIST dataset with
    models `SkDt` and `TfFeedForward` for the task `IMAGE_CLASSIFICATION`.

    Steps: preprocess & upload datasets, register both models, run a train
    job within the given GPU/time budget, then spin up an inference job and
    send the query images to it before tearing the job down.
    '''
    task = 'IMAGE_CLASSIFICATION'
    # Randomly generate app & model names to avoid naming conflicts
    app_id = gen_id()
    app = 'image_classification_app_{}'.format(app_id)
    tf_model_name = 'TfFeedForward_{}'.format(app_id)
    sk_model_name = 'SkDt_{}'.format(app_id)
    print('Preprocessing datasets...')
    load_fashion_mnist(train_dataset_path, val_dataset_path)
    print('Creating & uploading datasets onto Rafiki...')
    train_dataset = client.create_dataset('{}_train'.format(app), task, train_dataset_path)
    pprint(train_dataset)
    val_dataset = client.create_dataset('{}_val'.format(app), task, val_dataset_path)
    pprint(val_dataset)
    print('Adding models "{}" and "{}" to Rafiki...'.format(tf_model_name, sk_model_name))
    # Model source files are resolved relative to the repository root
    tf_model = client.create_model(tf_model_name, task, 'examples/models/image_classification/TfFeedForward.py',
                        'TfFeedForward', dependencies={ ModelDependency.TENSORFLOW: '1.12.0' })
    pprint(tf_model)
    sk_model = client.create_model(sk_model_name, task, 'examples/models/image_classification/SkDt.py',
                        'SkDt', dependencies={ ModelDependency.SCIKIT_LEARN: '0.20.0' })
    pprint(sk_model)
    model_ids = [tf_model['id'], sk_model['id']]
    print('Creating train job for app "{}" on Rafiki...'.format(app))
    budget = {
        BudgetOption.TIME_HOURS: hours,
        BudgetOption.GPU_COUNT: gpus
    }
    train_job = client.create_train_job(app, task, train_dataset['id'], val_dataset['id'],
                                        budget, models=model_ids)
    pprint(train_job)
    print('Waiting for train job to complete...')
    print('This might take a few minutes')
    # Blocks until the train job reaches a stopped state
    wait_until_train_job_has_stopped(client, app)
    print('Train job has been stopped')
    print('Listing best trials of latest train job for app "{}"...'.format(app))
    pprint(client.get_best_trials_of_train_job(app))
    print('Creating inference job for app "{}" on Rafiki...'.format(app))
    pprint(client.create_inference_job(app))
    # Polls until the inference job is live (or has failed)
    predictor_host = get_predictor_host(client, app)
    if not predictor_host: raise Exception('Inference job has errored')
    print('Inference job is running!')
    print('Making predictions for query images:')
    print(query_paths)
    queries = utils.dataset.load_images(query_paths).tolist()
    predictions = make_predictions(client, predictor_host, queries)
    print('Predictions are:')
    print(predictions)
    print('Stopping inference job...')
    pprint(client.stop_inference_job(app))
if __name__ == '__main__':
    # CLI for the quickstart flow; credentials default to the superadmin's
    parser = argparse.ArgumentParser()
    parser.add_argument('--host', type=str, default='localhost', help='Host of Rafiki instance')
    parser.add_argument('--web_admin_port', type=int, default=os.environ.get('WEB_ADMIN_EXT_PORT', 3001), help='Port for Rafiki Web Admin on host')
    parser.add_argument('--email', type=str, default=SUPERADMIN_EMAIL, help='Email of user')
    parser.add_argument('--password', type=str, default=os.environ.get('SUPERADMIN_PASSWORD'), help='Password of user')
    parser.add_argument('--gpus', type=int, default=0, help='How many GPUs to use for training')
    parser.add_argument('--hours', type=float, default=0.1, help='How long the train job should run for (in hours)') # 6min
    parser.add_argument('--query_path', type=str,
                        default='examples/data/image_classification/fashion_mnist_test_1.png,examples/data/image_classification/fashion_mnist_test_2.png',
                        help='Path(s) to query image(s), delimited by commas')
    (args, _) = parser.parse_known_args()
    # Preprocessed dataset archives will be written to these paths
    out_train_dataset_path = 'data/fashion_mnist_train.zip'
    out_val_dataset_path = 'data/fashion_mnist_val.zip'
    # Initialize client
    client = Client()
    client.login(email=args.email, password=args.password)
    web_admin_url = 'http://{}:{}'.format(args.host, args.web_admin_port)
    print('During training, you can view the status of the train job at {}'.format(web_admin_url))
    print('Login with email "{}" and password "{}"'.format(args.email, args.password))
    # Run quickstart
    quickstart(client, out_train_dataset_path, out_val_dataset_path, args.gpus, args.hours, args.query_path.split(','))
| 43.198675 | 155 | 0.706883 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,569 | 0.393837 |
7ef3adf21ab396cb7c21eb61dae297a4ea72dd49 | 20,228 | py | Python | facetool.py | hay/facetool | 3e296f7b177ebbcceb4b25f12f3327c3f6612f14 | [
"MIT"
] | 29 | 2018-12-10T22:40:07.000Z | 2022-03-30T02:56:28.000Z | facetool.py | hay/facetool | 3e296f7b177ebbcceb4b25f12f3327c3f6612f14 | [
"MIT"
] | 2 | 2020-02-21T09:48:37.000Z | 2021-03-06T22:33:45.000Z | facetool.py | hay/facetool | 3e296f7b177ebbcceb4b25f12f3327c3f6612f14 | [
"MIT"
] | 7 | 2019-08-09T09:19:12.000Z | 2022-03-30T02:56:27.000Z | #!/usr/bin/env python3
from dataknead import Knead
from facetool import config, media, util
from facetool.constants import *
from facetool.path import Path
from facetool.profiler import Profiler
from facetool.errors import ArgumentError
from facetool.util import message, force_mkdir, sample_remove, is_json_path
from random import random
from tqdm import tqdm
import argparse
import logging
import json
import os
import pandas as pd
import pdb
import shutil
import sys
# All subcommands accepted as the positional 'command' argument
COMMANDS = (
    "average",
    "classify",
    "cluster",
    "combineaudio",
    "combineframes",
    "count",
    "distance",
    "crop",
    "encode",
    "extractframes",
    "landmarks",
    "locate",
    "pose",
    "probe",
    "sample",
    "swap",
)
# Valid values for the -of/--output-format option
OUTPUT_FORMAT_CHOICES = (
    "default",
    "csv",
    "json"
)
# Available face-swapping backends; the first entry is the default
SWAP_METHODS = [
    "faceswap",
    "faceswap3d"
]
logger = logging.getLogger(__name__)
# Note that we always profile, we just don't print the output if the
# option is not enabled
profiler = Profiler("facetool.py")
def get_parser():
    """Build and return the argparse parser for all facetool subcommands.

    The positional `command` is optional (nargs="?") so that running with
    no command falls through to main()'s help branch.
    """
    parser = argparse.ArgumentParser(description = "Manipulate faces in videos and images")
    # Essentials
    parser.add_argument("command", choices = COMMANDS, nargs = "?")
    parser.add_argument("-i", "--input", type = str,
        required = True,
        help = "Input file or folder, 'face' when swapping"
    )
    parser.add_argument("-o", "--output", type = str,
        help = "Output file or folder",
        default = None
    )
    parser.add_argument("-t", "--target", type = str,
        help = "'Head' when swapping"
    )
    # Extra arguments
    parser.add_argument("-ai", "--audio-input", type = str,
        default = None,
        help = "Add a separate audio file with the end result movie"
    )
    parser.add_argument("--as-percentage", action = "store_true",
        help = "Show face distances as percentages"
    )
    parser.add_argument("-bl", "--blur", type = float,
        default = BLUR_AMOUNT,
        help = "Amount of blur to use during colour correction"
    )
    parser.add_argument("-dd", "--data-directory", type = str,
        default = DATA_DIRECTORY,
        help = "Directory where the data files are located"
    )
    parser.add_argument("-f", "--force", action = "store_true",
        help = "Force commands and ignore warnings, like with sample"
    )
    parser.add_argument("-fr", "--framerate", type = str,
        default = DEFAULT_FRAMERATE
    )
    parser.add_argument("-fa", "--feather", type = int,
        default = FEATHER_AMOUNT,
        help = "Softness of edges on a swapped face"
    )
    parser.add_argument("-if", "--ignore-nofaces", action = "store_true",
        default = False,
        help = "When having no faces to swap, keep the original input image"
    )
    parser.add_argument("-ih", "--image-height", type = int,
        default = DEFAULT_IMAGE_HEIGHT,
        help = "Height of output image / height"
    )
    parser.add_argument("-iw", "--image-width", type = int,
        default = DEFAULT_IMAGE_WIDTH,
        help = "Width of output image / video"
    )
    parser.add_argument("-kt", "--keep-temp", action = "store_true",
        help = "Keep temporary files (used with video swapping)"
    )
    parser.add_argument("-m", "--model", type = str,
        help = "Use a precalculated model (for calculating distances)"
    )
    parser.add_argument("--no-audio", action = "store_true")
    parser.add_argument("-nocc", "--no-colour-correct", action = "store_true",
        help = "Don't colour correct"
    )
    parser.add_argument("--no-eyesbrows", action = "store_true")
    parser.add_argument("--no-nosemouth", action = "store_true")
    parser.add_argument("--no-threading", action = "store_true",
        help = "Don't use multithreading"
    )
    parser.add_argument("--only-mouth", action="store_true")
    parser.add_argument("-of", "--output-format",
        choices = OUTPUT_FORMAT_CHOICES,
        help = "Specify output format"
    )
    parser.add_argument("-pp", "--predictor-path", type = str,
        default = PREDICTOR_PATH
    )
    parser.add_argument("--profile", action = "store_true",
        help = "Show profiler information"
    )
    parser.add_argument("-q", "--quiet", action = "store_true",
        help = "Don't print output to the console"
    )
    parser.add_argument("-s", "--swap", action = "store_true",
        help = "Swap input and target"
    )
    parser.add_argument("--save-originals", action = "store_true",
        help = "Save original images when averaging faces"
    )
    parser.add_argument("--save-warped", action = "store_true",
        help = "Save warped images when averaging faces"
    )
    # Bug fix: the help string previously had an unbalanced parenthesis
    parser.add_argument("--swap-method",
        choices = SWAP_METHODS,
        default = SWAP_METHODS[0],
        help = f"Swap method for faceswap (options are: {SWAP_METHODS})"
    )
    parser.add_argument("-so", "--swap-order", type = str,
        help = "Comma-separated list with order of faceswaps on target, implies a multiswap"
    )
    parser.add_argument("-sp", "--sample-percentage", type = float,
        help = "Percentage of files in a directory to randomly remove (used for the sample command)"
    )
    parser.add_argument("-sr", "--swap-order-repeat", action = "store_true", default = False,
        help = "When using --swap-order and there are not enough target faces, repeat the sequence"
    )
    parser.add_argument("--temp-dir", type = str,
        help = "Define the directory where temporary files should be placed"
    )
    parser.add_argument("-v", "--verbose", action = "store_true",
        help = "Show debug information"
    )
    parser.add_argument("-vv", "--extra-verbose", action = "store_true",
        help = "Show debug information AND raise / abort on exceptions"
    )
    parser.add_argument("--warp-3d", action="store_true",
        help = "Swap faces and morph to coordinates of target face"
    )
    return parser
def main(args):
    """Dispatch the parsed CLI arguments to the requested facetool command.

    Heavy per-command dependencies (dlib-backed detectors, swappers, etc.)
    are imported lazily inside each branch so unrelated commands stay fast.
    """
    if args.verbose or args.extra_verbose:
        logging.basicConfig(level=logging.DEBUG)
    logging.debug(args)
    # Propagate CLI flags to the shared config module
    config.PROFILE = args.profile
    config.QUIET = args.quiet
    config.VERBOSE = args.verbose or args.extra_verbose
    # Check for invalid argument combinations
    if any([args.output_format == "csv", args.output_format == "json"]) and not args.output:
        raise ArgumentError("With CSV as output format, a filename (-o) is required")
    # Swap around input and target
    if args.swap:
        args.input, args.target = args.target, args.input
    # Okay, the main stuff, get the command
    # Extract all frames from a movie to a set of jpg files
    if args.command == "extractframes":
        util.mkdir_if_not_exists(args.output)
        media.extractframes(args.input, args.output)
    # Combine all frames from a set of jpg files to a movie
    elif args.command == "combineframes":
        media.combineframes(args.input, args.output, framerate = args.framerate)
    # Combine audio with an input movie
    elif args.command == "combineaudio":
        media.combineaudio(args.input, args.audio_input, args.output)
    # Randomly remove (sample) a percentage of files from a given directory
    elif args.command == "sample":
        if not args.sample_percentage:
            raise ArgumentError("The sample command needs a sample percentage (-sp)")
        sample_remove(args.input, args.sample_percentage, force_delete = args.force)
    # Show metadata on a media file
    elif args.command == "probe":
        try:
            data = media.probe(args.input)
        except:
            raise ArgumentError(f"Could not probe '{args.input}', probably not a video/image file")
        else:
            jsondata = json.dumps(data, indent = 4)
            message(jsondata)
    elif args.command == "landmarks":
        from facetool.landmarks import Landmarks
        landmarks = Landmarks(predictor_path = args.predictor_path)
        # Only collect landmark rows when a non-default output format is set
        save_data = args.output_format and args.output_format != "default"
        if save_data:
            data = []
        # Check if we *could* have an output directory, and if so,
        # create it
        if args.output and Path(args.output).could_be_dir():
            Path(args.output).mkdir_if_not_exists()
        for pathobj in Path(args.input).images():
            path = str(pathobj)
            logging.debug(f"Processing {path}")
            logging.debug(f"Getting landmarks of {path}")
            if not args.output:
                outpath = None
            else:
                out = Path(args.output)
                if out.is_dir():
                    outpath = f"{out}/{Path(path).name}"
                else:
                    outpath = str(out)
            marks = landmarks.get_landmarks(str(path), outpath = outpath)
            if marks and save_data:
                # One row per image: path followed by alternating x, y values
                points = [str(path)]
                [points.extend([m.x, m.y]) for m in marks]
                data.append(points)
            message(path, marks)
        if save_data:
            df = pd.DataFrame(data)
            if args.output_format == "csv":
                df.to_csv(args.output)
            elif args.output_format == "json":
                df.to_json(args.output)
            else:
                raise ArgumentError(f"Invalid output format: {args.output_format}")
    elif args.command == "pose":
        from facetool.poser import Poser
        poser = Poser(predictor_path = args.predictor_path)
        # Check if we *could* have an output directory, and if so,
        # create it
        if args.output and Path(args.output).could_be_dir():
            Path(args.output).mkdir_if_not_exists()
        for pathobj in Path(args.input).images():
            path = str(pathobj)
            logging.debug(f"Processing {path}")
            if not args.output:
                outpath = None
            else:
                out = Path(args.output)
                if out.is_dir():
                    outpath = f"{out}/{Path(path).name}"
                else:
                    outpath = str(out)
            poses = poser.get_poses(path, outpath = outpath)
            message(f"{path}: {poses}")
    elif args.command == "count":
        from facetool.detect import Detect
        detect = Detect()
        if args.output_format == "csv":
            csv = []
        for path in Path(args.input).images():
            count = detect.count(path)
            message(f"Number of faces in '{path}': {count}")
            if args.output_format == "csv":
                csv.append({
                    "path" : path,
                    "count" : count
                })
        if args.output_format == "csv":
            df = pd.DataFrame(csv)
            df.to_csv(args.output)
    elif args.command == "locate":
        from facetool.detect import Detect
        detect = Detect()
        for path in Path(args.input).images():
            to_directory = os.path.isdir(args.input)
            locations = detect.locate(path, args.output, to_directory = to_directory)
            message(f"Face locations in '{args.input}': {locations}")
    elif args.command == "crop":
        from facetool.detect import Detect
        from facetool.media import extractframes
        # We can't crop to an image path, because an input image might
        # have multiple faces, so throw an error in that case
        if Path(args.output).is_image():
            raise ArgumentError(f"Can't crop with an image as output")
        detect = Detect()
        # FIXME: we need some general mechanism for juggling frames around
        TMP_DIR = "crop-tmp"
        IS_VIDEO = Path(args.input).is_video()
        logging.debug(f"Cropping. Input is video? {IS_VIDEO}")
        if IS_VIDEO:
            # Videos are first exploded into frames, then each frame is cropped
            force_mkdir(TMP_DIR)
            extractframes(args.input, TMP_DIR)
            images = Path(TMP_DIR).images()
        else:
            images = Path(args.input).images()
        for path in images:
            logging.debug(f"Cropping <{path}>")
            detect.crop(str(path), args.output)
        if IS_VIDEO:
            shutil.rmtree(TMP_DIR)
    elif args.command == "classify":
        from facetool.classifier import Classifier
        classifier = Classifier(
            data_directory = args.data_directory,
            output_format = args.output_format,
            predictor_path = args.predictor_path
        )
        for path in Path(args.input).images():
            logging.debug(f"Classifying <{path}>")
            classifier.classify(str(path))
        if args.output_format == "csv":
            classifier.to_csv(args.output)
    elif args.command == "average":
        from facetool.averager import Averager
        profiler.tick("start averaging")
        averager = Averager(
            predictor_path = args.predictor_path,
            img_height = args.image_height,
            img_width = args.image_width,
            save_originals = args.save_originals,
            save_warped = args.save_warped
        )
        TMP_DIR = "average-tmp"
        path = Path(args.input)
        # If this is a video, extract all images and average those
        if path.is_file() and path.is_video():
            # First create a temporary directory to hold all frames
            util.mkdir_if_not_exists(TMP_DIR)
            media.extractframes(args.input, TMP_DIR)
            # Now average
            averager.average(TMP_DIR, args.output)
            # And remove the temporary directory
            logging.debug(f"Removing {TMP_DIR}")
            shutil.rmtree(TMP_DIR)
        # Not a video, so if it's a file it's probably an image
        # extract all faces and average those
        elif path.is_file():
            # First create a temporary directory
            util.mkdir_if_not_exists(TMP_DIR)
            # Now extract all the images to said directory
            from facetool.detect import Detect
            detect = Detect()
            logging.debug(f"Cropping <{args.input}> to {TMP_DIR}")
            detect.crop(str(args.input), TMP_DIR)
            # Average the stuff
            averager.average(TMP_DIR, args.output)
            # And remove the temporary directory
            logging.debug(f"Removing {TMP_DIR}")
            shutil.rmtree(TMP_DIR)
        elif path.is_dir():
            # Just a directory, use this
            averager.average(args.input, args.output)
        else:
            raise ArgumentError("Invalid input for averaging")
        profiler.tick("done averaging")
    elif args.command == "distance":
        from facetool.recognizer import Recognizer
        # Either a target or a precomputed model is required next to the input
        if not all([args.input, any([args.target, args.model])]):
            raise ArgumentError("For the recognizer you need an input and target/model")
        logging.debug(f"Trying to recognize {args.input} in {args.target}{args.model}")
        recognizer = Recognizer()
        results = recognizer.recognize(
            input_path = args.input,
            model_path = args.model,
            target_path = args.target,
            as_percentage = args.as_percentage
        )
        if args.output_format == "csv":
            pd.Series(results).to_csv(args.output, header = False)
        elif args.output_format == "json":
            pd.Series(results).to_json(args.output)
        else:
            message(f"{args.input} distance to {args.target}")
            for path, distance in results.items():
                message(f"{path}: {distance}")
    elif args.command == "encode":
        from facetool.recognizer import Recognizer
        if not all([args.input, args.output]):
            raise ArgumentError("For encoding faces you need both input and output")
        recognizer = Recognizer()
        encodings = recognizer.encode_path(args.input)
        with open(args.output, "w") as f:
            f.write(encodings)
        message(f"Written encodings of {args.input} to {args.output}")
    elif args.command == "cluster":
        from facetool.clusterer import Clusterer
        # A .json file with encodings is also valid, if that is give, use that
        # instead
        if is_json_path(args.input):
            encodings = Knead(args.input).data()["encodings"]
        else:
            from facetool.recognizer import Recognizer
            recognizer = Recognizer()
            encodings = recognizer.encode_path(args.input, return_type = "dict")
            encodings = encodings["encodings"]
        clusterer = Clusterer()
        output = clusterer.cluster_encodings(encodings)
        if args.output:
            if is_json_path(args.output):
                Knead(output).write(args.output)
            else:
                force_mkdir(args.output)
                clusterer.move_files(output, args.output)
        else:
            # Just print the output
            Knead(output).print()
    elif args.command == "swap":
        from facetool.swapper import Swapper
        profiler.tick("start swapping")
        # First check if all arguments are given
        arguments = [args.input, args.target]
        if not all(arguments + [args.output]):
            raise ArgumentError("Input, target and output are required for swapping")
        # And if these things are paths or files
        if not all([os.path.exists(a) for a in arguments]):
            raise ArgumentError("Input and target should be valid files or directories")
        pbar = tqdm()
        # Progress callback invoked by the swapper after every processed file
        def update_pbar():
            pbar.total = swapper.filecount
            pbar.update()
            if args.verbose:
                pbar.write(swapper.last_message)
        # That is out of the way, set up the swapper
        swapper = Swapper(
            predictor_path = args.predictor_path,
            feather = args.feather,
            blur = args.blur,
            keep_temp = args.keep_temp,
            swap_audio = not args.no_audio,
            overlay_eyesbrows = not args.no_eyesbrows,
            overlay_nosemouth = not args.no_nosemouth,
            only_mouth = args.only_mouth,
            reporthook = update_pbar,
            swap_method = args.swap_method,
            warp_3d = args.warp_3d,
            swap_order = args.swap_order,
            swap_order_repeat = args.swap_order_repeat,
            ignore_nofaces = args.ignore_nofaces,
            concurrent = not args.no_threading,
            colour_correct = not args.no_colour_correct,
            temp_dir = args.temp_dir
        )
        # Directory of faces to directory of heads
        if Path(args.input).is_dir() and Path(args.target).is_dir():
            swapper.swap_directory_to_directory(args.input, args.target, args.output)
        # Face to directory of heads
        elif media.is_image(args.input) and Path(args.target).is_dir():
            swapper.swap_image_to_directory(args.input, args.target, args.output)
        # Directory of faces to head
        elif Path(args.input).is_dir() and media.is_image(args.target):
            swapper.swap_directory_to_image(args.input, args.target, args.output)
        # Face in image to video
        elif media.is_video(args.target) and media.is_image(args.input):
            swapper.swap_image_to_video(args.target, args.input, args.output)
        # Face of video to head in other video
        elif media.is_video(args.target) and media.is_video(args.input):
            swapper.swap_video_to_video(args.target, args.input, args.output)
        # Image to image
        elif media.is_image(args.target) and media.is_image(args.input):
            swapper.swap_image_to_image(args.target, args.input, args.output)
        # I don't even know if there is an option that isn't in the list above,
        # but if it isn't, you'll get this
        else:
            raise ArgumentError("Invalid swap options")
        pbar.close()
        profiler.tick("done swapping")
    else:
        # No arguments, just display help
        # NOTE(review): 'parser' is not defined in this function — it relies
        # on the module-level variable created under __main__, so calling
        # main() from elsewhere raises NameError here. Consider using
        # get_parser().print_help() instead — confirm before changing.
        parser.print_help()
if __name__ == "__main__":
    # Parse the CLI arguments and hand them to the dispatcher.
    # NOTE: main() reads the module-level 'parser' in its help branch,
    # so these names must stay 'parser' and 'args'.
    parser = get_parser()
    args = parser.parse_args()
    try:
        main(args)
    except IsADirectoryError as err:
        # A directory was supplied where a file was expected
        print(f"Can't use a directory as an argument: {err}")
    # Only dump profiling events when profiling was requested
    if config.PROFILE:
        profiler.dump_events()
7ef3b3d381094bf85b4cc0de564bd90a29ffa484 | 26,538 | py | Python | webapp/views.py | manas11/foodex | d78f13e49e6ee51083eb4e91d0a7237d7960c276 | [
"MIT"
] | 1 | 2022-02-04T08:47:40.000Z | 2022-02-04T08:47:40.000Z | webapp/views.py | manas11/foodex | d78f13e49e6ee51083eb4e91d0a7237d7960c276 | [
"MIT"
] | null | null | null | webapp/views.py | manas11/foodex | d78f13e49e6ee51083eb4e91d0a7237d7960c276 | [
"MIT"
] | 3 | 2020-07-14T18:41:50.000Z | 2022-01-27T17:52:25.000Z | from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.http import JsonResponse
from django.shortcuts import render, redirect, get_object_or_404
from _datetime import datetime
# from django.http import HttpResponse, HttpResponseNotAllowed
from django.contrib.auth import authenticate, login, logout
# from xdg import Menu
from webapp.models import Location, RestaurantOwner, Restaurant, FoodRestaurant, FoodItem, ItemType, User, Order, \
Customer, OrderDetail, Offer, Payment, Favourite
from .forms import CustomerRegisterForm, CustomerRegisterProfileForm, RestaurantRegisterForm, \
RestaurantRegisterProfileForm, RestaurantDetailForm
# from django.contrib.auth.decorators import login_required
from collections import Counter
# from django.urls import reverse
# from django.db.models import Q
#
#
# # from .models import Customer, Restaurant, Item, Menu, Order, orderItem, User
#
#
# #### ---------- General Side -------------------#####
#
# Showing index page
def index(request):
    """Render the public landing page."""
    context = {}
    return render(request, 'webapp/index.html', context)
def logout_view(request):
    """Log the current user out and send them back to the landing page."""
    logout(request)
    return redirect("index")
def customer_register(request):
    """Create a customer account and, on success, log the new user in.

    Falls through to re-render the signup form when validation or
    authentication fails.
    """
    form = CustomerRegisterForm(request.POST or None)
    if form.is_valid():
        new_user = form.save(commit=False)
        email = form.cleaned_data['username']
        password = form.cleaned_data['password']
        new_user.is_customer = True
        new_user.set_password(password)
        new_user.save()
        # Re-authenticate with the freshly stored credentials
        authenticated = authenticate(email=email, password=password)
        if authenticated is not None and authenticated.is_active:
            login(request, authenticated)
            return redirect('customer_profile_register')
    return render(request, 'webapp/customer_register.html', {'form': form})
def customer_profile_register(request):
    """Collect the customer's profile details right after account creation."""
    form = CustomerRegisterProfileForm(request.POST or None)
    if form.is_valid():
        profile = form.save(commit=False)
        profile.user = request.user
        # NOTE(review): location is hard-coded to 1 here — confirm whether
        # the form's chosen location should be used instead.
        profile.location_id = 1
        profile.save()
        return redirect("index")
    # [id, name] pairs for the location picker in the template
    locations = [[place.LocationId, place.LocationName] for place in Location.objects.all()]
    context = {
        'locations': locations,
        'form': form,
        'title': "Complete Your profile"
    }
    return render(request, 'webapp/customer_profile_register.html', context)
def customer_login(request):
    """Log a customer in.

    Bug fix: the original rendered the page with an 'account disabled'
    error on plain GET requests (before any login attempt) and reported
    failed credentials as a disabled account. A GET now shows a clean
    form and a failed POST reports 'Invalid Login', matching the
    reference login flow elsewhere in this module.
    """
    if request.method == "POST":
        email = request.POST['username']
        password = request.POST['pass']
        user = authenticate(username=email, password=password)
        print(user)
        if user is not None:
            login(request, user)
            return redirect('index')
        # Credentials did not match any active account
        return render(request, 'webapp/customer_login.html', {'error_message': 'Invalid Login'})
    # Plain GET: show the login form without an error banner
    return render(request, 'webapp/customer_login.html')
def restaurant_register(request):
    """Create a restaurant-owner account and, on success, log the user in.

    Falls through to re-render the signup form when validation or
    authentication fails.
    """
    form = RestaurantRegisterForm(request.POST or None)
    if form.is_valid():
        new_user = form.save(commit=False)
        username = form.cleaned_data['username']
        password = form.cleaned_data['password']
        new_user.is_restaurant_owner = True
        new_user.set_password(password)
        new_user.save()
        # Re-authenticate with the freshly stored credentials
        authenticated = authenticate(username=username, password=password)
        if authenticated is not None and authenticated.is_active:
            login(request, authenticated)
            return redirect('restaurant_profile_register')
    return render(request, 'webapp/restaurant_register.html', {'form': form})
def restaurant_profile_register(request):
    """Collect the restaurant owner's profile details after signup."""
    form = RestaurantRegisterProfileForm(request.POST or None)
    if form.is_valid():
        profile = form.save(commit=False)
        profile.user = request.user
        # NOTE(review): location is hard-coded to 1 here — confirm whether
        # the form's chosen location should be used instead.
        profile.location_id = 1
        profile.save()
        return redirect("restaurant_detail")
    # [id, name] pairs for the location picker in the template
    locations = [[place.LocationId, place.LocationName] for place in Location.objects.all()]
    context = {
        'locations': locations,
        'form': form,
        'title': "Complete Your profile"
    }
    return render(request, 'webapp/restaurant_profile_register.html', context)
def restaurant_login(request):
    """Log a restaurant owner in.

    Bug fix: the original rendered the page with an 'account disabled'
    error on plain GET requests (before any login attempt) and reported
    failed credentials as a disabled account. A GET now shows a clean
    form and a failed POST reports 'Invalid Login'.
    """
    if request.method == "POST":
        email = request.POST['email']
        password = request.POST['pass']
        user = authenticate(email=email, password=password)
        print(user)
        if user is not None:
            login(request, user)
            return redirect('rest_index')
        # Credentials did not match any active account
        return render(request, 'webapp/restaurant_login.html', {'error_message': 'Invalid Login'})
    # Plain GET: show the login form without an error banner
    return render(request, 'webapp/restaurant_login.html')
def restaurant_detail(request):
    """Create the restaurant record for the logged-in owner.

    Bug fix: removed leftover debug print statements ("qe", "e") that
    leaked to stdout on every request.
    """
    form = RestaurantDetailForm(request.POST or None, request.FILES or None)
    if form.is_valid():
        instance = form.save(commit=False)
        restaurantowner = RestaurantOwner.objects.get(user_id=request.user.id)
        instance.owner = restaurantowner
        # NOTE(review): location/offer/cuisine default to id 1 — confirm
        # these placeholders are intended.
        instance.location_id = 1
        instance.offer_id = 1
        instance.cuisine_id = 1
        instance.save()
        return redirect("index")
    context = {
        'form': form,
        'title': "Complete Your profile"
    }
    return render(request, 'webapp/restaurant_detail.html', context)
# def ajax_is_favorite(request):
# if not request.is_ajax() or not request.method == 'POST':
# return HttpResponseNotAllowed(['POST'])
# else:
# # Here you have to get the data and update the object
# # update favourite table
# fav = Favourite()
# # fav.restaurant_id = request.
# fav.user_id = request.user.id
#
# return HttpResponse({"success": True})
# def favorite_ajax(request):
# data = {'success': False}
# print("a1")
# if request.method == 'POST':
# print("a2")
# user_id = request.POST.get('user_id')
# rest_id = request.POST.get('rest_id')
# fav = Favourite()
# fav.restaurant_id = rest_id
# fav.user_id = user_id
# fav.save()
# data['success'] = True
# return JsonResponse(data)
def rest_index(request):
    """Render the restaurant-side dashboard landing page."""
    context = {}
    return render(request, 'webapp/rest_index.html', context)
def restaurant_index(request):
    """List restaurants in the customer's location; a POST marks a favourite."""
    if request.POST:
        # POST payload identifies a restaurant the given user wants to favourite
        rest_id = request.POST['rest_id']
        user_id = request.POST['user_id']
        customer = Customer.objects.get(user_id=user_id)
        fav = Favourite()
        # Favourite links the Customer row (not the auth User) to the restaurant
        fav.user_id = int(customer.id)
        fav.restaurant_id = int(rest_id)
        fav.save()
    if request.user.is_authenticated:
        # Only show restaurants in the customer's own location,
        # plus their existing favourites for highlighting in the template
        customer = Customer.objects.get(user=request.user)
        r_object = Restaurant.objects.filter(location=customer.location)
        user_id = customer.user_id
        temp = 1
        location = customer.location.LocationName
        fav = Favourite.objects.filter(user_id=customer.id)
    else:
        # Anonymous visitors get an empty listing
        r_object = None
        user_id = None
        # NOTE(review): 'temp' is always 1 on both branches — confirm the
        # template actually needs it.
        temp = 1
        location = None
        fav = None
    context = {
        'r_object': r_object,
        'location': location,
        'user_id': user_id,
        'temp': temp,
        'fav': fav,
    }
    return render(request, 'webapp/restaurant_index.html', context)
def restaurantProfile(request, pk=None):
    """Show the profile page for the logged-in owner's restaurant."""
    user = User.objects.get(pk=pk) if pk else request.user
    # NOTE(review): the restaurant is always resolved from request.user,
    # so passing pk only changes the 'user' shown in the template —
    # confirm this is intended.
    owner = RestaurantOwner.objects.get(user_id=request.user.id)
    restaurant = Restaurant.objects.get(owner_id=owner)
    context = {
        'user': user,
        'restaurant': restaurant,
        'restaurant_owner': owner,
    }
    return render(request, 'webapp/rest_profile.html', context)
def orderplaced(request):
    """Render the order-confirmation page."""
    context = {}
    return render(request, 'webapp/orderplaced.html', context)
# #
# #
# # # Showing Restaurants list to Customer
# # def restuarent(request):
# # r_object = Restaurant.objects.all()
# # query = request.GET.get('q')
# # if query:
# # r_object = Restaurant.objects.filter(Q(rname__iins=query)).distinct()
# # return render(request, 'webapp/restaurents.html', {'r_object': r_object})
# # return render(request, 'webapp/restaurents.html', {'r_object': r_object})
# #
# #
# logout
# # # customer profile view
# # def customerProfile(request, pk=None):
# # if pk:
# # user = User.objects.get(pk=pk)
# # else:
# # user = request.user
# #
# # return render(request, 'webapp/profile.html', {'user': user})
# #
# #
#
# #
# # # Update customer detail
# # def updateCustomer(request, id):
# # form = CustomerForm(request.POST or None, instance=request.user.customer)
# # if form.is_valid():
# # form.save()
# # return redirect('profile')
# # context = {
# # 'form': form,
# # 'title': "Update Your profile"
# # }
# # return render(request, 'webapp/customer_profile_register.html', context)
# #
# #
def restuarantMenu(request, pk=None):
    """List every menu item for the restaurant identified by ``pk``.

    Each item row is [name, is_veg, type name, cost, food_item_id],
    matching the order the template expects.
    """
    menu_entries = FoodRestaurant.objects.filter(restaurant_id=pk)
    rest = Restaurant.objects.filter(restaurant_id=pk)
    items = []
    for entry in menu_entries:
        for food in FoodItem.objects.filter(food_item_id=entry.food_item_id):
            item_type = ItemType.objects.get(type_id=food.type_id)
            items.append([food.name, food.is_veg, item_type.name, entry.cost, entry.food_item_id])
    restaurant = rest[0]
    context = {
        'items': items,
        'r_id': pk,
        'r_name': restaurant.name,
        'r_ex': restaurant.is_exclusive,
        'r_cost': restaurant.avg_cost,
        'r_time': restaurant.avg_time,
        'r_phone': restaurant.phone,
        'r_logo': restaurant.r_logo,
        'r_cuisine': restaurant.cuisine.cuisine_name,
        'r_add': restaurant.address,
    }
    return render(request, 'webapp/menu.html', context)
# #
# #
@login_required(login_url='/login/user/')
def checkout(request):
    """Two-phase checkout.

    GET: build an Order (plus OrderDetail rows) from the 'cart' cookie,
    apply the restaurant's offer discount and 5% tax, and render the
    order summary. POST: finalize that order as pay-later or online.
    """
    if request.POST:
        # Finalize the order created during the GET phase
        ptype = request.POST['submit']
        ordid = request.POST['oid']
        order = Order.objects.get(order_id=ordid)
        order.status = Order.ORDER_STATE_PLACED
        # NOTE(review): payment_hash_id = 1 looks like a placeholder for
        # "no payment yet" — confirm against the Payment model.
        order.payment_hash_id = 1
        order.instructions = request.POST['instruct']
        order.save()
        if ptype == "Pay Later":
            order.payment_mode_online = False
            order.save()
            return render(request, 'webapp/orderplaced.html', {})
        else:
            # Online payment: create a Payment row and link it to the order
            payment = Payment()
            payment.amount = request.POST['total_price']
            payment.save()
            order.payment_hash_id = payment.hash
            order.save()
            return render(request, 'webapp/online_pay.html', {})
    else:
        # Cart cookie holds comma-separated food_item ids; duplicates
        # encode quantities, so Counter yields {item_id: quantity}
        cart = request.COOKIES['cart'].split(",")
        cart = dict(Counter(cart))
        items = []
        totalprice = 0
        order = Order()
        print(order.order_id)
        order.tax = 0.05 * totalprice
        order.user = Customer.objects.get(user_id=request.user.id)
        order.datetime = datetime.now()
        # First pass: resolve the restaurant and its offer from the cart items.
        # NOTE(review): if the cart is empty this loop never runs and 'yu'
        # below raises NameError — confirm how an empty cart should behave.
        for x, y in cart.items():
            it = FoodItem.objects.get(food_item_id=int(x))
            print(it.name)
            item_rest = FoodRestaurant.objects.get(food_item_id=it.food_item_id)
            order.restaurant = Restaurant.objects.get(restaurant_id=item_rest.restaurant.restaurant_id)
            yu = Offer.objects.get(
                offer_id=Restaurant.objects.get(restaurant_id=item_rest.restaurant.restaurant_id).offer_id)
            print(yu.discount)
            order.offer_id = yu.offer_id
        order.payment_hash_id = 1
        order.save()
        # Second pass: persist one OrderDetail per cart entry and total up
        for x, y in cart.items():
            item = []
            it = FoodItem.objects.get(food_item_id=int(x))
            print(it.name)
            item_rest = FoodRestaurant.objects.get(food_item_id=it.food_item_id)
            print(order.order_id)
            order_detail = OrderDetail()
            order_detail.food_item_id = it.food_item_id
            order_detail.order_id = order.order_id
            order_detail.quantity = int(y)
            order_detail.save()
            # Template row: [name, quantity, line total]
            item.append(it.name)
            item.append(y)
            totalprice += item_rest.cost * y
            item.append(item_rest.cost * y)
            items.append(item)
        # 5% tax on the subtotal, then subtract the offer discount;
        # never let the total drop below the tax amount
        order.tax = int(0.05 * totalprice)
        withouttax = totalprice
        totalprice += order.tax
        totalprice -= int(yu.discount)
        if totalprice < order.tax:
            totalprice = order.tax
        order.save()
        context = {
            "items": items,
            "yu": yu,
            "totalprice": totalprice,
            "withouttax": withouttax,
            "order": order,
            "oid": order.order_id
        }
        return render(request, 'webapp/order.html', context)
def pay(request):
    """Render the order-placed confirmation page after a payment form POST.

    NOTE(review): when the request carries no POST data this view falls
    through and implicitly returns ``None`` -- confirm that every caller
    reaches this view via POST, or that a redirect is intended here.
    """
    if request.POST:
        # return redirect('/orderplaced/')
        return render(request, 'webapp/orderplaced.html', {})
# #
# #
# # ####### ------------------- Restaurant Side ------------------- #####
# #
# # # creating restuarant account
# # def restRegister(request):
# # form = RestuarantSignUpForm(request.POST or None)
# # if form.is_valid():
# # user = form.save(commit=False)
# # username = form.cleaned_data['username']
# # password = form.cleaned_data['password']
# # user.is_restaurant = True
# # user.set_password(password)
# # user.save()
# # user = authenticate(username=username, password=password)
# # if user is not None:
# # if user.is_active:
# # login(request, user)
# # return redirect("rcreate")
# # context = {
# # 'form': form
# # }
# # return render(request, 'webapp/restsignup.html', context)
# #
# #
# # # restuarant login
# # def restLogin(request):
# # if request.method == "POST":
# # username = request.POST['username']
# # password = request.POST['password']
# # user = authenticate(username=username, password=password)
# # if user is not None:
# # if user.is_active:
# # login(request, user)
# # return redirect("rprofile")
# # else:
# # return render(request, 'webapp/restlogin.html', {'error_message': 'Your account disable'})
# # else:
# # return render(request, 'webapp/restlogin.html', {'error_message': 'Invalid Login'})
# # return render(request, 'webapp/restlogin.html')
# #
# #
# # # restaurant profile view
# # def restaurantProfile(request, pk=None):
# # if pk:
# # user = User.objects.get(pk=pk)
# # else:
# # user = request.user
# #
# # return render(request, 'webapp/rest_profile.html', {'user': user})
# #
# #
# # # create restaurant detail
# # @login_required(login_url='/login/restaurant/')
# # def createRestaurant(request):
# # form = RestuarantForm(request.POST or None, request.FILES or None)
# # if form.is_valid():
# # instance = form.save(commit=False)
# # instance.user = request.user
# # instance.save()
# # return redirect("rprofile")
# # context = {
# # 'form': form,
# # 'title': "Complete Your Restaurant profile"
# # }
# # return render(request, 'webapp/rest_profile_form.html', context)
# #
# #
# # # Update restaurant detail
# # @login_required(login_url='/login/restaurant/')
# # def updateRestaurant(request, id):
# # form = RestuarantForm(request.POST or None, request.FILES or None, instance=request.user.restaurant)
# # if form.is_valid():
# # form.save()
# # return redirect('rprofile')
# # context = {
# # 'form': form,
# # 'title': "Update Your Restaurant profile"
# # }
# # return render(request, 'webapp/rest_profile_form.html', context)
# #
# #
# add menu item for restaurant
@login_required(login_url='/login/restaurant/')
def menu_manipulation(request):
    """Restaurant-side menu management view.

    On POST the ``submit`` field selects one of four actions:
    ``Modify`` (edit an existing item), ``Add`` (create/attach an item),
    ``Select`` (apply an offer to the restaurant), anything else (delete an
    item).  In every case the menu page is re-rendered with the restaurant's
    current menu, all offers and all item types.
    """
    if not request.user.is_authenticated:
        return redirect("rlogin")
    # Restaurant owned by the logged-in user -- presumably a one-to-one
    # mapping; Restaurant.DoesNotExist is not handled here.
    rest = Restaurant.objects.get(owner=RestaurantOwner.objects.get(user_id=request.user.id))
    if request.POST:
        print("8")  # debug trace
        rtype = request.POST['submit']
        print(rtype)
        if rtype == "Modify":
            # Update cost, name, veg flag and type of an existing menu item.
            print("23")
            foodid = int(request.POST['fooditemid'])
            food = FoodRestaurant.objects.get(food_item_id=foodid)
            food.cost = int(request.POST['cost'])
            foodItem = FoodItem.objects.get(food_item_id=foodid)
            foodItem.name = request.POST['name']
            is_veg = request.POST['is_veg']
            print(is_veg)
            # NOTE(review): ``is_veg`` is a raw POST string, so any non-empty
            # value (including "0") is truthy here, while the "Add" branch
            # below compares ``int(is_veg) == 1`` -- confirm which is intended.
            if is_veg:
                foodItem.is_veg = True
            else:
                foodItem.is_veg = False
            ittype = ItemType.objects.get(type_id=request.POST['type'])
            foodItem.type = ittype
            foodItem.save()
            food.save()
        elif rtype == "Add":
            # Attach an existing FoodItem by name, or create a new one first.
            print("13")
            foodrest = FoodRestaurant()
            name = request.POST['name']
            try:
                item = FoodItem.objects.get(name=name)
            except FoodItem.DoesNotExist:
                item = None
            if item is not None:
                print("6")
                foodrest.food_item_id = item.food_item_id
            else:
                print("2")
                fooditem = FoodItem()
                fooditem.name = name
                is_veg = request.POST['is_veg']
                if int(is_veg) == 1:
                    print("3")
                    fooditem.is_veg = True
                else:
                    print("4")
                    fooditem.is_veg = False
                fooditem.type_id = int(request.POST['type_id'])
                fooditem.save()
                foodrest.food_item_id = fooditem.food_item_id
                print("5")
            print("7")
            foodrest.restaurant_id = rest.restaurant_id
            foodrest.cost = request.POST['cost']
            foodrest.save()
        elif rtype == "Select":
            # Apply the chosen offer to this owner's restaurant.
            offerid = int(request.POST['offerid'])
            try:
                offer = Offer.objects.get(offer_id=offerid)
                ownner = RestaurantOwner.objects.get(user_id=request.user.id)
                rest = Restaurant.objects.get(owner=ownner)
                rest.offer = offer
                rest.save()
            except Offer.DoesNotExist:
                print("i23")
        else:
            # Any other submit value deletes the item from the menu.
            foodid = int(request.POST['fooditemid'])
            try:
                food = FoodRestaurant.objects.get(food_item_id=foodid)
                food.delete()
            except FoodRestaurant.DoesNotExist:
                print("d")
    # Build the menu rows rendered by the template: [item id, name, cost,
    # restaurant, veg flag, type name, type id, "veg"/"non veg" label].
    food = FoodRestaurant.objects.filter(restaurant=rest)
    menu = []
    for x in food:
        y = FoodItem.objects.get(food_item_id=x.food_item_id)
        cmenu = []
        cmenu.append(x.food_item_id)
        cmenu.append(y.name)
        cmenu.append(x.cost)
        cmenu.append(x.restaurant)
        cmenu.append(y.is_veg)
        print("yello")
        print(y.type)
        itype = ItemType.objects.get(type_id=y.type.type_id)
        cmenu.append(itype.name)
        cmenu.append(itype.type_id)
        if y.is_veg == 1:
            cmenu.append("veg")
        else:
            cmenu.append("non veg")
        menu.append(cmenu)
    offers = Offer.objects.all()
    appliedoffer = Offer.objects.get(offer_id=rest.offer_id)
    i1 = ItemType.objects.all()
    itemtypes = []
    vegarray = [[0, "non veg"], [1, "veg"]]
    for x in i1:
        itemtypes.append([x.type_id, x.name])
    context = {
        "menu": menu,
        "user": request.user,
        "itemtypes": itemtypes,
        "vegarray": vegarray,
        "offer": offers,
        "applied": appliedoffer
    }
    return render(request, 'webapp/menu_modify.html', context)
def orderlist(request):
    """Restaurant-side order dashboard.

    On POST, updates the status of the order named in the form (codes 1-5 map
    to the ``Order.ORDER_STATE_*`` constants).  It then renders every order of
    the logged-in owner's restaurant, newest first, with per-item subtotals,
    tax, discount, instructions, status code and review.
    """
    if request.POST:
        oid = request.POST['orderid']
        select = request.POST['orderstatus']
        print("manas")  # debug trace
        print(oid)
        select = int(select)
        print(select)
        try:
            order = Order.objects.get(order_id=oid)
        except Order.DoesNotExist:
            order = None
        # print(order.restaurant.name)
        if order is not None:
            # Map the numeric form value to the model's state constant;
            # unknown values fall back to 1.
            # x = Order.ORDER_STATE_WAITING
            if select == 1:
                x = Order.ORDER_STATE_PLACED
            elif select == 2:
                x = Order.ORDER_STATE_ACKNOWLEDGED
            elif select == 3:
                x = Order.ORDER_STATE_COMPLETED
            elif select == 4:
                x = Order.ORDER_STATE_DISPATCHED
            elif select == 5:
                x = Order.ORDER_STATE_CANCELLED
            else:
                x = 1
            order.status = x
            print("ml")
            order.save()
    ownner = RestaurantOwner.objects.get(user_id=request.user.id)
    restaurant = Restaurant.objects.get(owner=ownner)
    orders = Order.objects.filter(restaurant=restaurant).order_by('-datetime')
    print("hi")
    # print(orders[0].instructions)
    corders = []
    for order in orders:
        # Each corder row: [customer name, phone, items, total, subtotal,
        # tax, instructions, order id, status code, review, discount].
        # user = User.objects.get(id=order.user_id)
        corder = []
        # NOTE(review): looks up Customer by ``id=order.user_id`` while
        # myorders filters orders by ``user_id=customer.id`` -- verify the
        # two foreign keys really are symmetric.
        customer = Customer.objects.get(id=order.user_id)
        # print('cust')
        # print(customer.f_name)
        corder.append(customer.f_name + ' ' + customer.l_name)
        corder.append(customer.phone)
        items_list = OrderDetail.objects.filter(order_id=order.order_id)
        print("item")
        # print(items_list[0].)
        items = []
        without_tax = 0
        for item in items_list:
            citem = []
            item_name = FoodItem.objects.get(food_item_id=item.food_item_id)
            citem.append(item_name.name)
            citem.append(item.quantity)
            fooditem = FoodRestaurant.objects.get(food_item_id=item.food_item_id)
            print("ok")
            print(fooditem.cost)
            without_tax += fooditem.cost * item.quantity
            citem.append(fooditem.cost * item.quantity)
            items.append(citem)
        corder.append(items)
        yu = Offer.objects.get(
            offer_id=order.offer_id)
        # The total never drops below the tax amount, even after discount.
        if (int(without_tax) + int(order.tax) - int(yu.discount)) < int(order.tax):
            corder.append(int(order.tax))
        else:
            corder.append(int(without_tax) + int(order.tax) - int(yu.discount))
        # corder.append(without_tax + order.tax)
        corder.append(without_tax)
        corder.append(order.tax)
        corder.append(order.instructions)
        corder.append(order.order_id)
        x = order.status
        # Translate the state constant back into the numeric code the
        # template expects; orders in any other state are skipped entirely.
        if x == Order.ORDER_STATE_PLACED:
            x = 1
        elif x == Order.ORDER_STATE_ACKNOWLEDGED:
            x = 2
        elif x == Order.ORDER_STATE_COMPLETED:
            x = 3
        elif x == Order.ORDER_STATE_DISPATCHED:
            x = 4
        elif x == Order.ORDER_STATE_CANCELLED:
            x = 5
        else:
            continue
            # x = 1
        print('i am here')
        corder.append(x)
        corder.append(order.review)
        corder.append(yu.discount)
        corders.append(corder)
    context = {
        "orders": corders,
    }
    return render(request, "webapp/order-list.html", context)
def myorders(request):
    """Customer-side order history.

    On POST, stores the submitted review and rating (rating defaults to 4)
    on the order named in the form.  It then renders all of the customer's
    orders, newest first, with per-item subtotals, totals, tax, discount,
    rating, review and status.
    """
    if request.POST:
        oid = request.POST['orderid']
        review = request.POST.get('review', '')
        print(review)  # debug trace
        print('review')
        rate = request.POST.get('rating', 4)
        try:
            order = Order.objects.get(order_id=oid)
        except Order.DoesNotExist:
            order = None
        print('order')
        if order is not None:
            order.review = review
            order.rating = int(rate)
            order.save()
    customer = Customer.objects.get(user_id=request.user.id)
    orders = Order.objects.filter(user_id=customer.id).order_by('-datetime')
    corders = []
    for order in orders:
        # Each corder row: [restaurant name, phone, items, total, subtotal,
        # tax, instructions, order id, rating, review, status, discount].
        corder = []
        rest = Restaurant.objects.get(restaurant_id=order.restaurant_id)
        corder.append(rest.name)
        corder.append(customer.phone)
        items_list = OrderDetail.objects.filter(order_id=order.order_id)
        items = []
        without_tax = 0
        for item in items_list:
            citem = []
            item_name = FoodItem.objects.get(food_item_id=item.food_item_id)
            citem.append(item_name.name)
            citem.append(item.quantity)
            fooditem = FoodRestaurant.objects.get(food_item_id=item.food_item_id)
            without_tax += fooditem.cost * item.quantity
            citem.append(fooditem.cost * item.quantity)
            items.append(citem)
        corder.append(items)
        yu = Offer.objects.get(
            offer_id=order.offer_id)
        # The total never drops below the tax amount, even after discount.
        if (int(without_tax) + int(order.tax) - int(yu.discount)) < int(order.tax):
            corder.append(int(order.tax))
        else:
            corder.append(int(without_tax) + int(order.tax) - int(yu.discount))
        corder.append(without_tax)
        corder.append(order.tax)
        corder.append(order.instructions)
        corder.append(order.order_id)
        corder.append(order.rating)
        corder.append(order.review)
        x = order.status
        corder.append(x)
        corder.append(yu.discount)
        corders.append(corder)
    context = {
        "orders": corders,
    }
    return render(request, "webapp/my_order.html", context)
| 32.601966 | 115 | 0.585915 | 0 | 0 | 0 | 0 | 6,968 | 0.262567 | 0 | 0 | 7,901 | 0.297724 |
7ef4e804662096ec1a9ee780599e15a0cae458b8 | 3,106 | py | Python | src/view/services_read_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
] | null | null | null | src/view/services_read_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
] | null | null | null | src/view/services_read_page.py | nbilbo/services_manager | 74e0471a1101305303a96d39963cc98fc0645a64 | [
"MIT"
] | null | null | null | """Frame to show all service\'s register\'s.
"""
import tkinter.ttk
from src.view import constants
from src.view.services_page import ServicesPage
class ServicesReadPage(ServicesPage):
    """Page that lists every service register and offers CRUD controls."""

    def __init__(self, parent, controller, *args, **kwargs):
        super().__init__(parent, *args, **kwargs)
        self.handler = Handler(self, controller)
        self.create_treeview()
        self.create_crud_buttons()
        self.create_binds()
        self.set_title("Services")

    def create_treeview(self):
        """Build the treeview that displays the data."""
        self.treeview = tkinter.ttk.Treeview(self)
        self.treeview.pack(side="top", fill="both", expand=True,
                           padx=constants.PADX, pady=constants.PADY)

    def create_crud_buttons(self):
        """Build the add/update/delete button row."""
        container = tkinter.ttk.Frame(self)
        container.pack(side="top", fill="both")

        self.add_button = tkinter.ttk.Button(container, text="Add")
        self.update_button = tkinter.ttk.Button(container, text="update")
        self.delete_button = tkinter.ttk.Button(container, text="delete")

        for crud_button in (self.add_button, self.update_button, self.delete_button):
            crud_button.pack(side="left", fill="both", expand=True,
                             padx=constants.PADX, pady=constants.PADY)

    def create_binds(self):
        """Wire the button commands to the handler callbacks."""
        self.back_button["command"] = self.handler.inicialize_home_page
        self.add_button["command"] = self.handler.inicialize_services_add_page
        self.delete_button["command"] = self.handler.inicialize_services_delete_page
        self.update_button["command"] = self.handler.inicialize_services_update_page

    def get_add_button(self):
        """Return the add button (tkinter.ttk.Button)."""
        return self.add_button

    def get_update_button(self):
        """Return the update button (tkinter.ttk.Button)."""
        return self.update_button

    def get_delete_button(self):
        """Return the delete button (tkinter.ttk.Button)."""
        return self.delete_button

    def get_treeview(self):
        """Return the data treeview (tkinter.ttk.Treeview)."""
        return self.treeview
class Handler(object):
    """Routes widget events to the application controller's page switches."""

    def __init__(self, widget, controller):
        """
        :param widget: page that owns this handler
        :param controller: controller exposing the ``inicialize_*_page`` methods
        """
        # Fix: the original called ``super(Handler).__init__()``, which builds
        # an *unbound* super object and initialises that object instead of
        # running the cooperative ``__init__`` chain for this instance.
        super().__init__()
        self.widget = widget
        self.controller = controller

    def inicialize_home_page(self):
        """Ask the controller to show the home page."""
        self.controller.inicialize_home_page()

    def inicialize_services_add_page(self):
        """Ask the controller to show the add-service page."""
        self.controller.inicialize_services_add_page()

    def inicialize_services_delete_page(self):
        """Ask the controller to show the delete-service page."""
        self.controller.inicialize_services_delete_page()

    def inicialize_services_update_page(self):
        """Ask the controller to show the update-service page."""
        self.controller.inicialize_services_update_page()
| 28.236364 | 106 | 0.59369 | 2,950 | 0.949775 | 0 | 0 | 0 | 0 | 0 | 0 | 516 | 0.16613 |
7ef51c0080ffee9c24edcdbb55e7295f1c8931e0 | 6,649 | py | Python | datamodules/dataset.py | ayhokuyan/CartooNet | 61f0ed752a52a9667bc0dd4f8eff2ba708765594 | [
"MIT"
] | 2 | 2021-04-25T19:04:38.000Z | 2021-04-26T01:13:15.000Z | datamodules/dataset.py | ofirkris/CartooNet | 3bda4a4a57148fc1ee9edaccbae25e921132c2ce | [
"MIT"
] | 2 | 2021-01-09T20:43:45.000Z | 2021-10-12T16:23:19.000Z | datamodules/dataset.py | ofirkris/CartooNet | 3bda4a4a57148fc1ee9edaccbae25e921132c2ce | [
"MIT"
] | 3 | 2021-01-07T10:35:47.000Z | 2021-12-12T03:45:58.000Z | from torchvision.datasets import VisionDataset
from datamodules.dsfunction import imread
from torch.utils.data import Dataset, RandomSampler, Sampler, DataLoader, TensorDataset, random_split, ConcatDataset
import os
import glob
from typing import List, Sequence, Tuple
from itertools import cycle, islice
import torch
from math import ceil
class DataFolder(VisionDataset):
    """Dataset over the files under *root* selected by a glob *pattern*.

    Each sample is read with *loader*; when a transform is configured the
    loaded value is expanded into it as keyword arguments, so the loader is
    expected to return a mapping in that case.
    """

    def __init__(self, root, loader: callable, pattern: str, transforms=None, transform=None, target_transform=None):
        super().__init__(root, transforms, transform, target_transform)
        self.loader = loader
        self.samples = glob.glob(os.path.join(root, pattern))

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, index: int):
        sample = self.loader(self.samples[index])
        if self.transform is None:
            return sample
        return self.transform(**sample)

    def size(self, idx):
        # Same value for every index; mirrors __len__.
        return len(self.samples)
class ImageFolder(VisionDataset):
    """Dataset over every entry directly inside *root*, loaded with ``imread``.

    NOTE(review): ``os.listdir`` also returns sub-directories; confirm *root*
    only contains image files.
    """

    def __init__(self, root, transforms=None, transform=None, target_transform=None):
        super().__init__(root, transforms, transform, target_transform)
        self.loader = imread
        self.samples = os.listdir(root)

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, index: int):
        path = self.samples[index]
        # Build the path with os.path.join instead of string concatenation so
        # it is assembled correctly on every platform.
        sample = self.loader(os.path.join(self.root, path))
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def size(self, idx):
        # Same value for every index; mirrors __len__.
        return len(self.samples)
class ImagePaths(VisionDataset):
    """Dataset backed by an explicit list of image file paths."""

    # Fix: the original signature was ``paths=List[str]``, which makes the
    # typing object the *default value* instead of an annotation; ``paths``
    # is now a properly annotated required parameter.
    def __init__(self, paths: List[str], transforms=None, transform=None, target_transform=None):
        super().__init__('.', transforms, transform, target_transform)
        self.loader = imread
        self.samples = paths

    def __len__(self) -> int:
        return len(self.samples)

    def __getitem__(self, index: int):
        path = self.samples[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return sample

    def size(self, idx):
        # Same value for every index; mirrors __len__.
        return len(self.samples)
class MergeDataset(Dataset):
    """Combine several datasets into one; each item is addressed by a tuple
    holding one index per wrapped dataset.
    """

    def __init__(self, *tensors):
        """Store the wrapped datasets and remember their individual sizes."""
        self.tensors = tensors
        self.sizes = [len(dataset) for dataset in self.tensors]

    def __getitem__(self, indexs: List[int]):
        # Pair each dataset with its index and fetch one item from each.
        return tuple(dataset[index] for dataset, index in zip(self.tensors, indexs))

    def __len__(self):
        # Length of the merged dataset is that of the largest member.
        return max(self.sizes)
class MultiRandomSampler(RandomSampler):
    """Random sampler for a :class:`MergeDataset`.

    Every member dataset is stretched to the length of the longest one: the
    longest is shuffled without replacement, the shorter ones are drawn with
    replacement.
    """

    def __init__(self, data_source: MergeDataset, replacement=True, num_samples=None, generator=None):
        """
        Args:
            data_source (MergeDataset): merged dataset to draw indices for
            replacement (bool, optional): kept for API compatibility;
                shorter datasets are always drawn with replacement.
                Defaults to True.
            num_samples (optional): per-dataset sizes; derived from the data
                source when None. Defaults to None.
            generator (optional): torch random generator. Defaults to None.
        """
        self.data_source: MergeDataset = data_source
        self.replacement = replacement
        self._num_samples = num_samples
        self.generator = generator
        self.maxn = len(self.data_source)

    @property
    def num_samples(self):
        # Dataset sizes might change at runtime, so resolve them lazily.
        if self._num_samples is None:
            self._num_samples = self.data_source.sizes
        return self._num_samples

    def __iter__(self):
        streams = []
        for size in self.num_samples:
            if size == self.maxn:
                # Longest dataset: a plain permutation, no replacement.
                indices = torch.randperm(size, generator=self.generator)
            else:
                # Shorter dataset: draw maxn indices with replacement.
                indices = torch.randint(high=size, size=(self.maxn,),
                                        dtype=torch.int64, generator=self.generator)
            streams.append(indices.tolist())
        return zip(*streams)

    def __len__(self):
        return len(self.data_source)
class MultiSequentialSampler(Sampler):
    r"""Sequential sampler for a :class:`MergeDataset`.

    Shorter member datasets are cycled so every index stream reaches the
    length of the longest dataset.

    Arguments:
        data_source (Dataset): dataset to sample from
    """

    def __init__(self, data_source: MergeDataset):
        self.data_source: MergeDataset = data_source
        self.num_samples = data_source.sizes
        self.maxn = len(data_source)

    def __iter__(self):
        streams = []
        for size in self.num_samples:
            if size == self.maxn:
                streams.append(iter(range(size)))
            else:
                # Repeat the shorter index stream out to maxn entries.
                streams.append(islice(cycle(range(size)), self.maxn))
        return zip(*streams)

    def __len__(self):
        return len(self.data_source)
class MultiBatchDataset(MergeDataset):
    """Companion dataset for :class:`MultiBatchSampler`.

    Each index is a ``(dataset_index, sample_index)`` pair selecting a single
    item from one of the wrapped datasets.
    NOTE inputs type must be MergeDataset.
    """

    def __getitem__(self, indexs: List[int]):
        dataset_index, sample_index = indexs
        return self.tensors[dataset_index][sample_index]
class MultiBatchSampler(Sampler):
    r"""Sample another sampler by repeats times of mini-batch indices.

    Yields batches of ``(sampler_index, sample_index)`` pairs, interleaving
    the wrapped samplers according to their ``repeats`` weights.
    NOTE always drop last !

    Args:
        samplers (Sampler or Iterable): Base sampler. Can be any iterable object
        with ``__len__`` implemented.
        repeats (list): repeats time
        batch_size (int): Size of mini-batch.
    """
    def __init__(self, samplers: list, repeats: list, batch_size):
        # Since collections.abc.Iterable does not check for `__getitem__`, which
        # is one way for an object to be an iterable, we don't do an `isinstance`
        # check here.
        if not isinstance(batch_size, int) or isinstance(batch_size, bool) or \
                batch_size <= 0:
            raise ValueError("batch_size should be a positive integer value, "
                             "but got batch_size={}".format(batch_size))
        assert len(samplers) == len(repeats), 'Samplers number must equal repeats number'
        # The sampler with the smallest repeat weight defines the base length.
        minweight = min(repeats)
        minlength = len(samplers[repeats.index(minweight)])
        # e.g. repeats=[2, 1] -> cycle over [0, 0, 1]: two batches from
        # sampler 0, then one batch from sampler 1, forever.
        self.sampler_loop = cycle([i for i, w in enumerate(repeats) for _ in range(w)])
        # expand to target length
        self.repeats = repeats
        self.sizes = [minlength * ceil(w / minweight) for w in repeats]
        self.size = sum(self.sizes)
        self.batch_size = batch_size
        self.samplers: List[Sampler] = samplers
        self.new_samplers = []
    def __iter__(self):
        # Restart iteration: each sampler's index stream is cycled out to its
        # expanded size. NOTE(review): state lives on self, so iterating the
        # same instance from two places concurrently shares these streams.
        self.new_samplers.clear()
        self.new_samplers = [islice(cycle(smp), size)
                             for smp, size in
                             zip(self.samplers, self.sizes)]
        return self
    def __next__(self):
        # NOTE sampler_idx choice dataset
        sampler_idx = next(self.sampler_loop)
        sampler: Sampler = self.new_samplers[sampler_idx]
        # StopIteration from an exhausted islice propagates and ends the epoch,
        # which is what enforces the "always drop last" behaviour.
        return [(sampler_idx, next(sampler)) for _ in range(self.batch_size)]
    def __len__(self):
        # NOTE find min batch scale factor
        scale = ((min(self.sizes) // self.batch_size) // min(self.repeats))
        return sum([n * scale for n in self.repeats])
| 31.661905 | 116 | 0.689277 | 6,285 | 0.945255 | 0 | 0 | 189 | 0.028425 | 0 | 0 | 1,394 | 0.209656 |
7efb31a8e8af90737b8da1f5791e1e718e4838b5 | 5,251 | py | Python | anarky/interface.py | MulberryBeacon/anarky | 54684e4422d36c6ea3c0bb3fab5af56002864690 | [
"MIT"
] | 1 | 2015-05-12T13:05:04.000Z | 2015-05-12T13:05:04.000Z | anarky/interface.py | MulberryBeacon/anarky | 54684e4422d36c6ea3c0bb3fab5af56002864690 | [
"MIT"
] | null | null | null | anarky/interface.py | MulberryBeacon/anarky | 54684e4422d36c6ea3c0bb3fab5af56002864690 | [
"MIT"
] | null | null | null | # -*- coding: utf8 -*-
"""
Common user interface operations.
Author: Eduardo Ferreira
License: MIT (see LICENSE for details)
"""
# Module import
# --------------------------------------------------------------------------------------------------
from os import walk
from os.path import isdir, isfile, join
import argparse
import logging
import sys
from .__version__ import __version__
# Constants
# --------------------------------------------------------------------------------------------------
# Error message templates: the first placeholder is the entity kind
# ("File", "Directory", ...) and the second is the entity name.
ERROR = "{} '{}' is not available (doesn't exist or no privileges to access it)!"
ERROR_INVALID = "{} '{}' is invalid!"
ERROR_INVALID_LIST = 'The list of input files is invalid!'
ERROR_EMPTY_LIST = 'The list of input files is empty!'
# Logger
# --------------------------------------------------------------------------------------------------
# Module-level logger; note basicConfig runs at import time and configures
# the root logger for the whole process.
logging.basicConfig(level=logging.INFO)
_logger = logging.getLogger(__name__)
def keyboard_interrupt():
    """Log a warning stating that the program execution was interrupted."""
    # Fix: logging.Logger.warn is a deprecated alias; use the supported
    # warning() method instead.
    _logger.warning('\nThe program execution was interrupted!\n')
# Methods :: Command line options and instructions
# --------------------------------------------------------------------------------------------------
def parse_options(program, description, decode=False):
    """
    Parses and retrieves the values for the full set of command line arguments.

    :param program: The name of the program
    :param description: The description of the program
    :param decode: Flag that indicates if it's an encoding or decoding
        operation (currently not used by the parser itself)
    :return: The list of command line arguments
    """
    parser = argparse.ArgumentParser(prog=program, description=description)
    parser.add_argument('-v', '--version', action='version', version='%(prog)s ' + __version__)

    options = parser.add_argument_group('options')
    options.add_argument('-f', '--files', nargs='+', metavar='FILES', dest='input_files',
                         help='input files', required=True)
    # TODO: revisit whether the destination should stay optional, and whether
    # its attribute name should simply be "output".
    options.add_argument('-o', '--output', metavar='OUTPUT', dest='output_dir', help='output directory')

    return parser.parse_args()
def get_options(program, description, decode=False):
    """
    Parses, retrieves and validates the values for the full set of command line arguments.
    Exits the process with status 1 when no input files remain after
    validation or when the output directory is unavailable.
    :param program: The name of the program
    :param description: The description of the program
    :param decode: Flag that indicates if it's an encoding or decoding operation
    :return: Tuple of (validated input files, output directory)
    """
    args = parse_options(program, description, decode)
    # Checks the input files
    files = get_input_files(args.input_files)
    if len(files) == 0:
        _logger.error(ERROR_EMPTY_LIST)
        sys.exit(1)
    # TODO: this bit needs to be completely reviewed!
    # Checks the output directory, cover and tag parameters
    # NOTE(review): the disabled check below also validated a cover image in
    # encode mode; confirm whether that behaviour should be restored.
    """
    if not (directory_exists(args.output_dir) and not (
            not decode and args.cover is not None and not file_exists(args.cover))):
        sys.exit(1)
    """
    # directory_exists already logs on failure, so a missing directory is
    # reported twice here -- NOTE(review): possibly intentional emphasis.
    if not directory_exists(args.output_dir):
        _logger.error(ERROR.format('Directory', args.output_dir))
        sys.exit(1)
    #return files, args.output_dir, args.cover, args.tags, args.playlist
    return files, args.output_dir
# Methods :: File system library
# --------------------------------------------------------------------------------------------------
def file_exists(filename):
    """
    Checks if a file is a valid file system entry.

    :param filename: The name of a file
    :return: True if the given file name matches an actual file; False otherwise
    """
    try:
        found = isfile(filename)
    except TypeError:
        # Non-path values (e.g. None) are reported as invalid.
        _logger.error(ERROR_INVALID.format('File', filename))
        return False
    if found:
        return True
    _logger.error(ERROR.format('File', filename))
    return False
def directory_exists(directory):
    """
    Checks if a directory is a valid file system entry.

    :param directory: The name of a directory
    :return: True if the given directory name matches an actual directory; False otherwise
    """
    try:
        found = isdir(directory)
    except TypeError:
        # Non-path values (e.g. None) are reported as invalid.
        _logger.error(ERROR_INVALID.format('Directory', directory))
        return False
    if found:
        return True
    _logger.error(ERROR.format('Directory', directory))
    return False
def get_input_files(entries):
    """
    Checks and stores the input files provided in the command line interface.

    :param entries: The set of input entries (can be either files or directories)
    :return: A complete list of the input files
    """
    collected = []
    try:
        for entry in entries:
            if isfile(entry):
                collected.append(entry)
            elif isdir(entry):
                # Expand a directory into every file found below it.
                for root, _directories, filenames in walk(entry):
                    collected.extend(join(root, name) for name in filenames)
            else:
                _logger.error(ERROR.format('File system entry', entry))
    except TypeError:
        # Triggered when *entries* itself is not iterable over paths.
        _logger.error(ERROR_INVALID_LIST)
    return collected
7efc02867d62088c24fe38cb0e01aef98549a474 | 585 | py | Python | source/intentionally_blank/ext/formatters/eof_newline/formatter.py | sixty-north/intentionally-blank | ef00c91003811b05170f417a3acbcf4bf92bd643 | [
"MIT"
] | null | null | null | source/intentionally_blank/ext/formatters/eof_newline/formatter.py | sixty-north/intentionally-blank | ef00c91003811b05170f417a3acbcf4bf92bd643 | [
"MIT"
] | null | null | null | source/intentionally_blank/ext/formatters/eof_newline/formatter.py | sixty-north/intentionally-blank | ef00c91003811b05170f417a3acbcf4bf92bd643 | [
"MIT"
] | null | null | null | from intentionally_blank.formatter import Formatter
class NewlineAtEofFormatter(Formatter):
    """Formatter guaranteeing that every line, including the file's last,
    carries a newline terminator.
    """

    def format(self, lines):
        """Lazily normalise *lines* so each ends with a newline.

        Args:
            lines: An iterable series of strings.

        Returns:
            An iterable series of strings, each with a newline terminator.
        """
        return (ensure_newline_terminator(line) for line in lines)
def ensure_newline_terminator(line):
    """Return *line* unchanged if it ends with a newline, otherwise append one."""
    return line if line.endswith("\n") else f"{line}\n"
| 25.434783 | 81 | 0.635897 | 416 | 0.711111 | 0 | 0 | 0 | 0 | 0 | 0 | 295 | 0.504274 |
7efefef66bd1feb3647085e8c53cf1e989e233d5 | 5,049 | py | Python | scripts/analyze.py | PuchatekwSzortach/voc_fcn | db881196208f280c47ccfa5743ff2f1e7d9bd009 | [
"MIT"
] | 5 | 2018-04-23T02:57:02.000Z | 2019-06-17T01:26:34.000Z | scripts/analyze.py | PuchatekwSzortach/voc_fcn | db881196208f280c47ccfa5743ff2f1e7d9bd009 | [
"MIT"
] | null | null | null | scripts/analyze.py | PuchatekwSzortach/voc_fcn | db881196208f280c47ccfa5743ff2f1e7d9bd009 | [
"MIT"
] | null | null | null | """
Script for analyzing model's performance
"""
import argparse
import sys
import collections
import yaml
import tensorflow as tf
import tqdm
import numpy as np
import net.data
import net.ml
import net.utilities
def report_iou_results(categories_intersections_counts_map, categories_unions_counts_map):
    """
    Prints the per-category mean intersection-over-union, followed by the
    overall mean across all categories.
    :param categories_intersections_counts_map: dictionary mapping categories to a list of intersection counts
    for different images for that category
    :param categories_unions_counts_map: dictionary mapping categories to a list of unions counts
    for different images for that category
    """
    per_category_means = []
    for category in sorted(categories_intersections_counts_map.keys()):
        # Mean iou over all images = total intersection / total union.
        intersections = categories_intersections_counts_map[category]
        unions = categories_unions_counts_map[category]
        mean_iou = np.sum(intersections) / np.sum(unions)
        print("{} mean iou -> {:.5f}".format(category, mean_iou))
        per_category_means.append(mean_iou)
    print("\nMean iou across all categories: {:.5f}".format(np.mean(per_category_means)))
def get_segmentation_cubes_generator(samples_generator, model, indices_to_colors_map, void_color):
    """
    Build a generator that pulls (image, segmentation) tuples from
    *samples_generator* and yields (ground_truth_segmentation_cube,
    predicted_segmentation_cube) tuples.
    :param samples_generator: generator that yields (image, segmentation) tuple
    :param model: net.ml.Model instance
    :param indices_to_colors_map: dictionary mapping categories indices to their colors in segmentation images
    :param void_color: 3-elements tuple that represents color of pixels without a category
    :return: generator that yields (ground_truth_segmentation_cube, predicted_segmentation_cube) tuples
    """
    while True:
        image, segmentation = next(samples_generator)
        ground_truth_cube = net.data.get_segmentation_cube(segmentation, indices_to_colors_map)

        # Model output is continuous; round-trip through a segmentation image
        # to obtain the thresholded, categorical cube.
        raw_prediction = model.predict(image)
        predicted_image = net.data.get_segmentation_image(
            raw_prediction, indices_to_colors_map, void_color)
        predicted_cube = net.data.get_segmentation_cube(
            predicted_image, indices_to_colors_map)

        yield ground_truth_cube, predicted_cube
def analyze_iou(model, generator_factory, config):
    """
    Analyses intersection over union of model predictions with ground truth using VOC validation dataset.
    Prints per-category and overall mean iou via report_iou_results.
    :param model: net.ml.Model instance
    :param generator_factory: VOCSamplesGeneratorFactory instance
    :param config: object with configuration details
    """
    indices_to_colors_map, void_color = net.data.get_colors_info(len(config["categories"]))
    segmentation_cubes_generator = get_segmentation_cubes_generator(
        generator_factory.get_generator(), model, indices_to_colors_map, void_color)
    # Per-category pixel counts accumulated across the whole validation set.
    categories_intersections_counts_map = collections.defaultdict(list)
    categories_unions_counts_map = collections.defaultdict(list)
    # Debug shortcut kept for quick runs over a handful of samples:
    # for _ in tqdm.tqdm(range(10)):
    for _ in tqdm.tqdm(range(generator_factory.get_size())):
        ground_truth_segmentation_cube, predicted_segmentation_cube = next(segmentation_cubes_generator)
        # Get iou for each category that is present in ground truth cube
        for index, category in enumerate(config["categories"]):
            intersection_pixels = np.logical_and(
                ground_truth_segmentation_cube[:, :, index], predicted_segmentation_cube[:, :, index])
            categories_intersections_counts_map[category].append(np.sum(intersection_pixels))
            union_pixels = np.logical_or(
                ground_truth_segmentation_cube[:, :, index], predicted_segmentation_cube[:, :, index])
            categories_unions_counts_map[category].append(np.sum(union_pixels))
    report_iou_results(categories_intersections_counts_map, categories_unions_counts_map)
def main():
    """
    Script entry point: parse the --config path, load the model checkpoint
    and run the iou analysis over the VOC validation set.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', action="store", required=True)
    parsed = parser.parse_args(sys.argv[1:])

    with open(parsed.config) as config_file:
        config = yaml.safe_load(config_file)

    # Rebuild the network and restore the trained weights.
    network = net.ml.FullyConvolutionalNetwork(categories_count=len(config["categories"]))
    session = tf.keras.backend.get_session()
    model = net.ml.Model(session, network, config["categories"])
    model.load(config["model_checkpoint_path"])

    generator_factory = net.data.VOCSamplesGeneratorFactory(
        config["voc"]["data_directory"], config["voc"]["validation_set_path"], config["size_factor"])

    analyze_iou(model, generator_factory, config)


if __name__ == "__main__":
    main()
| 37.4 | 110 | 0.754011 | 0 | 0 | 1,377 | 0.272727 | 0 | 0 | 0 | 0 | 1,673 | 0.331353 |
7eff1983d7fbe1e52b15da6db6b21e34c240b7dd | 5,095 | py | Python | 12_solution.py | kng/AoC2020 | 236865234f4fbf780ec289c15f9d678202b047cf | [
"MIT"
] | null | null | null | 12_solution.py | kng/AoC2020 | 236865234f4fbf780ec289c15f9d678202b047cf | [
"MIT"
] | null | null | null | 12_solution.py | kng/AoC2020 | 236865234f4fbf780ec289c15f9d678202b047cf | [
"MIT"
] | null | null | null | # --- Day 12: Rain Risk ---
# https://adventofcode.com/2020/day/12
import time
simple = False  # True: use the small inline sample instead of the input file
verbose = 1  # 0 = silent, 1 = final summary, >1 = trace every command
if simple:
    data = 'F10\nN3\nF7\nR90\nF11'.splitlines()
else:
    # NOTE(review): the file handle is never closed; harmless in a short
    # script, but a ``with`` block would be tidier.
    file = open('12_input.txt', 'r')
    data = file.read().splitlines()
class Ship(object):
    """Advent of Code 2020 day 12 ship simulator.

    Position is tracked as ``x`` (+north / -south) and ``y`` (+east / -west).
    Mode ``m`` selects the rules: 0 moves the ship directly (part 1), 1 moves
    a waypoint at ``(wx, wy)`` relative to the ship (part 2).
    """

    def __init__(self, d=0, x=0, y=0, m=0, wx=0, wy=0):
        self.dir = d  # heading index: 0=N, 1=E, 2=S, 3=W
        self.dirAsc = ['north', 'east', 'south', 'west']
        self.x = x  # +N / -S
        self.wx = wx  # waypoint north/south offset
        self.y = y  # +E / -W
        self.wy = wy  # waypoint east/west offset
        self.m = m  # ship mode: 0=part1, 1=part2
        self.validCmd = ['N', 'S', 'E', 'W', 'L', 'R', 'F']

    def reset(self, d=0, x=0, y=0, m=0, wx=0, wy=0):
        """Return the ship to a known state (defaults: origin, facing north, part-1 mode)."""
        self.dir, self.m = d, m
        self.x, self.y = x, y
        self.wx, self.wy = wx, wy

    def command(self, cmd):
        """Execute one instruction string, e.g. ``'F10'`` or ``'R90'``."""
        if len(cmd) <= 1:
            print('command too short')
            return
        if cmd[0] not in self.validCmd:
            print('invalid command')
            return
        dist = int(cmd[1:])
        if self.m == 0:
            self._move_part1(cmd[0], dist)
        else:
            self._move_part2(cmd[0], dist)

    def _move_part1(self, action, dist):
        # Part-1 rules: N/S/E/W translate the ship, L/R turn it in 90-degree
        # steps, F advances along the current heading.
        if action == 'N':
            self.x += dist
        elif action == 'S':
            self.x -= dist
        elif action == 'E':
            self.y += dist
        elif action == 'W':
            self.y -= dist
        elif action == 'L':
            self.dir = (self.dir - int(dist / 90)) % 4
        elif action == 'R':
            self.dir = (self.dir + int(dist / 90)) % 4
        else:  # 'F': unit step per heading, ordered N, E, S, W
            step_x, step_y = ((1, 0), (0, 1), (-1, 0), (0, -1))[self.dir]
            self.x += dist * step_x
            self.y += dist * step_y

    def _move_part2(self, action, dist):
        # Part-2 rules: N/S/E/W translate the waypoint, L/R rotate it around
        # the ship, F moves the ship towards the waypoint dist times.
        if action == 'N':
            self.wx += dist
        elif action == 'S':
            self.wx -= dist
        elif action == 'E':
            self.wy += dist
        elif action == 'W':
            self.wy -= dist
        elif action == 'L':
            # A left turn is expressed as the complementary number of
            # clockwise quarter turns (e.g. L90 -> 3 turns clockwise).
            self.dir = -int(dist / 90) % 4
        elif action == 'R':
            self.dir = int(dist / 90) % 4
        else:  # 'F'
            self.x += dist * self.wx
            self.y += dist * self.wy
        if self.dir > 0:
            # Apply the pending rotation as clockwise quarter turns:
            # (wx, wy) -> (-wy, wx) per turn.
            for _ in range(self.dir):
                self.wx, self.wy = -self.wy, self.wx
            self.dir = 0

    def print(self):
        """Print a human-readable position report for the current mode."""
        east_west = self.dirAsc[1] if self.y >= 0 else self.dirAsc[3]
        north_south = self.dirAsc[0] if self.x >= 0 else self.dirAsc[2]
        if self.m == 0:
            print('Ship position: {} units {}, {} units {}, facing {}'
                  .format(abs(self.y), east_west, abs(self.x), north_south,
                          self.dirAsc[self.dir]))
        else:
            wp_east_west = self.dirAsc[1] if self.wy >= 0 else self.dirAsc[3]
            wp_north_south = self.dirAsc[0] if self.wx >= 0 else self.dirAsc[2]
            print('Ship position: {} units {}, {} units {}\n'
                  'Waypoint position: {} units {}, {} units {}'
                  .format(abs(self.y), east_west, abs(self.x), north_south,
                          abs(self.wy), wp_east_west, abs(self.wx), wp_north_south))
def _sail(ship):
    """Run every input command on *ship*, then report its state and Manhattan distance."""
    for command in data:
        ship.command(command)
        if verbose > 1:
            print('cmd: {}'.format(command))
            ship.print()
    if verbose > 0:
        ship.print()
    print('distance {}'.format(abs(ship.x) + abs(ship.y)))


def main():
    """Solve both puzzle parts with the same Ship instance, timing each part."""
    t_start = time.time()
    # part 1: commands steer the ship directly, initially facing east
    ship = Ship(d=1)
    _sail(ship)
    t_middle = time.time()
    print("time elapsed: %s" % (t_middle - t_start))
    # part 2: commands steer a waypoint starting at 10 east / 1 north
    ship.reset(m=1, wx=1, wy=10)
    _sail(ship)
    t_end = time.time()
    print("time elapsed: %s" % (t_end - t_middle))


if __name__ == '__main__':
    main()
| 35.137931 | 93 | 0.366045 | 3,981 | 0.781354 | 0 | 0 | 0 | 0 | 0 | 0 | 597 | 0.117174 |
7d0038100c8c0111fa664f6eb6cc9dd2beee4fca | 335 | py | Python | chatting/models.py | aliakbars/tbdc | ac8fe28b781cbc5e6e9cf7dc9579cc94c7e9ec55 | [
"Apache-2.0"
] | null | null | null | chatting/models.py | aliakbars/tbdc | ac8fe28b781cbc5e6e9cf7dc9579cc94c7e9ec55 | [
"Apache-2.0"
] | null | null | null | chatting/models.py | aliakbars/tbdc | ac8fe28b781cbc5e6e9cf7dc9579cc94c7e9ec55 | [
"Apache-2.0"
] | null | null | null | from __future__ import unicode_literals
from django.db import models
from django.contrib.auth.models import User
# Create your models here.
class Chat(models.Model):
    """A single chat message sent from one user to another."""
    content = models.TextField()  # message body
    # NOTE(review): ForeignKey without on_delete implies Django < 2.0 — confirm target version
    sender = models.ForeignKey(User)
    receiver = models.ForeignKey(User)
    date_created = models.DateTimeField(auto_now_add=True) | 30.454545 | 58 | 0.776119 | 193 | 0.576119 | 0 | 0 | 0 | 0 | 0 | 0 | 26 | 0.077612 |
7d01524893d64dff4161903aeece165ee10064d8 | 4,118 | py | Python | src/neo_loader/helpers/tflite_model_helper.py | minlu1021/neo-loader | dcee791380c95b6c7bd5ae580fb252eefa6ae2ab | [
"Apache-2.0"
] | null | null | null | src/neo_loader/helpers/tflite_model_helper.py | minlu1021/neo-loader | dcee791380c95b6c7bd5ae580fb252eefa6ae2ab | [
"Apache-2.0"
] | null | null | null | src/neo_loader/helpers/tflite_model_helper.py | minlu1021/neo-loader | dcee791380c95b6c7bd5ae580fb252eefa6ae2ab | [
"Apache-2.0"
] | null | null | null | from collections import OrderedDict
from .abstract_model_helper import ModelHelper
from tflite.Tensor import Tensor
from tflite.Model import Model
from tflite.TensorType import TensorType
from typing import List
class TFLiteModelHelper(ModelHelper):
    """Model helper for TFLite flatbuffer models.

    Loads a ``.tflite`` file and exposes its input/output tensors together
    with their dtypes and shapes, validated against a user-supplied shape
    dictionary.
    """

    # flatbuffer tensor-type enum -> numpy-style dtype string
    TFLITE_TENSOR_TYPE_TO_DTYPE = {
        TensorType.UINT8: "uint8",
        TensorType.FLOAT32: "float32",
        TensorType.INT32: "int32",
        TensorType.INT64: "int64",
    }

    def __init__(self, model_path: str) -> None:
        super().__init__(model_path)
        self.__tflite_model = None
        self.__input_dtypes_dict = {}  # kept for state parity; dtypes are computed on demand
        self.__input_tensors = []
        self.__output_tensors = []

    @property
    def input_tensors(self) -> List[Tensor]:
        """Input tensors resolved by :meth:`extract_input_and_output_tensors`."""
        return self.__input_tensors

    @property
    def output_tensors(self) -> List[Tensor]:
        """Output tensors resolved by :meth:`extract_input_and_output_tensors`."""
        return self.__output_tensors

    @property
    def input_dtypes_dict(self) -> {str: str}:
        """Mapping of input tensor name -> dtype string."""
        return {
            tensor.Name().decode("utf-8"): self.TFLITE_TENSOR_TYPE_TO_DTYPE[tensor.Type()]
            for tensor in self.input_tensors
        }

    @property
    def tflite_model(self) -> Model:
        """The parsed flatbuffer model, or None before :meth:`load_model`."""
        return self.__tflite_model

    @staticmethod
    def get_supported_tflite_input_tensor_type() -> List[TensorType]:
        """Tensor types accepted as model inputs."""
        return [TensorType.FLOAT32, TensorType.UINT8]

    def load_model(self) -> None:
        """Read the file at ``self.model_path`` and parse it into a flatbuffer model."""
        try:
            import tflite.Model
        except ImportError:
            raise ImportError("The tflite package must be installed")
        with open(self.model_path, "rb") as model_file:
            model_buffer = model_file.read()
        self.__tflite_model = tflite.Model.Model.GetRootAsModel(model_buffer, 0)

    def extract_input_and_output_tensors(self, user_shape_dict=None) -> None:
        """Resolve and cache input/output tensors of the model's first subgraph.

        Raises if ``user_shape_dict`` is missing or does not cover all inputs,
        or if an input tensor has an unsupported dtype.
        """
        if user_shape_dict is None:
            raise Exception("Model input names and shapes must be provided")
        subgraph = self.tflite_model.Subgraphs(0)
        resolved_inputs = self.__get_input_tensors(subgraph, user_shape_dict)
        resolved_outputs = self.__get_output_tensors(subgraph)
        self.__input_tensors = list(resolved_inputs.values())
        self.__output_tensors = list(resolved_outputs.values())

    def __get_input_tensors(self, subgraph, user_shape_dict):
        # Validate dtype and shape coverage while collecting inputs in model order.
        supported_types = self.get_supported_tflite_input_tensor_type()
        collected = OrderedDict()
        for tensor_index in subgraph.InputsAsNumpy():
            tensor = subgraph.Tensors(tensor_index)
            tensor_name = tensor.Name().decode("utf-8")
            if tensor.Type() not in supported_types:
                raise Exception("Unsupported input data type for input {} with tflite tensor type {}".format(tensor_name, str(tensor.Type())))
            if tensor_name not in user_shape_dict:
                raise Exception("Please specify all input layers in data_shape.")
            collected[tensor_name] = tensor
        return collected

    def __get_output_tensors(self, subgraph):
        # Collect output tensors keyed by name, preserving model order.
        collected = OrderedDict()
        for tensor_index in subgraph.OutputsAsNumpy():
            tensor = subgraph.Tensors(tensor_index)
            collected[tensor.Name().decode("utf-8")] = tensor
        return collected

    def get_metadata(self) -> {str: List}:
        """Describe all inputs and outputs as name/dtype/shape dictionaries."""
        def describe(tensor):
            return {
                'name': tensor.Name().decode("utf-8"),
                'dtype': self.TFLITE_TENSOR_TYPE_TO_DTYPE[tensor.Type()],
                'shape': tensor.ShapeAsNumpy().tolist(),
            }
        return {
            "Inputs": [describe(tensor) for tensor in self.input_tensors],
            "Outputs": [describe(tensor) for tensor in self.output_tensors],
        }
| 41.59596 | 159 | 0.678728 | 3,904 | 0.948033 | 0 | 0 | 661 | 0.160515 | 0 | 0 | 328 | 0.07965 |
7d02e19d26499fd9832ccc2e030ef3666288dfd1 | 584 | py | Python | wingstructure/aero/aero_moment.py | helo9/wingstructure | ff82eb0b87e3b5ececff39895f959bfef468e7c3 | [
"MIT"
] | 7 | 2019-01-02T16:47:31.000Z | 2020-10-10T10:06:15.000Z | wingstructure/aero/aero_moment.py | helo9/wingstructure | ff82eb0b87e3b5ececff39895f959bfef468e7c3 | [
"MIT"
] | 9 | 2019-01-13T20:11:23.000Z | 2019-10-10T21:38:58.000Z | wingstructure/aero/aero_moment.py | helo9/wingstructure | ff82eb0b87e3b5ececff39895f959bfef468e7c3 | [
"MIT"
] | 1 | 2018-12-27T14:20:36.000Z | 2018-12-27T14:20:36.000Z | import numpy as np
def mean_momentcoefficient(wing, airfoil_db):
    """calculate mean coefficient of moment for wing

    Parameters
    ----------
    wing : Wing
        object describing wing
    airfoil_db : dict
        dictionary containing airfoil data

    Returns
    -------
    float
        chord-squared-weighted mean zero-lift moment coefficient of the wing

    Raises
    ------
    KeyError
        if an airfoil used in the wing is missing from airfoil_db
    """
    try:
        c_m0s = [airfoil_db[sec.airfoil].c_m0 for sec in wing.sections]
    except KeyError:
        raise KeyError('Not all airfoils used in wing are defined in airfoil_db!')
    ys = wing.ys
    cs = wing.chords
    # spanwise integration: C_m0 = 2/(S * MAC) * integral of c_m0(y) * c(y)^2 dy
    C_m0 = 2 / (wing.area * wing.mac) * np.trapz(c_m0s * cs**2, ys)
    return C_m0 | 24.333333 | 82 | 0.630137 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 261 | 0.446918 |
7d037a52977d0f0954d115a4089354137b1a38db | 22,333 | py | Python | backend/vqa_benchmarking_backend/metrics/metrics.py | patilli/vqa_benchmarking | 53a05d8956e71e99de6d97db5e7a7e400b6cc65f | [
"MIT"
] | 1 | 2021-12-09T06:32:14.000Z | 2021-12-09T06:32:14.000Z | backend/vqa_benchmarking_backend/metrics/metrics.py | patilli/vqa_benchmarking | 53a05d8956e71e99de6d97db5e7a7e400b6cc65f | [
"MIT"
] | null | null | null | backend/vqa_benchmarking_backend/metrics/metrics.py | patilli/vqa_benchmarking | 53a05d8956e71e99de6d97db5e7a7e400b6cc65f | [
"MIT"
] | null | null | null | from typing import List, Tuple
import sqlite3
import os
import random
import json
from vqa_benchmarking_backend.datasets.dataset import DatasetModelAdapter, DiagnosticDataset, DataSample
from vqa_benchmarking_backend.metrics.bias import eval_bias, inputs_for_image_bias_featurespace, inputs_for_image_bias_wordspace, inputs_for_question_bias_featurespace, inputs_for_question_bias_imagespace
from vqa_benchmarking_backend.metrics.robustness import eval_robustness, inputs_for_image_robustness_featurespace, inputs_for_image_robustness_imagespace, inputs_for_question_robustness_featurespace, inputs_for_question_robustness_wordspace
from vqa_benchmarking_backend.metrics.sear import eval_sears, inputs_for_question_sears
from vqa_benchmarking_backend.metrics.uncertainty import certainty
from vqa_benchmarking_backend.metrics.model_info import model_info
from tqdm import tqdm
import torch
def _reduce_min(tensor: torch.FloatTensor):
reduced = tensor.clone()
while len(reduced.size()) > 1:
reduced = reduced.min(dim=0)[0]
return reduced
def _reduce_max(tensor: torch.FloatTensor):
reduced = tensor.clone()
while len(reduced.size()) > 1:
reduced = reduced.max(dim=0)[0]
return reduced
def _get_img_feature_range(adapter: DatasetModelAdapter, dataset: DiagnosticDataset, output_path: str, num_samples: int = 500) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """
    Estimate the per-column range of the model's image embeddings over the dataset.

    Embeds up to ``num_samples`` randomly chosen items (all items if
    ``num_samples <= 0``) and tracks column-wise minimum, maximum and a
    standard deviation over the last embedding row of each sample.
    Results are cached to ``<dataset>_<model>_imgfeat_range.pt`` in
    ``output_path`` and reloaded on subsequent calls.

    Returns:
        Tuple
            * minimum feature values (per feature column) across dataset (FloatTensor: feature_dim)
            * maximum feature values (per feature column) across dataset (FloatTensor: feature_dim)
            * std of the last-row features across the sampled items (FloatTensor: feature_dim)
    """
    # store this in between sessions, so that it does not have to be recalculated for every run
    filename = os.path.join(output_path, f"{dataset.get_name()}_{adapter.get_name()}_imgfeat_range.pt")
    if os.path.isfile(filename):
        data = torch.load(filename)
        return data['min_feats'], data['max_feats'], data['std']
    print('Calculating image feature range...')
    if num_samples <= 0:
        sample_indices = range(len(dataset))
    else:
        sample_indices = random.sample(range(0, len(dataset)), num_samples)
    min_feats = None # feature_dim
    max_feats = None # feature_dim
    feats = []  # last-row embedding per sample, used only for the std estimate
    for sample_idx in tqdm(sample_indices):
        sample = dataset[sample_idx]
        embedding = adapter.get_image_embedding(sample).cpu()
        feats.append(embedding[-1])
        # first sample initialises the running min/max; the minimum/maximum
        # calls below are then no-ops for that sample
        if isinstance(min_feats, type(None)):
            min_feats = _reduce_min(embedding)
            max_feats = _reduce_max(embedding)
        min_feats = torch.minimum(min_feats, _reduce_min(embedding))
        max_feats = torch.maximum(max_feats, _reduce_max(embedding))
    feats = torch.stack(feats, dim=0) # num_samples x feature_dim
    std = feats.std(dim=0)
    # persist so later runs take the fast path above
    torch.save({'min_feats': min_feats, 'max_feats': max_feats, 'std': std}, filename)
    return min_feats, max_feats, std
def _get_question_feature_range(adapter: DatasetModelAdapter, dataset: DiagnosticDataset, output_path: str, num_samples: int = 500) -> Tuple[torch.FloatTensor, torch.FloatTensor, torch.FloatTensor]:
    """
    Estimate the per-column range of the model's question embeddings over the dataset.

    Mirrors ``_get_img_feature_range`` but uses ``adapter.get_question_embedding``
    and caches to ``<dataset>_<model>_quesfeat_range.pt``.

    Returns:
        Tuple
            * minimum feature values (per feature column) across dataset (FloatTensor: feature_dim)
            * maximum feature values (per feature column) across dataset (FloatTensor: feature_dim)
            * std of the last-row features across the sampled items (FloatTensor: feature_dim)
    """
    # store this in between sessions, so that it does not have to be recalculated for every run
    filename = os.path.join(output_path, f"{dataset.get_name()}_{adapter.get_name()}_quesfeat_range.pt")
    if os.path.isfile(filename):
        data = torch.load(filename)
        return data['min_feats'], data['max_feats'], data['std']
    print('Calculating question feature range...')
    if num_samples <= 0:
        sample_indices = range(len(dataset))
    else:
        sample_indices = random.sample(range(0, len(dataset)), num_samples)
    min_feats = None # feature_dim
    max_feats = None # feature_dim
    feats = []  # last-row embedding per sample, used only for the std estimate
    for sample_idx in tqdm(sample_indices):
        sample = dataset[sample_idx]
        embedding = adapter.get_question_embedding(sample).cpu()
        feats.append(embedding[-1])
        # first sample initialises the running min/max; the minimum/maximum
        # calls below are then no-ops for that sample
        if isinstance(min_feats, type(None)):
            min_feats = _reduce_min(embedding)
            max_feats = _reduce_max(embedding)
        min_feats = torch.minimum(min_feats, _reduce_min(embedding))
        max_feats = torch.maximum(max_feats, _reduce_max(embedding))
    feats = torch.stack(feats, dim=0) # num_samples x feature_dim
    std = feats.std(dim=0)
    # persist so later runs take the fast path above
    torch.save({'min_feats': min_feats, 'max_feats': max_feats, 'std': std}, filename)
    return min_feats, max_feats, std
def _get_db_connection(output_path: str, adapter: DatasetModelAdapter, dataset: DiagnosticDataset) -> sqlite3.Connection:
    """Open (creating if necessary) the per-dataset/per-model result database."""
    db_path = os.path.join(output_path, f"{dataset.get_name()}_{adapter.get_name()}.db")
    print("Opening DB at", db_path)
    connection = sqlite3.connect(db_path)
    # Favour speed over durability: results can be regenerated and the
    # storage backend (network drive) makes file caching very slow.
    connection.execute('PRAGMA synchronous = 0')
    connection.execute('PRAGMA journal_mode = OFF')
    return connection
def _write_class_answer_mapping(db: sqlite3.Connection, adapter: DatasetModelAdapter, dataset: DiagnosticDataset):
    """Populate the ``answers`` (class index -> answer text) and ``ground_truth``
    (question id -> answer/score pairs) tables, skipping each if it already exists."""
    cur = db.cursor()
    cur.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='answers'")
    exists = cur.fetchone()[0]==1
    cur.close()
    if not exists:
        print("Writing answer mapping...")
        # create new table
        db.execute("CREATE TABLE answers(class INTEGER PRIMARY KEY, answer TEXT)")
        sql_insert = "INSERT INTO answers VALUES(?,?)"
        insert_values = []
        # NOTE(review): answer_to_class is built in both branches but never
        # used or returned — looks like dead code; confirm before removing
        answer_to_class = {}
        for class_idx in range(adapter.get_output_size()):
            answer_str = dataset.class_idx_to_answer(class_idx)
            if answer_str:
                insert_values.append((class_idx, answer_str))
                answer_to_class[answer_str] = class_idx
        db.executemany(sql_insert, insert_values)
        db.commit()
    else:
        print('Found existing answer mapping')
        answer_to_class = {}
        for class_idx in range(adapter.get_output_size()):
            answer_str = dataset.class_idx_to_answer(class_idx)
            if answer_str:
                answer_to_class[answer_str] = class_idx
    cur = db.cursor()
    cur.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='ground_truth'")
    exists = cur.fetchone()[0]==1
    cur.close()
    if not exists:
        print("Writing ground truth mapping...")
        insert_values = []
        db.execute("CREATE TABLE ground_truth(question_id INTEGER, class TEXT, score REAL)")
        sql_insert = "INSERT INTO ground_truth VALUES(?,?,?)"
        # one row per (question, ground-truth answer) with its score
        for sample in dataset:
            for answer in sample.answers:
                insert_values.append((sample.question_id, answer, sample.answers[answer]))
        db.executemany(sql_insert, insert_values)
        db.commit()
    else:
        print("Found existing ground truth mapping")
def _write_qid_question_mapping(db: sqlite3.Connection, adapter: DatasetModelAdapter, dataset: DiagnosticDataset):
    """Populate the ``questions`` table (question id -> question text and image id).

    Does nothing if the table already exists.  The ``adapter`` parameter is
    unused here; it is kept for signature symmetry with the sibling writers.
    """
    cur = db.cursor()
    cur.execute("SELECT count(name) FROM sqlite_master WHERE type='table' AND name='questions'")
    # NOTE(review): early return skips cur.close() — cursor is left open
    if cur.fetchone()[0]==1:
        return # answer table exists already
    cur.close()
    print("Writing question mapping...")
    # create new table
    db.execute("CREATE TABLE questions(question_id INTEGER PRIMARY KEY, question TEXT, image_id TEXT)")
    sql_insert = "INSERT INTO questions VALUES(?,?,?)"
    insert_values = []
    for sample in dataset:
        insert_values.append((int(sample.question_id), sample.question, sample.image_id))
    db.executemany(sql_insert, insert_values)
    db.commit()
def _write_table(db: sqlite3.Connection, metric_name: str, data: dict, overwrite: bool = True):
    """Write one metric's per-question results into a table named after the metric.

    ``metric_name`` selects both the schema and how ``data`` (question_id ->
    result dict) is flattened into rows.  With ``overwrite`` the table is
    dropped and recreated; otherwise rows are appended to the existing table
    (used when a run is resumed at start_sample > 0).

    NOTE: metric_name is interpolated directly into the SQL — callers only
    pass internal metric identifiers, never user input.
    """
    if len(data) == 0:
        return # don't write if there's nothing to write
    if overwrite:
        try:
            # delete table, if exists
            delete_table = f"DROP TABLE {metric_name};"
            db.execute(delete_table)
            print(f'Deleted old table {metric_name}')
        except:
            pass # table did not exist in the first place
    # create new table
    sql_table = f"CREATE TABLE {metric_name}(question_id INTEGER"
    sql_insert = f"INSERT INTO {metric_name} VALUES(?"
    if 'bias' in metric_name or 'robustness' in metric_name:
        sql_table += ", predicted_class TEXT, prediction_frequency REAL, score REAL"
        sql_insert += ", ?, ?, ?"
    elif metric_name == 'sears':
        sql_table += ", sear_1_predicted_class TEXT, sear_1_applied INTEGER, sear_1_flipped INTEGER"
        sql_table += ", sear_2_predicted_class TEXT, sear_2_applied INTEGER, sear_2_flipped INTEGER"
        sql_table += ", sear_3_predicted_class TEXT, sear_3_applied INTEGER, sear_3_flipped INTEGER"
        sql_table += ", sear_4_predicted_class TEXT, sear_4_applied INTEGER, sear_4_flipped INTEGER"
        sql_insert += ", ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?"
    elif metric_name == 'uncertainty':
        # NOTE(review): "prediction_fequency" typo is baked into the schema;
        # renaming it would break existing readers of these databases
        sql_table += ", predicted_class TEXT, prediction_fequency REAL, certainty_score REAL, entropy REAL"
        sql_insert += ", ?, ?, ?, ?"
    elif metric_name == 'accuracy':
        sql_table += ", top_1_class TEXT, top_1_prob REAL, top_1_accuracy REAL"
        sql_table += ", top_2_class TEXT, top_2_prob REAL, top_2_accuracy REAL"
        sql_table += ", top_3_class TEXT, top_3_prob REAL, top_3_accuracy REAL"
        sql_insert += ", ?, ?, ?, ?, ?, ?, ?, ?, ?"
    else:
        raise Exception('unknown metric name', metric_name)
    sql_table += ");"
    sql_insert += ");"
    if overwrite:
        db.execute(sql_table)
    # write data to table
    insert_values = []
    for question_id in data:
        if 'bias' in metric_name or 'robustness' in metric_name:
            # one row per predicted class with its prediction frequency
            score = data[question_id]['bias'] if 'bias' in metric_name else data[question_id]['robustness']
            for class_idx in data[question_id]['class_pred_counter']:
                insert_values.append((int(question_id), class_idx, data[question_id]['class_pred_counter'][class_idx], score))
        elif metric_name == 'sears':
            insert_values.append((int(question_id),
                                  data[question_id]['sear_1']['predicted_class'], data[question_id]['sear_1']['applied'], data[question_id]['sear_1']['flipped'],
                                  data[question_id]['sear_2']['predicted_class'], data[question_id]['sear_2']['applied'], data[question_id]['sear_2']['flipped'],
                                  data[question_id]['sear_3']['predicted_class'], data[question_id]['sear_3']['applied'], data[question_id]['sear_3']['flipped'],
                                  data[question_id]['sear_4']['predicted_class'], data[question_id]['sear_4']['applied'], data[question_id]['sear_4']['flipped']))
        elif metric_name == 'uncertainty':
            entropy = data[question_id]['entropy']
            for class_idx in data[question_id]['class_pred_counter']:
                insert_values.append((int(question_id), class_idx, data[question_id]['class_pred_counter'][class_idx], data[question_id]['class_certainty_scores'][class_idx], entropy))
        elif metric_name == 'accuracy':
            insert_values.append((int(question_id),
                                  data[question_id]['top_1_class'], data[question_id]['top_1_prob'], data[question_id]['top_1_accuracy'],
                                  data[question_id]['top_2_class'], data[question_id]['top_2_prob'], data[question_id]['top_2_accuracy'],
                                  data[question_id]['top_3_class'], data[question_id]['top_3_prob'], data[question_id]['top_3_accuracy']))
        else:
            raise Exception('unknown metric name', metric_name)
    db.executemany(sql_insert, insert_values)
    db.commit()
def write_model_info(output_path: str, adapter: DatasetModelAdapter):
    """Add (or refresh) this adapter's model statistics in ``model_info.json``.

    The JSON file maps model name -> info dict as produced by the imported
    ``model_info`` helper (trainable parameter statistics); entries written
    for other models are preserved.
    """
    infos = {}
    file = os.path.join(output_path, 'model_info.json')
    if os.path.isfile(file):
        # read existing model info from other models to not forget (overwrite)
        with open(file, 'r') as f:
            infos = json.load(f)
    # bugfix: the local dict used to be named `model_info`, shadowing the
    # imported `model_info` function and making this call a TypeError
    infos[adapter.get_name()] = model_info(net=adapter.get_torch_module(), only_trainable=True)
    with open(file, 'w') as f:
        # bugfix: json.dump was previously called without the file object
        json.dump(infos, f)
@torch.no_grad()
def calculate_metrics(adapter: DatasetModelAdapter, dataset: DiagnosticDataset, metrics: List[str], output_path: str, trials: int = 15,
                      min_tokens: int = 3, max_tokens: int = 10, start_sample: int = 0, max_samples: int = -1):
    """
    Run the selected diagnostic metrics for every dataset sample and store the
    results in a per-dataset/per-model sqlite database under ``output_path``.

    Args:
        metrics: choice between ['accuracy',
                                 'question_bias_featurespace', 'question_bias_imagespace',
                                 'image_bias_featurespace', 'image_bias_wordspace',
                                 'image_robustness_imagespace', 'image_robustness_featurespace',
                                 'question_robustness_wordspace', 'question_robustness_featurespace',
                                 'sears',
                                 'uncertainty']
        trials: number of perturbed inputs generated per sample for bias/robustness/uncertainty
        min_tokens, max_tokens: random-question length bounds for image-bias probing
        start_sample: resume index; > 0 also switches table writes to append mode
        max_samples: stop after this many processed samples (-1 = no limit)
    """
    # appending (resume run) vs. recreating all result tables
    overwrite = start_sample == 0
    # per-metric result caches, keyed by question id; flushed to the DB at the end
    cache_accuracy = {}
    cache_question_bias_featurespace = {}
    cache_question_bias_imagespace = {}
    cache_image_bias_featurespace = {}
    cache_image_bias_wordspace = {}
    cache_image_robustness_imagespace = {}
    cache_image_robustness_featurespace = {}
    cache_question_robustness_featurespace = {}
    cache_sear_flips = {}
    cache_certainty = {}
    db = _get_db_connection(output_path=output_path, adapter=adapter, dataset=dataset)
    write_model_info(output_path=output_path, adapter=adapter)
    adapter.eval()
    # feature ranges are only needed by the feature-space perturbation metrics
    if 'question_bias_featurespace' in metrics or 'image_robustness_featurespace' in metrics:
        min_img_feat_vals, max_img_feat_vals, img_feat_std = _get_img_feature_range(adapter, dataset, output_path)
    if 'image_bias_featurespace' in metrics or 'question_robustness_featurespace' in metrics:
        min_ques_feat_vals, max_ques_feat_vals, ques_feat_std = _get_question_feature_range(adapter, dataset, output_path)
    if overwrite:
        _write_class_answer_mapping(db, adapter, dataset)
        _write_qid_question_mapping(db, adapter, dataset)
    print("Calculating metrics...")
    counter = 0
    for sample_idx, sample in enumerate(tqdm(dataset)):
        if sample_idx < start_sample:
            continue # restart at specific index
        # unperturbed forward pass; top-1 prediction is the reference for all metrics
        top_3_probs, top_3_classes = adapter.forward([sample]).squeeze().topk(k=3, dim=-1, sorted=True)
        original_pred_class = top_3_classes[0].item()
        pred_answer_1_text = dataset.class_idx_to_answer(original_pred_class)
        if 'accuracy' in metrics:
            pred_answer_2_text = dataset.class_idx_to_answer(top_3_classes[1].item())
            pred_answer_3_text = dataset.class_idx_to_answer(top_3_classes[2].item())
            cache_accuracy[sample.question_id] = {
                f'top_1_class': pred_answer_1_text, 'top_1_prob': top_3_probs[0].item(), 'top_1_accuracy': sample.answers[pred_answer_1_text] if pred_answer_1_text in sample.answers else 0.0,
                f'top_2_class': pred_answer_2_text, 'top_2_prob': top_3_probs[1].item(), 'top_2_accuracy': sample.answers[pred_answer_2_text] if pred_answer_2_text in sample.answers else 0.0,
                f'top_3_class': pred_answer_3_text, 'top_3_prob': top_3_probs[2].item(), 'top_3_accuracy': sample.answers[pred_answer_3_text] if pred_answer_3_text in sample.answers else 0.0,
            }
        # bias metrics: replace one modality with random content, keep the other fixed
        if 'question_bias_featurespace' in metrics:
            inputs = inputs_for_question_bias_featurespace(current_sample=sample, min_img_feat_val=min_img_feat_vals, max_img_feat_val=max_img_feat_vals, trials=trials)
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, bias = eval_bias(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_question_bias_featurespace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'bias': bias}
            del preds
        if 'question_bias_imagespace' in metrics:
            inputs = inputs_for_question_bias_imagespace(current_sample=sample, dataset=dataset, trials=trials)
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, bias = eval_bias(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_question_bias_imagespace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'bias': bias}
            del preds
        if 'image_bias_featurespace' in metrics:
            inputs = inputs_for_image_bias_featurespace(current_sample=sample, min_question_feat_val=min_ques_feat_vals, max_question_feat_val=max_ques_feat_vals, min_tokens=min_tokens, max_tokens=max_tokens, trials=trials)
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, bias = eval_bias(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_image_bias_featurespace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'bias': bias}
            del preds
        if 'image_bias_wordspace' in metrics:
            inputs = inputs_for_image_bias_wordspace(current_sample=sample, dataset=dataset, trials=trials)
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, bias = eval_bias(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_image_bias_wordspace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'bias': bias}
            del preds
        # robustness metrics: small perturbations of the original inputs
        if 'image_robustness_imagespace' in metrics:
            inputs = inputs_for_image_robustness_imagespace(current_sample=sample, trials=trials//4, noise_types=['gaussian', 'poisson', 's&p', 'speckle'])
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, robustness = eval_robustness(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_image_robustness_imagespace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'robustness': robustness}
            del preds
        if 'image_robustness_featurespace' in metrics:
            inputs = inputs_for_image_robustness_featurespace(current_sample=sample, std=img_feat_std, trials=trials)
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, robustness = eval_robustness(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_image_robustness_featurespace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'robustness': robustness}
            del preds
        if 'question_robustness_featurespace' in metrics:
            inputs = inputs_for_question_robustness_featurespace(current_sample=sample, std=ques_feat_std, adapter=adapter, trials=trials)
            preds = adapter.forward(inputs).cpu()
            class_pred_counter, robustness = eval_robustness(dataset=dataset, original_class_prediction=pred_answer_1_text, predictions=preds)
            cache_question_robustness_featurespace[sample.question_id] = {'class_pred_counter': class_pred_counter, 'robustness': robustness}
            del preds
        # SEARs: semantically equivalent question rewrites; not every rule applies to every question
        if 'sears' in metrics:
            inputs = inputs_for_question_sears(current_sample=sample)
            sear_1_preds = adapter.forward([inputs[0]]).cpu() if inputs[0] else None
            sear_2_preds = adapter.forward([inputs[1]]).cpu() if inputs[1] else None
            sear_3_preds = adapter.forward([inputs[2]]).cpu() if inputs[2] else None
            sear_4_preds = adapter.forward([inputs[3]]).cpu() if inputs[3] else None
            cache_sear_flips[sample.question_id] = eval_sears(dataset=dataset,
                                                              sear_inputs=inputs,
                                                              sear_predictions=(sear_1_preds, sear_2_preds, sear_3_preds, sear_4_preds),
                                                              original_class_prediction=pred_answer_1_text)
            del sear_1_preds
            del sear_2_preds
            del sear_3_preds
            del sear_4_preds
        if 'uncertainty' in metrics:
            class_pred_counter, certainty_scores, entropy = certainty(dataset=dataset, adapter=adapter, sample=sample, trials=trials) # batch=1, batch=1
            cache_certainty[sample.question_id] = {'class_pred_counter': class_pred_counter, 'class_certainty_scores': certainty_scores, 'entropy': entropy}
        counter += 1
        if max_samples >= 0 and counter == max_samples:
            break
    # flush all caches into one table per metric
    print("Writing metrics to DB...")
    for metric in tqdm(metrics):
        if metric == 'accuracy':
            _write_table(db, metric, cache_accuracy, overwrite)
        elif metric == 'question_bias_featurespace':
            _write_table(db, metric, cache_question_bias_featurespace, overwrite)
        elif metric == 'question_bias_imagespace':
            _write_table(db, metric, cache_question_bias_imagespace, overwrite)
        elif metric == 'image_bias_featurespace':
            _write_table(db, metric, cache_image_bias_featurespace, overwrite)
        elif metric == 'image_bias_wordspace':
            _write_table(db, metric, cache_image_bias_wordspace, overwrite)
        elif metric == 'image_robustness_imagespace':
            _write_table(db, metric, cache_image_robustness_imagespace, overwrite)
        elif metric == 'image_robustness_featurespace':
            _write_table(db, metric, cache_image_robustness_featurespace, overwrite)
        elif metric == 'question_robustness_featurespace':
            _write_table(db, metric, cache_question_robustness_featurespace, overwrite)
        elif metric == 'sears':
            _write_table(db, metric, cache_sear_flips, overwrite)
        elif metric == 'uncertainty':
            _write_table(db, metric, cache_certainty, overwrite)
        else:
            raise Exception('Unknown metric', metric)
    db.commit()
    db.close()
| 53.685096 | 240 | 0.680697 | 0 | 0 | 0 | 0 | 9,809 | 0.439216 | 0 | 0 | 5,422 | 0.24278 |
7d046d7646f874b0a9f0b4e92ebdd10a5f1eb202 | 268 | py | Python | hospitals/urls.py | gilga98/ahalya | 1c50ae3ffaf48db5b1970567028117991451d62b | [
"MIT"
] | 4 | 2020-07-18T18:09:32.000Z | 2021-05-01T02:12:40.000Z | hospitals/urls.py | gilga98/ahalya | 1c50ae3ffaf48db5b1970567028117991451d62b | [
"MIT"
] | 5 | 2021-03-30T13:56:57.000Z | 2021-09-22T19:27:22.000Z | hospitals/urls.py | gilga98/ahalya | 1c50ae3ffaf48db5b1970567028117991451d62b | [
"MIT"
] | 1 | 2020-11-15T05:08:21.000Z | 2020-11-15T05:08:21.000Z | from django.urls import path
from . import views
# URL namespace: reverse these routes as "hospitals:hospital_list" / "hospitals:hospital_read".
app_name = "hospitals"
urlpatterns = [
    # NOTE(review): routes have no trailing slash — confirm behaviour with APPEND_SLASH
    path("hospitalList", views.HospitalDetailedList.as_view(), name="hospital_list"),
    path("hospitalDetail", views.HospitalDetailedSingle.as_view(), name="hospital_read"),
] | 26.8 | 89 | 0.75 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 71 | 0.264925 |
7d0477dd2b810ce50a53066c223439d0ca4dbaec | 335 | py | Python | miniteste7/divisoresmaior/divisoresmaior.py | Davvi-Duarte/prog1 | b22e271d3a0226ebb2cabe211d4c8e243cc93f1c | [
"Apache-2.0"
] | null | null | null | miniteste7/divisoresmaior/divisoresmaior.py | Davvi-Duarte/prog1 | b22e271d3a0226ebb2cabe211d4c8e243cc93f1c | [
"Apache-2.0"
] | null | null | null | miniteste7/divisoresmaior/divisoresmaior.py | Davvi-Duarte/prog1 | b22e271d3a0226ebb2cabe211d4c8e243cc93f1c | [
"Apache-2.0"
] | null | null | null | def maior_numero(lista):
a = lista[0]
for i in range(len(lista)):
if lista[i] > a:
a = lista[i]
return a
def remove_divisores_do_maior(lista):
    """Remove, in place, every element that evenly divides the list's maximum.

    The maximum itself is always removed (it divides itself).  Returns None,
    following the in-place mutation convention.
    """
    maior = lista[0]
    for valor in lista:
        if valor > maior:
            maior = valor
    lista[:] = [valor for valor in lista if maior % valor != 0]
    return None
| 23.928571 | 39 | 0.570149 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7d053dd93a0611a4a0e532ea4d23bee17151532c | 1,613 | py | Python | RBSP/Pos/_ReadSPDF.py | mattkjames7/RBSP | 4827fc1fa3203463cdf994c1c979deec60fe1122 | [
"MIT"
] | null | null | null | RBSP/Pos/_ReadSPDF.py | mattkjames7/RBSP | 4827fc1fa3203463cdf994c1c979deec60fe1122 | [
"MIT"
] | null | null | null | RBSP/Pos/_ReadSPDF.py | mattkjames7/RBSP | 4827fc1fa3203463cdf994c1c979deec60fe1122 | [
"MIT"
] | null | null | null | from .. import Globals
import PyFileIO as pf
import os
import numpy as np
def _ReadSPDF(sc='a'):
	'''
	Reads the SPDF SSCWeb text files (scraped from their HTML output).

	Input:
		sc: Spacecraft 'a' or 'b'

	Returns:
		numpy.recarray (empty when the data file is missing)

	'''
	# Record layout of the scraped dump: time columns followed by position in
	# GEO/GM/GSE/GSM/SM coordinates plus the L-shell value (33 columns total).
	dtype = [('Year','int32'),('DOY','int32'),('ut','U9'),('Xgeo','float32'),('Ygeo','float32'),('Zgeo','float32'),('Latgeo','float32'),('Longeo','float32'),('LTgeo','U9'),
			('Xgm','float32'),('Ygm','float32'),('Zgm','float32'),('Latgm','float32'),('Longm','float32'),('LTgm','U9'),
			('Xgse','float32'),('Ygse','float32'),('Zgse','float32'),('Latgse','float32'),('Longse','float32'),('LTgse','U9'),
			('Xgsm','float32'),('Ygsm','float32'),('Zgsm','float32'),('Latgsm','float32'),('Longsm','float32'),
			('Xsm','float32'),('Ysm','float32'),('Zsm','float32'),('Latsm','float32'),('Lonsm','float32'),('LTsm','U9'),('L','float32')]

	#find the file
	fname = Globals.DataPath + 'SPDF/rbsp'+sc+'.dat'
	if not os.path.isfile(fname):
		print('SPDF data for spacecraft "{:s}" not found'.format(sc))
		return np.recarray(0,dtype=dtype)

	#read the file; use a context manager so the handle is always closed,
	#even if readlines() raises (the original used open/readlines/close).
	print('Reading file')
	with open(fname,'r') as f:
		lines = f.readlines()

	print('Creating output array')
	skip = 57  # header rows in the scraped dump -- TODO confirm against source
	n = len(lines) - skip
	data = np.recarray(n,dtype=dtype)
	nc = 33
	for i in range(0,n):
		print('\rCopying data into array {:6.2f}%'.format(100.0*(i+1)/n),end='')
		s = lines[i+skip].split()
		for j in range(0,nc):
			#convert each whitespace-separated field to its column dtype
			data[dtype[j][0]][i] = np.array(s[j]).astype(dtype[j][1])
	print()
	return data
| 31.019231 | 169 | 0.610663 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 869 | 0.538748 |
7d06085df3fe41964096c1793c9b3fba869163aa | 273 | py | Python | setup.py | klaasb/python-dmenuwrap | ffc592da00d1c53200e4b391a81c423afc06c592 | [
"BSD-2-Clause"
] | 1 | 2019-06-28T17:47:43.000Z | 2019-06-28T17:47:43.000Z | setup.py | klaasb/python-dmenuwrap | ffc592da00d1c53200e4b391a81c423afc06c592 | [
"BSD-2-Clause"
] | null | null | null | setup.py | klaasb/python-dmenuwrap | ffc592da00d1c53200e4b391a81c423afc06c592 | [
"BSD-2-Clause"
] | null | null | null | from distutils.core import setup
# Package metadata collected in one mapping, then forwarded to setup().
_metadata = dict(
    name='python-dmenuwrap',
    author='Klaas Boesche',
    author_email='klaas-dev@boesche.me',
    url='https://github.com/KaGeBe/python-dmenuwrap',
    version='0.1.0',
    license='BSD 2-clause',
    py_modules=['dmenuwrap'],
)
setup(**_metadata)
| 22.75 | 53 | 0.67033 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 131 | 0.479853 |
7d0655be98836fde86e8a90e510ed3097683aff2 | 967 | py | Python | crits/comments/urls.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 738 | 2015-01-02T12:39:55.000Z | 2022-03-23T11:05:51.000Z | crits/comments/urls.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 605 | 2015-01-01T01:03:39.000Z | 2021-11-17T18:51:07.000Z | crits/comments/urls.py | dutrow/crits | 6b357daa5c3060cf622d3a3b0c7b41a9ca69c049 | [
"MIT"
] | 316 | 2015-01-07T12:35:01.000Z | 2022-03-30T04:44:30.000Z | from django.conf.urls import url
from . import views
# URL routes for the comments app.  Django tries patterns in declaration
# order, so the specific ^remove/ route must stay ahead of the generic
# ^(?P<method>...) route, which would otherwise capture "remove" as `method`.
# NOTE(review): \S+ also matches '/', so the generic three-group pattern on
# the second line appears to shadow the later activity/<atype>/<value>/ and
# search/... routes for multi-segment URLs -- verify intended ordering.
urlpatterns = [
    url(r'^remove/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', views.remove_comment, name='crits-comments-views-remove_comment'),
    url(r'^(?P<method>\S+)/(?P<obj_type>\S+)/(?P<obj_id>\S+)/$', views.add_update_comment, name='crits-comments-views-add_update_comment'),
    url(r'^activity/$', views.activity, name='crits-comments-views-activity'),
    url(r'^activity/(?P<atype>\S+)/(?P<value>\S+)/$', views.activity, name='crits-comments-views-activity'),
    url(r'^activity/get_new_comments/$', views.get_new_comments, name='crits-comments-views-get_new_comments'),
    url(r'^search/(?P<stype>[A-Za-z0-9\-\._]+)/(?P<sterm>.+?)/$', views.comment_search, name='crits-comments-views-comment_search'),
    url(r'^list/$', views.comments_listing, name='crits-comments-views-comments_listing'),
    url(r'^list/(?P<option>\S+)/$', views.comments_listing, name='crits-comments-views-comments_listing'),
]
| 64.466667 | 139 | 0.688728 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 576 | 0.595657 |
7d088e8ad48a298f700c49b458a6cd8398f17041 | 19,511 | py | Python | FortressOfSolitude/_FortressOfSolitude/NeutrinoKey/models.py | BDD16/FortressOfSolitude | 51070d3ffa78262d823ae8ccce4f8ae3c7ed83ac | [
"MIT"
] | null | null | null | FortressOfSolitude/_FortressOfSolitude/NeutrinoKey/models.py | BDD16/FortressOfSolitude | 51070d3ffa78262d823ae8ccce4f8ae3c7ed83ac | [
"MIT"
] | 6 | 2021-07-26T14:07:30.000Z | 2022-01-09T01:06:40.000Z | FortressOfSolitude/_FortressOfSolitude/NeutrinoKey/models.py | BDD16/FortressOfSolitude | 51070d3ffa78262d823ae8ccce4f8ae3c7ed83ac | [
"MIT"
] | null | null | null | """
DBA 1337_TECH, AUSTIN TEXAS © MAY 2020
Proof of Concept code, No liabilities or warranties expressed or implied.
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from datetime import datetime
from .cryptoutils import CryptoTools
from base64 import b64encode, b64decode
from django.contrib.auth import get_user_model
from random import random
# Create your models here.
# Constants
LENGTH_OF_KEK = 32 # 256 bits or 32 bytes
LENGTH_OF_DEK = 32 # 256 bits or 32 bytes
LENGTH_OF_SALT = 32 # 256 bits or 32 bytes
'''
KeyMold is a models.Manager clas extension that includes creating a Kek and retrieving a kek
no inputs
'''
class KeyMold(models.Manager):
    """Manager that derives Key-Encryption-Keys (KEKs) from a user password."""

    def _create_kek(self, request, **kwargs):
        # Fix: the original signature omitted ``self`` (it was
        # ``_create_kek(request, **kwargs)``), so the body's use of
        # ``self.kek`` raised NameError on every call.
        pwd = request.user.password
        self.kek = DeriveKek_default(pwd)
        return self.kek

    def get_queryset(self):
        """Return a queryset pinned to the 'default' database alias."""
        qs = models.QuerySet(self.model)
        if self._db is not None:
            qs = qs.using('default')
        return qs
'''
TelescopeCoord is a models.Manager that allows to find the neutron star that will be used for the keyMold to make a Key Encryption Key [kek].
no inputs
'''
class TelescopeCoord(models.Manager):
    """Manager used to look up KEK records (the 'neutron star' for the mold)."""

    def get_queryset(self):
        # Pin the queryset to the 'default' database alias when a db is set.
        queryset = models.QuerySet(self.model)
        return queryset if self._db is None else queryset.using('default')
'''
QuasiPlasma is a models.Manager that allows for deriving Data Encryption Keys [DEKs] and retrieving deks from the neutron stars plasma.
no inputs
'''
class QuasiPlasma(models.Manager):
    """Manager that derives Data-Encryption-Keys (DEKs) from a user password."""

    def _create_dek(self, request, **kwargs):
        # Fix: the original signature omitted ``self`` (it was
        # ``_create_dek(request, **kwargs)``), so the body's use of
        # ``self.dek`` raised NameError on every call.
        pwd = request.user.password
        self.dek = DeriveDek_default(pwd)
        return self.dek

    def get_queryset(self):
        """Return a queryset pinned to the 'default' database alias."""
        qs = models.QuerySet(self.model)
        if self._db is not None:
            qs = qs.using('default')
        return qs
'''
KEK is the Key encryption Key [KEK] model.Model class extension that has the ability to derive a new KEK as well as wrap the KEK.
no inputs
'''
class KEK(models.Model):
    """Key-Encryption-Key record.

    The wrapped key and its AES-EAX nonce are persisted base64-encoded in
    CharFields; ``kek``/``wrappedKek`` are transient scratch attributes.
    """
    # Never should the key be passed as clear text always use the wrap or unwrap functions
    crypto = CryptoTools()
    kek = None  # transient: unwrapped key bytes, or None
    wrappedKek = None  # transient: wrapped (encrypted) key, or None
    result_wrapped_nonce = models.CharField(max_length=128, default=b64encode(int(55).to_bytes(4, 'big')))
    result_wrapped_kek = models.CharField(max_length=128, default=None)
    objects = TelescopeCoord()

    class Meta:
        verbose_name = 'KEK'

    def unwrap_key(self, password):
        """Decrypt the stored wrapped KEK with sha256(password) and cache it on self.kek.

        NOTE(review): the ``else`` below pairs with the *bytes* ``if``, so a
        str password with both caches empty executes the first branch AND then
        falls into the ``else`` and decrypts a second time -- verify intended.
        """
        if isinstance(password, str) and self.kek == None and self.wrappedKek == None:
            self.crypto.nonce = b64decode(self.result_wrapped_nonce)
            self.kek = self.crypto.AesDecryptEAX(b64decode(self.result_wrapped_kek),
                                                 self.crypto.Sha256(password.encode()))
        if isinstance(password, bytes) and self.kek == None and self.wrappedKek == None:
            if isinstance(self.result_wrapped_nonce, str):
                # Strip the "b'...'" artifacts left by str()-ing bytes, then
                # re-pad before base64-decoding.
                # NOTE(review): base64 padding is normally (-len) % 4, not
                # len % 4 -- confirm this round-trips for all stored values.
                result_wrapped_nonce = (self.result_wrapped_nonce.encode()).replace(b"b'", b'')
                result_wrapped_nonce = result_wrapped_nonce[:-1]
                result_wrapped_nonce = result_wrapped_nonce + b'=' * (len(self.result_wrapped_nonce) % 4)
                self.crypto.nonce = b64decode(result_wrapped_nonce)
            else:
                self.crypto.nonce = b64decode(self.result_wrapped_nonce)
            # print("wrappedKek: " + self.result_wrapped_kek)
            if isinstance(self.result_wrapped_kek, str):
                # Same "b'...'" stripping dance for the wrapped key itself.
                result_wrapped_kek = (self.result_wrapped_kek.encode()).replace(b"b'", b'')
                result_wrapped_kek = result_wrapped_kek[:-1]
                result_wrapped_kek = result_wrapped_kek + b'=' * (len(result_wrapped_kek) % 4)
            elif isinstance(self.result_wrapped_kek, bytes):
                result_wrapped_kek = self.result_wrapped_kek
            self.kek = self.crypto.AesDecryptEAX(b64decode(result_wrapped_kek), CryptoTools().Sha256(password))
        else:
            try:
                self.crypto.nonce = b64decode(self.result_wrapped_nonce)
                if not isinstance(password, bytes):
                    password = password.encode()
                self.kek = self.crypto.AesDecryptEAX(b64decode(self.result_wrapped_kek), self.crypto.Sha256(password))
                self.wrappedKek = None
            except:
                # Any decryption/auth failure is treated as tampering.
                print('someone has attempted to spoof the KEK (key encryption key)')
        return self.kek

    def wrap_key(self, password):
        """Encrypt self.kek under sha256(password), cache it on self.wrappedKek.

        NOTE(review): ``data`` is undefined in the first two branches below,
        so calling wrap_key while self.kek is None raises NameError -- the
        intended argument was presumably self.kek; confirm before relying on
        those paths.
        """
        if isinstance(password, str) and self.kek == None:
            self.kek = self.crypto.AesEncryptEAX(data, self.crypto.Sha256(password.encode()))
            self.wrappedKek = self.kek
            self.kek = None
        elif isinstance(password, bytes) and self.kek == None:
            self.kek = self.crypto.AesEncryptEAX(data, self.crypto.Sha256(password))
            self.wrappedKek = b64encode(self.kek)
            self.kek = None
        elif self.kek != None:
            try:
                # print("ATTEMPTING WRAPPING KEK")
                self.crypto.nonce = b64decode(self.result_wrapped_nonce)
                # print("set nonce")
                if isinstance(password, bytes):
                    self.wrappedKek = b64encode(self.crypto.AesEncryptEAX(self.kek, self.crypto.Sha256(password)))
                else:
                    self.wrappedKek = b64encode(
                        self.crypto.AesEncryptEAX(self.kek, self.crypto.Sha256(password.encode())))
                self.kek = None
            except OSError as ERROR:
                print(ERROR)
                print('Wrapping KEK (key encryption key) was unsuccessful')
        return self.wrappedKek
'''
using the model of KEK unwrap and wrap the kek then unwrap the dek then pass the dek to a more useable object
perhaps this will also fetch the dek that is associated with that data model, so needs to be a manytomany relation.
DEK is a models.Model or Data Encryption Key class that allows to store, derive, and wrap Data Encryption Keys from a KEK and Salt
'''
class DEK(models.Model):
    """Data-Encryption-Key record, wrapped under a KEK.

    ``result_wrappedDek``/``result_SALT``/``result_wrapped_nonce`` are the
    persisted (base64) forms; ``dek``/``wrappedDek``/``SALT`` are transient.
    """
    crypto = CryptoTools()
    dek = None  # transient: unwrapped DEK bytes, or None
    wrappedDek = None  # transient: wrapped DEK, or None
    SALT = None  # transient: salt bytes used at derivation time
    result_wrapped_nonce = models.CharField(max_length=128, default=b64encode(int(55).to_bytes(4, 'big')))
    result_wrappedDek = models.CharField(max_length=128)
    result_SALT = models.CharField(max_length=45)
    kek_to_retrieve = models.ManyToManyField(KEK)
    objects = KeyMold()

    class Meta:
        verbose_name = 'DEK'

    def wrap_key(self, kek, password):
        """Re-encrypt the stored wrapped DEK under the unwrapped KEK.

        NOTE(review): every branch encrypts ``self.result_wrappedDek`` (the
        already-wrapped value) rather than ``self.dek`` -- confirm this is the
        intended wrapping semantics.
        """
        if isinstance(kek, KEK) and isinstance(password, str):
            kek.unwrap_key(password)
            self.crypto.nonce = b64decode(kek.result_wrapped_nonce)
            # print(self.result_wrappedDek)
            self.dek = self.crypto.AesEncryptEAX(b64decode(self.result_wrappedDek), kek.kek)
            kek.wrap_key(password)  # re-wrap so the plaintext KEK is not left cached
            return self.dek
        elif isinstance(kek, KEK) and isinstance(password, bytes):
            kek.unwrap_key(password)
            self.crypto.nonce = b64decode(kek.result_wrapped_nonce)
            self.dek = self.crypto.AesEncryptEAX(self.result_wrappedDek, kek.kek)
            kek.wrap_key(password)
            return self.dek
        else:
            try:
                kek.unwrap_key(password)
                self.crypto.nonce = b64decode(kek.result_wrapped_nonce)
                self.dek = self.crypto.AesEncryptEAX(self.result_wrappedDek, self.crypto.Sha256(kek.kek))
                kek.wrap_key(password)
                return self.dek
            except:
                # Bare except: any failure is reported as tampering.
                print('someone has attempted to spoof the DEK (data encryption key)')

    def unwrap_key(self, kek, password):
        """Unwrap the KEK with ``password``, then decrypt the stored DEK with it."""
        if isinstance(kek, KEK) and isinstance(password, str):
            master = kek.unwrap_key(password.encode())
            self.crypto.nonce = b64decode(self.result_wrapped_nonce)
            self.dek = self.crypto.AesDecryptEAX(b64decode(self.result_wrappedDek), self.crypto.Sha256(master))
            kek.wrap_key(password)
            return self.dek
        elif isinstance(kek, KEK) and isinstance(password, bytes):
            kek.unwrap_key(password)
            if isinstance(self.result_wrapped_nonce, str):
                # Strip "b'...'" artifacts from str()-ed bytes and re-pad
                # before base64-decoding the nonce.
                # NOTE(review): the debug prints below leak nonce/key-adjacent
                # material to stdout -- consider removing in production.
                print("NONCEDEK_STR:" + self.result_wrapped_nonce)
                result_wrapped_nonce = (self.result_wrapped_nonce.encode()).replace(b"b'", b'')
                result_wrapped_nonce = result_wrapped_nonce[:-1]
                result_wrapped_nonce = result_wrapped_nonce + b'=' * (len(result_wrapped_nonce) % 4)
                self.crypto.nonce = b64decode(result_wrapped_nonce)
                print(b'NONCEDEK>' + result_wrapped_nonce)
            elif isinstance(self.result_wrapped_nonce, bytes):
                print("YOLO")
                self.crypto.nonce = b64decode(self.result_wrapped_nonce)
            if (not isinstance(self.result_wrappedDek, bytes)):
                print("did we make it here" + str(self.result_wrappedDek))
                result_wrappedDek = (self.result_wrappedDek.encode()).replace(b"b'", b'')
                result_wrappedDek = result_wrappedDek[:-1]
                print("did we make it here" + str(result_wrappedDek))
                wrapper = result_wrappedDek + b'=' * (len(result_wrappedDek) % 4)
                print("wrapper" + str(wrapper))
            else:
                print(self.result_wrappedDek)
                result_wrappedDek = self.result_wrappedDek.replace(b"b'", b'')
                result_wrappedDek = result_wrappedDek
                wrapper = result_wrappedDek + b'=' * (len(result_wrappedDek) % 4)
            cryptoObj = CryptoTools()
            print(wrapper)
            self.dek = self.crypto.AesDecryptEAX(b64decode(wrapper), cryptoObj.Sha256(kek.kek))
            kek.wrap_key(password)
            return self.dek
        else:
            try:
                if not isinstance(password, bytes):
                    password = password.encode()
                else:
                    password = password
                kek.unwrap_key(password)
                self.crypto.nonce = b64decode(self.result_wrapped_nonce)
                # print("about to decrypt dek")
                self.dek = self.crypto.AesDecryptEAX(b64decode(self.result_wrappedDek), self.crypto.Sha256(kek.kek))
                kek.wrap_key(password)
                return self.dek
            except:
                print('someone has attempted to spoof the KEK2 (key encryption key)')
'''
function to DeriveKek_default from an arbitrary password
'''
def DeriveKek_default(password):
    """Derive, wrap and persist a default KEK from ``password``.

    The KEK is sha256(password) used as an AES-EAX key to encrypt the
    password bytes; ciphertext and nonce are stored base64-encoded on a new,
    saved ``KEK`` row.

    Args:
        password (str | bytes): secret the KEK is derived from.

    Returns:
        KEK | None: the saved KEK record, or None for unsupported types
        (matching the original's implicit None after the error print).
    """
    crypto = CryptoTools()
    # Normalise to bytes up front.  The original branched on str/bytes, but
    # its length check called password.encode() unconditionally (crashing for
    # bytes input) and the bytes branch also called password.encode()
    # (AttributeError: bytes has no encode).
    if isinstance(password, str):
        password = password.encode()
    elif not isinstance(password, bytes):
        print("ERROR>UNABLE TO GENERATE WRAPPED KEK, USE A CORRECT KEY FORMAT FOR WRAPPING")
        return None
    if len(crypto.Sha256(password)) != LENGTH_OF_KEK:
        print('ERROR> NOT ENOUGH BYTES IN PASSWORD FOR DEK, NEED 32')
    somekek = crypto.Sha256(password)
    somekek = crypto.AesEncryptEAX(password, somekek)
    # Persist the nonce base64-encoded next to the wrapped key.  The str
    # branch used to drop the nonce entirely and the bytes branch stored it
    # raw; both break KEK.unwrap_key, which b64-decodes the stored nonce
    # (NeutronCore.DeriveKek already saves it this way).
    k = KEK(result_wrapped_kek=b64encode(somekek),
            result_wrapped_nonce=b64encode(crypto.nonce))
    k.save()
    return k
'''
NeutronCore is a models.Model type class that allow for KEKs to be generated through a kek generator, time_generated, and of course the kek object
this is the model for when you need access to multiple KEKS for a single user
USE CASE: is old data relies on older KEKs but that older KEK is still active
but the user happened to change their password which would entail creating a new password and from that time the DEK chain would change to the newly
created KEK wrapped using the newly changed password.
'''
class NeutronCore(models.Model):
    """Per-user KEK history: every KEK ever generated for a user.

    Keeps older KEKs usable (old DEK chains still unwrap) after a password
    change creates a new KEK.
    """
    kek = models.ForeignKey(
        get_user_model(), related_name='KEK',
        on_delete=models.CASCADE,
        default=1)
    kekgenerator = models.ManyToManyField(KEK, related_name='KEK')
    time_generated = models.DateTimeField('date star collapsed', auto_now_add=True)
    objects = KeyMold()

    class Meta:
        verbose_name = 'neutron core'
        ordering = ['-time_generated']
        get_latest_by = 'time_generated'

    def DeriveKek(self, password):
        """Derive, wrap and persist a KEK from ``password``; returns the saved KEK.

        Returns None (after printing an error) for unsupported password types.
        """
        crypto = CryptoTools()
        # Normalise to bytes once.  The original's length check and its bytes
        # branch both called password.encode(), which raises AttributeError
        # whenever ``password`` is already bytes.
        if isinstance(password, str):
            password = password.encode()
        elif not isinstance(password, bytes):
            print("ERROR>UNABLE TO GENERATE WRAPPED KEK, USE A CORRECT KEY FORMAT FOR WRAPPING")
            return None
        if len(crypto.Sha256(password)) != LENGTH_OF_KEK:
            print('ERROR> NOT ENOUGH BYTES IN PASSWORD FOR DEK, NEED 32')
        somekek = crypto.Sha256(password)
        somekek = crypto.AesEncryptEAX(password, somekek)
        # Both original branches stored the nonce b64-encoded; keep that.
        k = KEK(result_wrapped_kek=b64encode(somekek),
                result_wrapped_nonce=b64encode(crypto.nonce))
        k.save()
        return k
def DeriveDek_default(password):
    """Derive a DEK as sha256(salt + unwrapped KEK) and persist it.

    NOTE(review): this function looks non-functional as written; confirm
    before use.  Specifically:
      * ``self.SALT`` references ``self`` inside a module-level function
        (NameError when the str branch runs);
      * ``kekForDek.result_SALT`` is not a field on the KEK model defined
        above (KEK stores only wrapped key + nonce);
      * ``DEK.wrap_key(dek, password)`` calls the unbound method with raw
        key bytes as ``self``.
    """
    crypto = CryptoTools()
    kekForDek = NeutronCore(get_user_model()).DeriveKek(password)
    if isinstance(kekForDek, KEK):
        if password != None and isinstance(password, str):
            # Generate DEK based off this formula sha256(256 bit SALT + KEK)
            self.SALT = crypto.RandomNumber(32)
            crypto.nonce = b64decode(kekForDek.result_wrapped_nonce)
            DerivedDek = crypto.Sha256(bytes(kekForDek.result_SALT) + crypto.AesDecryptEAX(
                bytes(b64decode(str(kekForDek.result_wrapped_kek).encode())),
                crypto.Sha256(bytes(password.encode()))))
            dekgenerator = DerivedDek
            dek = DerivedDek
            dek = DEK.wrap_key(dek, password)
            newDek = DEK(result_wrappedDek=b64encode(dek), result_SALT=kekForDek.result_SALT,
                         kek_to_retrieve=kekForDek, result_wrapped_nonce=b64encode(crypto.nonce))
            newDek.save()
            return newDek
'''
NeutronMatterCollector is for generating a Data Encryption Key [DEK]
no inputs
'''
class NeutronMatterCollector(models.Model):
    """Generates Data-Encryption-Keys (DEKs) wrapped under a user's KEK."""
    dekgenerator = models.ManyToManyField(DEK,
                                          related_name='kek_for_dek_generator')  # length of 32 bytes (256bits) in base64 is 44, but will need to include an = ending and null so extending to 45.
    # NOTE(review): this try/except wraps *field definitions*, which run once
    # at import time; it is unclear what exception it is meant to catch, and
    # both branches define the same fields -- verify intent.
    try:
        # print(get_user_model().user)
        kekForDek = models.ForeignKey(
            KEK, related_name='KEK_obj',
            on_delete=models.CASCADE, default=1)
        dek = models.ForeignKey(
            DEK, related_name='DEK_obj',
            on_delete=models.CASCADE,
            default=1)
    except:
        try:
            print("unable to locate KEK for username creating new one, this could be due to a new user")
            kekForDek = models.ForeignKey(KEK, related_name='KEK_obj',
                                          on_delete=models.CASCADE, default=1)
            dek = models.ForeignKey(DEK, related_name='DEK_obj', on_delete=models.CASCADE, default=1)
            print("successfully made a KEK and DEK")
        except:
            print("unable to create KEK")
            print(get_user_model().natural_key(get_user_model()))
    time_generated = models.DateTimeField('date integrated', auto_now_add=datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
    objects = QuasiPlasma()

    class Meta:
        verbose_name = 'neutron matter collector'
        ordering = ['-time_generated']
        get_latest_by = 'time_generated'

    def DeriveDek(self, password):
        """Derive a DEK as sha256(salt + unwrapped KEK), wrap and persist it.

        NOTE(review): ``NeutronMatterCollector.kekForDek`` is the ForeignKey
        field object declared above, so the isinstance(..., KEK) test looks
        like it can never be True and the else branch always runs -- confirm.
        """
        crypto = CryptoTools()
        if isinstance(NeutronMatterCollector.kekForDek, KEK):
            if password != None and isinstance(password, str):
                # Generate DEK based off this formula sha256(256 bit SALT + KEK)
                self.SALT = crypto.RandomNumber(32)
                crypto.nonce = b64decode(NeutronMatterCollector.kekForDek.result_wrapped_nonce)
                DerivedDek = crypto.Sha256(bytes(self.SALT) + crypto.AesDecryptEAX(
                    bytes(b64decode(str(self.kekForDek.result_wrapped_kek).encode())),
                    crypto.Sha256(bytes(password.encode()))))
                self.dekgenerator = DerivedDek
                dek = DerivedDek
                # NOTE(review): DEK.wrap_key is called unbound with raw key
                # bytes as ``self`` -- verify against DEK.wrap_key's signature.
                dek = DEK.wrap_key(dek, password)
                newDek = DEK(result_wrappedDek=b64encode(dek), result_SALT=b64encode(self.SALT),
                             kek_to_retrieve=self.dekgenerator)
                newDek.save()
                return newDek
        else:
            self.kekForDek = NeutronCore(get_user_model()).DeriveKek(password)
            if isinstance(self.kekForDek, KEK):
                if password != None and isinstance(password, str):
                    # Generate DEK based off this formula sha256(256 bit SALT + KEK)
                    self.SALT = crypto.RandomNumber(32)
                    crypto.nonce = b64decode(self.kekForDek.result_wrapped_nonce)
                    # print(self.kekForDek.result_wrapped_nonce)
                    # print(self.kekForDek.result_wrapped_kek)
                    # print(password)
                    DerivedDek = crypto.Sha256(
                        bytes(self.SALT) + crypto.AesDecryptEAX(b64decode(self.kekForDek.result_wrapped_kek),
                                                                crypto.Sha256(bytes(password.encode()))))
                    # self.dekgenerator.id.set(self.request.user)
                    dek = DerivedDek
                    # newkey = DEK()
                    # newkey.dek = dek
                    # dek = DEK.wrap_key(newkey, kek=self.kekForDek, password=password.encode())
                    # Wrap the derived DEK directly under the unwrapped KEK.
                    dek = crypto.AesEncryptEAX(dek, crypto.Sha256(
                        crypto.AesDecryptEAX(b64decode(self.kekForDek.result_wrapped_kek),
                                             crypto.Sha256(bytes(password.encode())))))
                    newDek = DEK(result_wrappedDek=b64encode(dek), result_SALT=b64encode(self.SALT),
                                 result_wrapped_nonce=b64encode(crypto.nonce), id=self.id)
                    # newDek.kek_to_retrieve.set(self.dekgenerator)
                    # self.time_generated = models.DateTimeField('date integrated', auto_now_add=datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
                    self.save()
                    newDek.save()
                    self.dekgenerator.add(newDek)
                    self.save()
                    return newDek
class KryptonianSpeak:
    """Database router that pins every operation to the 'default' alias."""

    def db_for_read(self, model, **hints):
        # All reads go to the default database.
        return 'default'

    def db_for_write(self, model, **hints):
        # All writes go to the default database as well.
        return 'default'

    def allow_relation(self, obj1, obj2, **hints):
        # Relations are always permitted.  An earlier draft restricted this
        # to a fixed alias list ('default', 'superHeros', 'icePick',
        # 'neutronStarMatter', 'neutronStarMold') but was abandoned.
        return True

    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # Migrations may run on any database.
        return True
| 39.576065 | 194 | 0.621906 | 15,459 | 0.792282 | 0 | 0 | 0 | 0 | 0 | 0 | 4,183 | 0.214381 |
7d08e0455217002dd24efe02680fa2c013e09769 | 9,946 | py | Python | tests/test_bitshares.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 102 | 2018-04-08T23:05:00.000Z | 2022-03-31T10:10:03.000Z | tests/test_bitshares.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 246 | 2018-04-03T12:35:49.000Z | 2022-02-28T10:44:28.000Z | tests/test_bitshares.py | silverchen0402/python-bitshares | aafbcf5cd09e7bca99dd156fd60b9df8ba508630 | [
"MIT"
] | 128 | 2018-04-14T01:39:12.000Z | 2022-03-25T08:56:51.000Z | # -*- coding: utf-8 -*-
import mock
import string
import unittest
import random
from pprint import pprint
from bitshares import BitShares
from bitshares.account import Account
from bitsharesbase.operationids import getOperationNameForId
from bitshares.amount import Amount
from bitsharesbase.account import PrivateKey
from bitsharesbase.asset_permissions import todict
from bitshares.instance import set_shared_bitshares_instance
from .fixtures import fixture_data, bitshares
class Testcases(unittest.TestCase):
    """Integration-style tests for the BitShares facade.

    Each test builds (but does not broadcast) transactions against the
    fixture accounts loaded in setUp and inspects the resulting operations.
    """
    def setUp(self):
        # Load the shared wallet/account fixtures before every test.
        fixture_data()

    def test_connect(self):
        bitshares.connect()

    def test_set_default_account(self):
        bitshares.set_default_account("init0")

    def test_info(self):
        # Chain info must expose all of these well-known keys.
        info = bitshares.info()
        for key in [
            "current_witness",
            "head_block_id",
            "head_block_number",
            "id",
            "last_irreversible_block_num",
            "next_maintenance_time",
            "recently_missed_count",
            "time",
        ]:
            self.assertTrue(key in info)

    def test_finalizeOps(self):
        # Operations appended to separate tx buffers must stay separate.
        tx1 = bitshares.new_tx()
        tx2 = bitshares.new_tx()
        bitshares.transfer("init1", 1, "BTS", append_to=tx1)
        bitshares.transfer("init1", 2, "BTS", append_to=tx2)
        bitshares.transfer("init1", 3, "BTS", append_to=tx1)
        tx1 = tx1.json()
        tx2 = tx2.json()
        ops1 = tx1["operations"]
        ops2 = tx2["operations"]
        self.assertEqual(len(ops1), 2)
        self.assertEqual(len(ops2), 1)

    def test_transfer(self):
        # A memo transfer from init0 (1.2.100) to 1.2.101 of 1.33 BTS.
        tx = bitshares.transfer("1.2.101", 1.33, "BTS", memo="Foobar", account="init0")
        self.assertEqual(getOperationNameForId(tx["operations"][0][0]), "transfer")
        op = tx["operations"][0][1]
        self.assertIn("memo", op)
        self.assertEqual(op["from"], "1.2.100")
        self.assertEqual(op["to"], "1.2.101")
        amount = Amount(op["amount"])
        self.assertEqual(float(amount), 1.33)

    def test_create_account(self):
        # Random 12-char lowercase account name; four fresh keypairs.
        name = "".join(random.choice(string.ascii_lowercase) for _ in range(12))
        key1 = PrivateKey()
        key2 = PrivateKey()
        key3 = PrivateKey()
        key4 = PrivateKey()
        tx = bitshares.create_account(
            name,
            registrar="init0",  # 1.2.100
            referrer="init1",  # 1.2.101
            referrer_percent=33,
            owner_key=format(key1.pubkey, "BTS"),
            active_key=format(key2.pubkey, "BTS"),
            memo_key=format(key3.pubkey, "BTS"),
            additional_owner_keys=[format(key4.pubkey, "BTS")],
            additional_active_keys=[format(key4.pubkey, "BTS")],
            additional_owner_accounts=["committee-account"],  # 1.2.0
            additional_active_accounts=["committee-account"],
            proxy_account="init0",
            storekeys=False,
        )
        self.assertEqual(
            getOperationNameForId(tx["operations"][0][0]), "account_create"
        )
        op = tx["operations"][0][1]
        # The extra key/account authorities must land in both roles.
        role = "active"
        self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
        self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
        self.assertIn("1.2.0", [x[0] for x in op[role]["account_auths"]])
        role = "owner"
        self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
        self.assertIn(format(key4.pubkey, "BTS"), [x[0] for x in op[role]["key_auths"]])
        self.assertIn("1.2.0", [x[0] for x in op[role]["account_auths"]])
        self.assertEqual(op["options"]["voting_account"], "1.2.100")
        self.assertEqual(op["registrar"], "1.2.100")
        self.assertEqual(op["referrer"], "1.2.101")
        # referrer_percent is expressed in basis points on-chain.
        self.assertEqual(op["referrer_percent"], 33 * 100)

    def test_create_asset(self):
        symbol = "FOOBAR"
        precision = 7
        max_supply = 100000
        description = "Test asset"
        is_bitasset = True
        market_fee_percent = 0.1
        max_market_fee = 10
        blacklist_authorities = ["init1"]
        blacklist_authorities_ids = [Account(a)["id"] for a in blacklist_authorities]
        blacklist_markets = ["BTS"]
        blacklist_markets_ids = ["1.3.0"]
        permissions = {
            "charge_market_fee": True,
            "white_list": True,
            "override_authority": True,
            "transfer_restricted": True,
            "disable_force_settle": True,
            "global_settle": True,
            "disable_confidential": True,
            "witness_fed_asset": True,
            "committee_fed_asset": True,
        }
        flags = {
            "charge_market_fee": False,
            "white_list": False,
            "override_authority": False,
            "transfer_restricted": False,
            "disable_force_settle": False,
            "global_settle": False,
            "disable_confidential": False,
            "witness_fed_asset": False,
            "committee_fed_asset": False,
        }
        tx = bitshares.create_asset(
            symbol,
            precision,
            max_supply,
            market_fee_percent=market_fee_percent,
            max_market_fee=max_market_fee,
            description=description,
            is_bitasset=is_bitasset,
            blacklist_authorities=blacklist_authorities,
            blacklist_markets=blacklist_markets,
            permissions=permissions,
            flags=flags,
        )
        self.assertEqual(getOperationNameForId(tx["operations"][0][0]), "asset_create")
        op = tx["operations"][0][1]
        self.assertEqual(op["issuer"], "1.2.100")
        self.assertEqual(op["symbol"], symbol)
        self.assertEqual(op["precision"], precision)
        # Supplies/fees are serialized in satoshis (scaled by 10**precision).
        self.assertEqual(
            op["common_options"]["max_supply"], int(max_supply * 10 ** precision)
        )
        self.assertEqual(
            op["common_options"]["market_fee_percent"], int(market_fee_percent * 100)
        )
        self.assertEqual(
            op["common_options"]["max_market_fee"],
            int(max_market_fee * 10 ** precision),
        )
        self.assertEqual(op["common_options"]["description"], description)
        self.assertEqual(
            op["common_options"]["blacklist_authorities"], blacklist_authorities_ids
        )
        self.assertEqual(
            op["common_options"]["blacklist_markets"], blacklist_markets_ids
        )
        self.assertEqual(
            todict(op["common_options"]["issuer_permissions"]), permissions
        )
        self.assertEqual(todict(op["common_options"]["flags"]), flags)

    def test_weight_threshold(self):
        # Total weight here is 3 (1 account auth + 2 key auths).
        auth = {
            "account_auths": [["1.2.0", "1"]],
            "extensions": [],
            "key_auths": [
                ["BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", 1],
                ["BTS7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv", 1],
            ],
            "weight_threshold": 3,
        } # threshold fine
        bitshares._test_weights_treshold(auth)
        auth = {
            "account_auths": [["1.2.0", "1"]],
            "extensions": [],
            "key_auths": [
                ["BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", 1],
                ["BTS7GM9YXcsoAJAgKbqW2oVj7bnNXFNL4pk9NugqKWPmuhoEDbkDv", 1],
            ],
            "weight_threshold": 4,
        } # too high
        with self.assertRaises(ValueError):
            bitshares._test_weights_treshold(auth)

    def test_allow(self):
        tx = bitshares.allow(
            "BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n",
            weight=1,
            threshold=1,
            permission="owner",
        )
        self.assertEqual(
            getOperationNameForId(tx["operations"][0][0]), "account_update"
        )
        op = tx["operations"][0][1]
        self.assertIn("owner", op)
        self.assertIn(
            ["BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n", "1"],
            op["owner"]["key_auths"],
        )
        self.assertEqual(op["owner"]["weight_threshold"], 1)

    def test_disallow(self):
        # Removing a key that is not present must be rejected.
        with self.assertRaisesRegex(ValueError, ".*Changes nothing.*"):
            bitshares.disallow(
                "BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n",
                weight=1,
                threshold=1,
                permission="owner",
            )
        # Removing the last authority would leave threshold 0 -- rejected.
        with self.assertRaisesRegex(ValueError, "Cannot have threshold of 0"):
            bitshares.disallow(
                "BTS6MRyAjQq8ud7hVNYcfnVPJqcVpscN5So8BhtHuGYqET5GDW5CV",
                weight=1,
                threshold=1,
                permission="owner",
            )
        bitshares.disallow(
            "BTS5i8bEmtnN4fP4jAsBe17z9CCuQcHLkRyTuRZXYZeN2kVCL1sXa",
            weight=1,
            threshold=1,
            permission="active",
        )

    def test_update_memo_key(self):
        tx = bitshares.update_memo_key(
            "BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n"
        )
        self.assertEqual(
            getOperationNameForId(tx["operations"][0][0]), "account_update"
        )
        op = tx["operations"][0][1]
        self.assertEqual(
            op["new_options"]["memo_key"],
            "BTS55VCzsb47NZwWe5F3qyQKedX9iHBHMVVFSc96PDvV7wuj7W86n",
        )

    def test_approvewitness(self):
        tx = bitshares.approvewitness("1.6.1")
        self.assertEqual(
            getOperationNameForId(tx["operations"][0][0]), "account_update"
        )
        op = tx["operations"][0][1]
        self.assertIn("1:0", op["new_options"]["votes"])

    def test_approvecommittee(self):
        tx = bitshares.approvecommittee("1.5.0")
        self.assertEqual(
            getOperationNameForId(tx["operations"][0][0]), "account_update"
        )
        op = tx["operations"][0][1]
        self.assertIn("0:11", op["new_options"]["votes"])
| 36.973978 | 88 | 0.584356 | 9,467 | 0.95184 | 0 | 0 | 0 | 0 | 0 | 0 | 2,555 | 0.256887 |
7d0bbe8da578c249333be408a98ef7bf3a18a601 | 2,068 | py | Python | wms/config/wms.py | bhavesh95863/WMS | c45858a943a607e5d0b49f698e469f3362aae001 | [
"MIT"
] | null | null | null | wms/config/wms.py | bhavesh95863/WMS | c45858a943a607e5d0b49f698e469f3362aae001 | [
"MIT"
] | null | null | null | wms/config/wms.py | bhavesh95863/WMS | c45858a943a607e5d0b49f698e469f3362aae001 | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the WMS desk/module configuration consumed by Frappe."""

	def doctype_item(name, label):
		# Shorthand for a doctype shortcut entry.
		return {
			"type": "doctype",
			"name": name,
			"label": _(label)
		}

	wms_items = [
		doctype_item("WMS Lead", "WMS Lead"),
		doctype_item("Send SMS", "Send SMS"),
		doctype_item("Message Template", "Message Template"),
		doctype_item("Group", "Group"),
		doctype_item("WhatsApp Setting", "WhatsApp Setting"),
		doctype_item("WMS Task", "Task"),
		doctype_item("WMS Task Rule", "Task Rule"),
		doctype_item("Message Rule", "Message Rule"),
	]

	report_items = [
		{
			"type": "report",
			"name": "Performance Report",
			"doctype": "WMS Task",
			"is_query_report": True
		},
		doctype_item("Whatsapp Message Log", "Whatsapp Message Log"),
	]

	return [
		{
			"label": _("WMS"),
			"icon": "octicon octicon-briefcase",
			"items": wms_items
		},
		{
			"label": _("Reports"),
			"icon": "octicon octicon-briefcase",
			"items": report_items
		}
	]
| 29.126761 | 55 | 0.287718 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 676 | 0.326886 |
7d0e753b01114266b3974061310dec6cde76f5b2 | 7,424 | py | Python | medicalseg/utils/utils.py | onecatcn/MedicalSeg | ba490c5c4541ac5bad0aefad6453ce0a48241ec7 | [
"Apache-2.0"
] | null | null | null | medicalseg/utils/utils.py | onecatcn/MedicalSeg | ba490c5c4541ac5bad0aefad6453ce0a48241ec7 | [
"Apache-2.0"
] | null | null | null | medicalseg/utils/utils.py | onecatcn/MedicalSeg | ba490c5c4541ac5bad0aefad6453ce0a48241ec7 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import contextlib
import filelock
import os
import tempfile
import numpy as np
import random
from urllib.parse import urlparse, unquote
import paddle
from medicalseg.utils import logger, seg_env
from medicalseg.utils.download import download_file_and_uncompress
@contextlib.contextmanager
def generate_tempdir(directory: str = None, **kwargs):
    '''Yield a temporary directory, created under ``directory`` when given,
    otherwise under the MedicalSeg tmp home; removed on exit.'''
    base = directory or seg_env.TMP_HOME
    with tempfile.TemporaryDirectory(dir=base, **kwargs) as tmp_path:
        yield tmp_path
def load_entire_model(model, pretrained):
    """Load full pretrained weights into ``model`` when a source is given;
    otherwise emit a warning and leave the model untouched."""
    if pretrained is not None:
        load_pretrained_model(model, pretrained)
        return
    message = ('Not all pretrained params of {} are loaded, '
               'training from scratch or a pretrained backbone.')
    logger.warning(message.format(model.__class__.__name__))
def download_pretrained_model(pretrained_model):
    """
    Download pretrained model from url.

    Args:
        pretrained_model (str): the url of pretrained weight

    Returns:
        str: the path of pretrained weight

    """
    assert urlparse(pretrained_model).netloc, "The url is not valid."

    pretrained_model = unquote(pretrained_model)
    savename = pretrained_model.split('/')[-1]
    # Derive a cache name: archives keep their stem; anything else falls
    # back to the parent path segment.
    if not savename.endswith(('tgz', 'tar.gz', 'tar', 'zip')):
        savename = pretrained_model.split('/')[-2]
    else:
        savename = savename.split('.')[0]

    # Download into a throwaway dir; the file lock serialises concurrent
    # processes fetching the same model into PRETRAINED_MODEL_HOME.
    with generate_tempdir() as _dir:
        with filelock.FileLock(os.path.join(seg_env.TMP_HOME, savename)):
            pretrained_model = download_file_and_uncompress(
                pretrained_model,
                savepath=_dir,
                extrapath=seg_env.PRETRAINED_MODEL_HOME,
                extraname=savename)
            # Callers load this exact params file from the extracted dir.
            pretrained_model = os.path.join(pretrained_model, 'model.pdparams')
    return pretrained_model
def load_pretrained_model(model, pretrained_model):
    """Load pretrained weights into ``model``, skipping mismatched tensors.

    Args:
        model: paddle model whose matching parameters will be overwritten.
        pretrained_model (str|None): local path or URL of a ``.pdparams``
            checkpoint. URLs are downloaded first. When None, the model is
            left untouched and an info message is logged.

    Raises:
        ValueError: if ``pretrained_model`` is a path that does not exist.
    """
    if pretrained_model is not None:
        logger.info(
            'Loading pretrained model from {}'.format(pretrained_model))

        if urlparse(pretrained_model).netloc:
            # A URL was given: fetch it and continue with the local path.
            pretrained_model = download_pretrained_model(pretrained_model)

        if os.path.exists(pretrained_model):
            para_state_dict = paddle.load(pretrained_model)
            model_state_dict = model.state_dict()
            keys = model_state_dict.keys()
            num_params_loaded = 0
            for k in keys:
                if k not in para_state_dict:
                    logger.warning("{} is not in pretrained model".format(k))
                elif list(para_state_dict[k].shape) != list(
                        model_state_dict[k].shape):
                    # Shape mismatch (e.g. a different number of output
                    # classes): keep the model's own initialization.
                    logger.warning(
                        "[SKIP] Shape of pretrained params {} doesn't match.(Pretrained: {}, Actual: {})"
                        .format(k, para_state_dict[k].shape,
                                model_state_dict[k].shape))
                else:
                    model_state_dict[k] = para_state_dict[k]
                    num_params_loaded += 1
            model.set_dict(model_state_dict)
            logger.info("There are {}/{} variables loaded into {}.".format(
                num_params_loaded, len(model_state_dict),
                model.__class__.__name__))
        else:
            raise ValueError(
                'The pretrained model directory is not Found: {}'.format(
                    pretrained_model))
    else:
        logger.info(
            'No pretrained model to load, {} will be trained from scratch.'.
            format(model.__class__.__name__))
def resume(model, optimizer, resume_model):
    """Restore model and optimizer state from a checkpoint directory.

    Args:
        model: paddle model whose parameters will be restored.
        optimizer: paddle optimizer whose state will be restored.
        resume_model (str|None): checkpoint directory whose name ends with
            ``_<iter>``; must contain ``model.pdparams`` and ``model.pdopt``.
            When None, nothing is restored.

    Returns:
        int|None: the iteration number parsed from the directory name, or
        None when ``resume_model`` is None.

    Raises:
        ValueError: if the checkpoint directory does not exist.
    """
    if resume_model is not None:
        logger.info('Resume model from {}'.format(resume_model))
        if os.path.exists(resume_model):
            resume_model = os.path.normpath(resume_model)
            ckpt_path = os.path.join(resume_model, 'model.pdparams')
            para_state_dict = paddle.load(ckpt_path)
            ckpt_path = os.path.join(resume_model, 'model.pdopt')
            opti_state_dict = paddle.load(ckpt_path)
            model.set_state_dict(para_state_dict)
            optimizer.set_state_dict(opti_state_dict)

            # Checkpoint directories are named like "..._1000"; recover the
            # iteration counter from the trailing component. (Renamed from
            # ``iter`` to avoid shadowing the builtin.)
            resumed_iter = int(resume_model.split('_')[-1])
            return resumed_iter
        else:
            raise ValueError(
                'Directory of the model needed to resume is not Found: {}'.
                format(resume_model))
    else:
        logger.info('No model needed to resume.')
def worker_init_fn(worker_id):
    """Re-seed numpy inside each DataLoader worker process.

    Without this, forked workers would inherit the parent's RNG state and
    produce identical random augmentations.
    """
    seed = random.randint(0, 100000)
    np.random.seed(seed)
def get_image_list(image_path, valid_suffix=None, filter_key=None):
    """Collect image file paths from a single file or a directory tree.

    Args:
        image_path (str): an image file, or a directory to walk for images.
        valid_suffix (list|tuple): accepted (possibly compound) suffixes,
            compared against everything after the first dot of the file
            name (so 'x.nii.gz' matches 'nii.gz'). Defaults to common
            medical-image formats.
        filter_key (dict): maps a filename substring to whether it must be
            present (True) or absent (False), e.g. {"segmentation": True}.
            Only applied when ``image_path`` is a single file, matching the
            original behavior.

    Returns:
        list[str]: the collected image paths.

    Raises:
        FileNotFoundError: if ``image_path`` is neither a matching file nor
            a directory.
        RuntimeError: if no image survives the suffix/key filtering.
    """
    if valid_suffix is None:
        valid_suffix = [
            'nii.gz', 'nii', 'dcm', 'nrrd', 'mhd', 'raw', 'npy', 'mha'
        ]

    image_list = []
    if os.path.isfile(image_path):
        # os.path.basename is OS-invariant, unlike split("/").
        f_name = os.path.basename(image_path)
        if f_name.split('.', maxsplit=1)[-1] in valid_suffix:
            if filter_key is not None:
                # Bug fix: the original iterated the dict directly
                # (``for key, val in filter_key``), which yields keys only
                # and fails to unpack. Iterate items(): keep the file only
                # when every (substring, should-contain) rule holds.
                if all((key in f_name) is val
                       for key, val in filter_key.items()):
                    image_list.append(image_path)
            else:
                image_list.append(image_path)
        else:
            raise FileNotFoundError(
                '{} is not a file end with supported suffix, the support suffixes are {}.'
                .format(image_path, valid_suffix))
    # load image in a directory
    elif os.path.isdir(image_path):
        for root, dirs, files in os.walk(image_path):
            # Skip Jupyter checkpoint folders entirely (hoisted out of the
            # inner loop; same effect as the per-file check).
            if '.ipynb_checkpoints' in root:
                continue
            for f in files:
                if f.split(".", maxsplit=1)[-1] in valid_suffix:
                    image_list.append(os.path.join(root, f))
    else:
        raise FileNotFoundError(
            '`--image_path` is not found. it should be a path of image, or a directory including images.'
        )

    if len(image_list) == 0:
        raise RuntimeError(
            'There are not image file in `--image_path`={}'.format(image_path))
    return image_list
| 37.685279 | 151 | 0.620824 | 0 | 0 | 250 | 0.033665 | 277 | 0.037301 | 0 | 0 | 2,341 | 0.315244 |
7d10929e0f7464895fe68437ba3cbfe2769eb71c | 16,828 | py | Python | src/specific_models/bot-iot/attack_identification/lstm.py | kaylani2/sbseg2020 | 055e403cdf5a3484d4d66e5dbe20a498af6669e0 | [
"MIT"
] | 7 | 2019-11-06T14:35:37.000Z | 2022-03-06T03:55:06.000Z | src/specific_models/bot-iot/attack_identification/lstm.py | kaylani2/sbseg2020 | 055e403cdf5a3484d4d66e5dbe20a498af6669e0 | [
"MIT"
] | 10 | 2020-05-16T02:38:35.000Z | 2021-04-11T23:55:35.000Z | src/specific_models/bot-iot/attack_identification/lstm.py | kaylani2/sbseg2020 | 055e403cdf5a3484d4d66e5dbe20a498af6669e0 | [
"MIT"
] | 2 | 2020-06-26T21:39:41.000Z | 2020-09-15T03:38:32.000Z | # Author: Kaylani Bochie
# github.com/kaylani2
# kaylani AT gta DOT ufrj DOT br
### K: Model: LSTM
import sys
import time
import pandas as pd
import os
import math
import numpy as np
from numpy import mean, std
from unit import remove_columns_with_one_value, remove_nan_columns, load_dataset
from unit import display_general_information, display_feature_distribution
from collections import Counter
#from imblearn.over_sampling import RandomOverSampler, RandomUnderSampler
import sklearn
from sklearn import set_config
from sklearn.impute import SimpleImputer
from sklearn.svm import SVC, LinearSVC
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LinearRegression
from sklearn.naive_bayes import GaussianNB
from sklearn.preprocessing import LabelEncoder, OneHotEncoder, OrdinalEncoder
from sklearn.preprocessing import StandardScaler, RobustScaler, MinMaxScaler
from sklearn.metrics import confusion_matrix, precision_score, recall_score
from sklearn.metrics import f1_score, classification_report, accuracy_score
from sklearn.metrics import cohen_kappa_score, mean_squared_error
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split, PredefinedSplit, RandomizedSearchCV
from sklearn.model_selection import GridSearchCV, RepeatedStratifiedKFold
from sklearn.model_selection import cross_val_score
from sklearn.decomposition import PCA
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif, chi2, mutual_info_classif
from sklearn.utils import class_weight
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier, KerasRegressor
import keras.utils
from keras import metrics
from keras.utils import to_categorical
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import Conv2D, MaxPooling2D, Flatten, LSTM
from keras.optimizers import RMSprop, Adam
from keras.constraints import maxnorm
###############################################################################
## Define constants
###############################################################################
pd.set_option ('display.max_rows', None)
pd.set_option ('display.max_columns', 5)
BOT_IOT_DIRECTORY = '../../../../datasets/bot-iot/'
BOT_IOT_FEATURE_NAMES = 'UNSW_2018_IoT_Botnet_Dataset_Feature_Names.csv'
BOT_IOT_FILE_5_PERCENT_SCHEMA = 'UNSW_2018_IoT_Botnet_Full5pc_{}.csv' # 1 - 4
FIVE_PERCENT_FILES = 4
BOT_IOT_FILE_FULL_SCHEMA = 'UNSW_2018_IoT_Botnet_Dataset_{}.csv' # 1 - 74
FULL_FILES = 74
FILE_NAME = BOT_IOT_DIRECTORY + BOT_IOT_FILE_5_PERCENT_SCHEMA
FEATURES = BOT_IOT_DIRECTORY + BOT_IOT_FEATURE_NAMES
NAN_VALUES = ['?', '.']
TARGET = 'attack'
INDEX_COLUMN = 'pkSeqID'
LABELS = ['attack', 'category', 'subcategory']
STATE = 0
try:
STATE = int (sys.argv [1])
except:
pass
#for STATE in [1, 2, 3, 4, 5]:
np.random.seed (STATE)
print ('STATE:', STATE)
###############################################################################
## Load dataset
###############################################################################
df = load_dataset (FILE_NAME, FIVE_PERCENT_FILES, INDEX_COLUMN, NAN_VALUES)
###############################################################################
## Clean dataset
###############################################################################
###############################################################################
### Remove columns with only one value
df, log = remove_columns_with_one_value (df, verbose = False)
print (log)
###############################################################################
### Remove redundant columns, useless columns and unused targets
### K: _number columns are numerical representations of other existing columns.
### K: category and subcategory are other labels.
### K: saddr and daddr may specialize the model to a single network
redundant_columns = ['state_number', 'proto_number', 'flgs_number']
other_targets = ['category', 'subcategory']
misc_columns = ['saddr', 'daddr']
print ('Removing redundant columns:', redundant_columns)
print ('Removing useless targets:', other_targets)
print ('Removing misc columns:', misc_columns)
columns_to_remove = redundant_columns + other_targets + misc_columns
df.drop (axis = 'columns', columns = columns_to_remove, inplace = True)
###############################################################################
### Remove NaN columns (with a lot of NaN values)
df, log = remove_nan_columns (df, 1/2, verbose = False)
print (log)
###############################################################################
### Encode categorical features
print ('Encoding categorical features (ordinal encoding).')
my_encoder = OrdinalEncoder ()
df ['flgs'] = my_encoder.fit_transform (df ['flgs'].values.reshape (-1, 1))
df ['proto'] = my_encoder.fit_transform (df ['proto'].values.reshape (-1, 1))
df ['sport'] = my_encoder.fit_transform (df ['sport'].astype (str).values.reshape (-1, 1))
df ['dport'] = my_encoder.fit_transform (df ['dport'].astype (str).values.reshape (-1, 1))
df ['state'] = my_encoder.fit_transform (df ['state'].values.reshape (-1, 1))
print ('Objects:', list (df.select_dtypes ( ['object']).columns))
###############################################################################
## Quick sanity check
###############################################################################
display_general_information (df)
###############################################################################
## Split dataset into train and test sets
###############################################################################
### K: Dataset is too big? Drop.
# drop_indices = np.random.choice (df.index, int (df.shape [0] * 0.5),
# replace = False)
# df = df.drop (drop_indices)
TEST_SIZE = 3/10
VALIDATION_SIZE = 1/4
print ('Splitting dataset (test/train):', TEST_SIZE)
X_train_df, X_test_df, y_train_df, y_test_df = train_test_split (
df.loc [:, df.columns != TARGET],
df [TARGET],
test_size = TEST_SIZE,
random_state = STATE,)
print ('Splitting dataset (validation/train):', VALIDATION_SIZE)
X_train_df, X_val_df, y_train_df, y_val_df = train_test_split (
X_train_df,
y_train_df,
test_size = VALIDATION_SIZE,
random_state = STATE,)
X_train_df.sort_index (inplace = True)
y_train_df.sort_index (inplace = True)
X_val_df.sort_index (inplace = True)
y_val_df.sort_index (inplace = True)
X_test_df.sort_index (inplace = True)
y_test_df.sort_index (inplace = True)
print ('X_train_df shape:', X_train_df.shape)
print ('y_train_df shape:', y_train_df.shape)
print ('X_val_df shape:', X_val_df.shape)
print ('y_val_df shape:', y_val_df.shape)
print ('X_test_df shape:', X_test_df.shape)
print ('y_test_df shape:', y_test_df.shape)
###############################################################################
## Convert dataframe to a numpy array
###############################################################################
print ('\nConverting dataframe to numpy array.')
X_train = X_train_df.values
y_train = y_train_df.values
X_val = X_val_df.values
y_val = y_val_df.values
X_test = X_test_df.values
y_test = y_test_df.values
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_val shape:', X_val.shape)
print ('y_val shape:', y_val.shape)
print ('X_test shape:', X_test.shape)
print ('y_test shape:', y_test.shape)
###############################################################################
## Apply normalization
###############################################################################
### K: NOTE: Only use derived information from the train set to avoid leakage.
print ('\nApplying normalization.')
startTime = time.time ()
scaler = StandardScaler ()
#scaler = MinMaxScaler (feature_range = (0, 1))
scaler.fit (X_train)
X_train = scaler.transform (X_train)
X_val = scaler.transform (X_val)
X_test = scaler.transform (X_test)
print (str (time.time () - startTime), 'to normalize data.')
###############################################################################
## Perform feature selection
###############################################################################
NUMBER_OF_FEATURES = 9 #'all'
print ('\nSelecting top', NUMBER_OF_FEATURES, 'features.')
startTime = time.time ()
#fs = SelectKBest (score_func = mutual_info_classif, k = NUMBER_OF_FEATURES)
### K: ~30 minutes to FAIL fit mutual_info_classif to 5% bot-iot
#fs = SelectKBest (score_func = chi2, k = NUMBER_OF_FEATURES) # X must be >= 0
### K: ~4 seconds to fit chi2 to 5% bot-iot (MinMaxScaler (0, 1))
fs = SelectKBest (score_func = f_classif, k = NUMBER_OF_FEATURES)
### K: ~4 seconds to fit f_classif to 5% bot-iot
fs.fit (X_train, y_train)
X_train = fs.transform (X_train)
X_val = fs.transform (X_val)
X_test = fs.transform (X_test)
print (str (time.time () - startTime), 'to select features.')
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_val shape:', X_val.shape)
print ('y_val shape:', y_val.shape)
print ('X_test shape:', X_test.shape)
print ('y_test shape:', y_test.shape)
bestFeatures = []
for feature in range (len (fs.scores_)):
bestFeatures.append ({'f': feature, 's': fs.scores_ [feature]})
bestFeatures = sorted (bestFeatures, key = lambda k: k ['s'])
for feature in bestFeatures:
print ('Feature %d: %f' % (feature ['f'], feature ['s']))
###############################################################################
## Rearrange samples for RNN
###############################################################################
print ('\nRearranging dataset for the RNN.')
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_val shape:', X_val.shape)
print ('y_val shape:', y_val.shape)
print ('y_test shape:', y_test.shape)
STEPS = 3
FEATURES = X_train.shape [1]
def window_stack(a, stride = 1, numberOfSteps = 3):
    """Stack sliding windows of rows of ``a`` side by side.

    Row ``r`` of the result is the concatenation of rows
    ``r, r+1, ..., r+numberOfSteps-1`` of ``a`` (rows sampled with
    ``stride``), so an input of shape (n, f) yields
    (n - numberOfSteps + 1, numberOfSteps * f) when stride is 1.
    """
    windows = []
    for offset in range(numberOfSteps):
        stop = 1 + offset - numberOfSteps
        # ``stop or None`` turns the final window's stop of 0 into "end".
        windows.append(a[offset:stop or None:stride])
    return np.hstack(windows)
X_train = window_stack (X_train, stride = 1, numberOfSteps = STEPS)
X_train = X_train.reshape (X_train.shape [0], STEPS, FEATURES)
X_val = window_stack (X_val, stride = 1, numberOfSteps = STEPS)
X_val = X_val.reshape (X_val.shape [0], STEPS, FEATURES)
X_test = window_stack (X_test, stride = 1, numberOfSteps = STEPS)
X_test = X_test.reshape (X_test.shape [0], STEPS, FEATURES)
y_train = y_train [ (STEPS - 1):]
y_val = y_val [ (STEPS - 1):]
y_test = y_test [ (STEPS - 1):]
print ('X_train shape:', X_train.shape)
print ('y_train shape:', y_train.shape)
print ('X_val shape:', X_val.shape)
print ('y_val shape:', y_val.shape)
print ('X_test shape:', X_test.shape)
print ('y_test shape:', y_test.shape)
###############################################################################
## Create learning model and tune hyperparameters
###############################################################################
### -1 indices -> train
### 0 indices -> validation
test_fold = np.repeat ( [-1, 0], [X_train.shape [0], X_val.shape [0]])
myPreSplit = PredefinedSplit (test_fold)
'''
def create_model (learn_rate = 0.01, dropout_rate = 0.0, weight_constraint = 0, units = 50):
model = Sequential ()
model.add (LSTM (units = units, activation = 'relu' , input_shape= (X_train.shape [1], X_train.shape [2])))
model.add (Dense (1, activation = 'sigmoid'))
model.compile (optimizer = 'adam', loss = 'binary_crossentropy',)
return model
model = KerasClassifier (build_fn = create_model, verbose = 2)
batch_size = [5000, 1000]#10, 30, 50]
epochs = [5]#, 5, 10]
learn_rate = [0.001, 0.01, 0.1]
dropout_rate = [0.0]#, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9]
weight_constraint = [0]#, 2, 3, 4, 5]
units = [10, 50, 100]
param_grid = dict (batch_size = batch_size, epochs = epochs,
dropout_rate = dropout_rate, learn_rate = learn_rate,
weight_constraint = weight_constraint, units = units)
grid = GridSearchCV (estimator = model, param_grid = param_grid,
scoring = 'f1_weighted', cv = myPreSplit, verbose = 2,
n_jobs = -1)
grid_result = grid.fit (np.concatenate ( (X_train, X_val), axis = 0),
np.concatenate ( (y_train, y_val), axis = 0))
print (grid_result.best_params_)
print ("Best: %f using %s" % (grid_result.best_score_, grid_result.best_params_))
means = grid_result.cv_results_ ['mean_test_score']
stds = grid_result.cv_results_ ['std_test_score']
params = grid_result.cv_results_ ['params']
for mean, stdev, param in zip (means, stds, params):
print ("%f (%f) with: %r" % (mean, stdev, param))
sys.exit ()
'''
###############################################################################
## Finished model
METRICS = [keras.metrics.TruePositives (name = 'TP'),
keras.metrics.FalsePositives (name = 'FP'),
keras.metrics.TrueNegatives (name = 'TN'),
keras.metrics.FalseNegatives (name = 'FN'),
keras.metrics.BinaryAccuracy (name = 'Acc.'),
keras.metrics.Precision (name = 'Prec.'),
keras.metrics.Recall (name = 'Recall'),
keras.metrics.AUC (name = 'AUC'),]
BATCH_SIZE = 5000
NUMBER_OF_EPOCHS = 3
LEARNING_RATE = 0.1
DROPOUT_RATE = 0.2
clf = Sequential ()
clf.add (LSTM (100, activation = 'relu', #return_sequences = True,
input_shape = (X_train.shape [1], X_train.shape [2])))
clf.add (Dropout (DROPOUT_RATE))
#clf.add (LSTM (50, activation='relu'))
clf.add (Dense (1, activation = 'sigmoid'))
print ('Model summary:')
clf.summary ()
###############################################################################
## Compile the network
###############################################################################
print ('\nCompiling the network.')
clf.compile (optimizer = 'adam',
loss = 'binary_crossentropy',
metrics = METRICS)
###############################################################################
## Fit the network
###############################################################################
print ('\nFitting the network.')
startTime = time.time ()
history = clf.fit (X_train, y_train,
batch_size = BATCH_SIZE,
epochs = NUMBER_OF_EPOCHS,
verbose = 2, #1 = progress bar, not useful for logging
workers = 0,
use_multiprocessing = True,
#class_weight = 'auto',
validation_data = (X_val, y_val))
print (str (time.time () - startTime), 's to train model.')
###############################################################################
## Analyze results
###############################################################################
print ('\nPerformance on TRAIN set:')
y_pred = clf.predict (X_train)
y_pred = y_pred.round ()
my_confusion_matrix = confusion_matrix (y_train, y_pred,
labels = df [TARGET].unique ())
tn, fp, fn, tp = my_confusion_matrix.ravel ()
print ('Confusion matrix:')
print (my_confusion_matrix)
print ('Accuracy:', accuracy_score (y_train, y_pred))
print ('Precision:', precision_score (y_train, y_pred, average = 'macro'))
print ('Recall:', recall_score (y_train, y_pred, average = 'macro'))
print ('F1:', f1_score (y_train, y_pred, average = 'macro'))
print ('Cohen Kappa:', cohen_kappa_score (y_train, y_pred,
labels = df [TARGET].unique ()))
print ('TP:', tp)
print ('TN:', tn)
print ('FP:', fp)
print ('FN:', fn)
### K: Only before publishing... Don't peek.
sys.exit ()
print ('\nPerformance on TEST set:')
y_pred = clf.predict (X_test)
y_pred = y_pred.round ()
my_confusion_matrix = confusion_matrix (y_test, y_pred,
labels = df [TARGET].unique ())
tn, fp, fn, tp = my_confusion_matrix.ravel ()
print ('Confusion matrix:')
print (my_confusion_matrix)
print ('Accuracy:', accuracy_score (y_test, y_pred))
print ('Precision:', precision_score (y_test, y_pred, average = 'macro'))
print ('Recall:', recall_score (y_test, y_pred, average = 'macro'))
print ('F1:', f1_score (y_test, y_pred, average = 'macro'))
print ('Cohen Kappa:', cohen_kappa_score (y_test, y_pred,
labels = df [TARGET].unique ()))
print ('TP:', tp)
print ('TN:', tn)
print ('FP:', fp)
print ('FN:', fn)
| 42.175439 | 109 | 0.590504 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7,542 | 0.448182 |
7d124f218a5ee0a4b8f0187b77fddb8e78f5822d | 866 | py | Python | ledis/datastructures.py | gianghta/Ledis | a6b31617621746344408ee411cf510ef3cfb2e7b | [
"MIT"
] | null | null | null | ledis/datastructures.py | gianghta/Ledis | a6b31617621746344408ee411cf510ef3cfb2e7b | [
"MIT"
] | null | null | null | ledis/datastructures.py | gianghta/Ledis | a6b31617621746344408ee411cf510ef3cfb2e7b | [
"MIT"
] | null | null | null | from enum import unique, Enum
from typing import Union
@unique
class DataType(Enum):
    # Tag identifying the concrete Python type held by a data structure.
    # Values are chosen to match ``type(data).__name__`` so that a type tag
    # can be looked up directly from a payload object.
    STR = "str"
    SET = "set"
class BaseDataStructure:
    """Common container for a stored value: payload, type tag and expiry."""

    __slots__ = {"data", "type", "expire_at"}

    def __init__(self, data: Union[str, set]):
        self.data = data
        # DataType(...) raises ValueError for any payload type that is not
        # enumerated, so unsupported types are rejected here.
        self.type = DataType(type(data).__name__)
        # UTC expiry timestamp in seconds; None means the value never expires.
        self.expire_at = None

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            # Refuse comparison against unrelated types.
            return NotImplemented
        mine = (self.data, self.type, self.expire_at)
        theirs = (other.data, other.type, other.expire_at)
        return mine == theirs
class String(BaseDataStructure):
    """String value; all behavior is inherited from BaseDataStructure."""
    pass
class Set(BaseDataStructure):
    """Set value; all behavior is inherited from BaseDataStructure."""
    pass
| 21.121951 | 62 | 0.614319 | 791 | 0.913395 | 0 | 0 | 61 | 0.070439 | 0 | 0 | 168 | 0.193995 |
7d12d17fb491fc8c8fc02644cb8aec6502ee03f9 | 6,350 | py | Python | pychpp/ht_matches_archive.py | PiGo86/pychpp | 052c2ea96b170118fc51b9a72f00995cb7465290 | [
"Apache-2.0"
] | 1 | 2021-11-01T11:58:47.000Z | 2021-11-01T11:58:47.000Z | pychpp/ht_matches_archive.py | PiGo86/pychpp | 052c2ea96b170118fc51b9a72f00995cb7465290 | [
"Apache-2.0"
] | null | null | null | pychpp/ht_matches_archive.py | PiGo86/pychpp | 052c2ea96b170118fc51b9a72f00995cb7465290 | [
"Apache-2.0"
] | 1 | 2020-08-27T13:56:16.000Z | 2020-08-27T13:56:16.000Z | import datetime
from pychpp import ht_model
from pychpp import ht_xml
from pychpp import ht_team, ht_match, ht_datetime
class HTMatchesArchive(ht_model.HTModel):
    """
    Hattrick matches archive

    Fetches the 'matchesarchive' CHPP file and exposes the matches found
    as a list of HTMatchesArchiveItem objects; the archive itself is
    indexable and has a length, delegating to that list.
    """

    _SOURCE_FILE = "matchesarchive"
    _SOURCE_FILE_VERSION = "1.4"

    # URL PATH with several params available should be urlencoded
    _URL_PATH = "%2FClub%2FMatches%2FArchive.aspx%3F"

    # (attribute name, XML path, converter) triples, presumably consumed by
    # the ht_model.HTModel base class -- base class not visible here.
    _ht_attributes = [("team_id", "Team/TeamID", ht_xml.HTXml.ht_int),
                      ("team_name", "Team/TeamName", ht_xml.HTXml.ht_str),
                      ("first_match_date", "Team/FirstMatchDate",
                       ht_xml.HTXml.ht_datetime_from_text),
                      ("last_match_date", "Team/LastMatchDate",
                       ht_xml.HTXml.ht_datetime_from_text),
                      ]

    def __init__(self, ht_id=None, youth=False, first_match_date=None,
                 last_match_date=None, season=None, hto=False, **kwargs):
        """
        Initialization of a HTMatchesArchive instance

        :param ht_id: Hattrick ID of team
        :param youth: define if requested team is youth or not
        :param first_match_date: begin date to search matches
        :param last_match_date: end date to search matches
        :param season: season to search matches
        :param hto: whether to include tournament matches
        :type ht_id: int
        :type youth: bool
        :type first_match_date: datetime.datetime or ht_datetime.HTDatetime
        :type last_match_date: datetime.datetime or ht_datetime.HTDatetime
        :type season: int
        :type hto: bool
        :key chpp: CHPP instance of connected user, must be a chpp.CHPP object
        :return: a ht_matches_archive.HTMatchesArchive object
        :rtype: ht_matches_archive.HTMatchesArchive
        """
        # Check parameters integrity
        if not isinstance(ht_id, int) and ht_id is not None:
            raise ValueError("ht_id must be None or an integer")
        elif not isinstance(youth, bool):
            raise ValueError("youth must be a boolean")
        elif (not (isinstance(first_match_date, datetime.datetime)
                   or isinstance(first_match_date, ht_datetime.HTDatetime))
              and first_match_date is not None):
            raise ValueError("first_match_date must be a datetime "
                             "or HTDatetime instance")
        elif (not (isinstance(last_match_date, datetime.datetime)
                   or isinstance(last_match_date, ht_datetime.HTDatetime))
              and last_match_date is not None):
            raise ValueError("last_match_date must be a datetime "
                             "or HTDatetime instance")
        elif not isinstance(season, int) and season is not None:
            raise ValueError("season must be a integer")
        elif not isinstance(hto, bool):
            raise ValueError("hto must be a boolean")

        # Define request arguments
        # Unset parameters are sent as empty strings rather than omitted.
        self._REQUEST_ARGS = dict()
        self._REQUEST_ARGS["teamID"] = str(ht_id) if ht_id is not None else ""
        self._REQUEST_ARGS["isYouth"] = "true" if youth is True else "false"
        self._REQUEST_ARGS["FirstMatchDate"] = (
            ht_xml.HTXml.ht_datetime_to_text(first_match_date)
            if first_match_date is not None else "")
        self._REQUEST_ARGS["LastMatchDate"] = (
            ht_xml.HTXml.ht_datetime_to_text(last_match_date)
            if last_match_date is not None else "")
        self._REQUEST_ARGS["season"] = (
            str(season) if season is not None else "")
        self._REQUEST_ARGS["HTO"] = "true" if hto is True else "false"

        super().__init__(**kwargs)

        # Wrap each <Match> node of the fetched XML in a lightweight item.
        self.matches_list = [
            HTMatchesArchiveItem(chpp=self._chpp, data=data)
            for data in self._data.findall("Team/MatchList/Match")]

    def __getitem__(self, item):
        # Delegate indexing/slicing to the underlying list of matches.
        return self.matches_list[item]

    def __len__(self):
        return len(self.matches_list)

    def __repr__(self):
        return self.matches_list.__repr__()

    @property
    def url(self):
        # Human-usable Hattrick URL for this archive query; arguments are
        # percent-encoded to match the already-encoded _URL_PATH.
        url_args = []
        if self.team_id:
            url_args.append(f'TeamID%3D{self.team_id}')
        if self._REQUEST_ARGS["season"]:
            url_args.append(f'season%3D{self._REQUEST_ARGS["season"]}')
        return f'{self._BASE_URL}{self._URL_PATH}{"%26".join(url_args)}'
class HTMatchesArchiveItem(ht_model.HTModel):
    """
    One match summary returned inside an HTMatchesArchive.

    Holds the archived match's identifiers, teams, date and score; the
    ``details``, ``home_team`` and ``away_team`` properties fetch the full
    objects on demand (each access triggers a new CHPP request).
    """

    _URL_PATH = "/Club/Matches/Match.aspx?matchID="

    # (attribute name, XML path, converter) triples, presumably consumed by
    # the ht_model.HTModel base class -- base class not visible here.
    _ht_attributes = [("ht_id", "MatchID", ht_xml.HTXml.ht_int,),
                      ("home_team_id", "HomeTeam/HomeTeamID",
                       ht_xml.HTXml.ht_int,),
                      ("home_team_name", "HomeTeam/HomeTeamName",
                       ht_xml.HTXml.ht_str,),
                      ("away_team_id", "AwayTeam/AwayTeamID",
                       ht_xml.HTXml.ht_int,),
                      ("away_team_name", "AwayTeam/AwayTeamName",
                       ht_xml.HTXml.ht_str,),
                      ("datetime", "MatchDate",
                       ht_xml.HTXml.ht_datetime_from_text,),
                      ("type", "MatchType",
                       ht_xml.HTXml.ht_int,),
                      ("context_id", "MatchContextId", ht_xml.HTXml.ht_int,),
                      ("rule_id", "MatchRuleId", ht_xml.HTXml.ht_int,),
                      ("cup_level", "CupLevel", ht_xml.HTXml.ht_int,),
                      ("cup_level_index", "CupLevelIndex",
                       ht_xml.HTXml.ht_int,),
                      ("home_goals", "HomeGoals", ht_xml.HTXml.ht_int,),
                      ("away_goals", "AwayGoals", ht_xml.HTXml.ht_int,),
                      ]

    def __repr__(self):
        return f"<{self.__class__.__name__} object : " \
               f"{self.home_team_name} - {self.away_team_name} ({self.ht_id})>"

    @property
    def details(self):
        # Full match data, fetched as a new HTMatch on every access.
        return ht_match.HTMatch(chpp=self._chpp, ht_id=self.ht_id)

    @property
    def home_team(self):
        return ht_team.HTTeam(chpp=self._chpp, ht_id=self.home_team_id)

    @property
    def away_team(self):
        return ht_team.HTTeam(chpp=self._chpp, ht_id=self.away_team_id)
7d13b825db8617c1324456436c859711871cf5e3 | 46,339 | py | Python | langcodes/__init__.py | garyd203/langcodes | 0cedf9ca257ebf7250de5d3a63ec33a7d198db58 | [
"MIT"
] | null | null | null | langcodes/__init__.py | garyd203/langcodes | 0cedf9ca257ebf7250de5d3a63ec33a7d198db58 | [
"MIT"
] | null | null | null | langcodes/__init__.py | garyd203/langcodes | 0cedf9ca257ebf7250de5d3a63ec33a7d198db58 | [
"MIT"
] | null | null | null | """
langcodes knows what languages are. It knows the standardized codes that
refer to them, such as `en` for English, `es` for Spanish and `hi` for Hindi.
Often, it knows what these languages are called *in* a language, and that
language doesn't have to be English.
See README.md for the main documentation, or read it on GitHub at
https://github.com/LuminosoInsight/langcodes/ . For more specific documentation
on the functions in langcodes, scroll down and read the docstrings.
"""
import warnings
from langcodes.tag_parser import parse_tag
from langcodes.names import code_to_names, name_to_code
from langcodes.distance import raw_distance
from langcodes.data_dicts import (
DEFAULT_SCRIPTS, LANGUAGE_REPLACEMENTS, SCRIPT_REPLACEMENTS,
REGION_REPLACEMENTS, NORMALIZED_MACROLANGUAGES, LIKELY_SUBTAGS
)
# When we're getting natural language information *about* languages, it's in
# U.S. English if you don't specify the language.
DEFAULT_LANGUAGE = 'en-US'
class Language:
"""
The Language class defines the results of parsing a language tag.
Language objects have the following attributes, any of which may be
unspecified (in which case their value is None):
- *language*: the code for the language itself.
- *script*: the 4-letter code for the writing system being used.
- *region*: the 2-letter or 3-digit code for the country or similar region
whose usage of the language appears in this text.
- *extlangs*: a list of more specific language codes that follow the language
code. (This is allowed by the language code syntax, but deprecated.)
- *variants*: codes for specific variations of language usage that aren't
covered by the *script* or *region* codes.
- *extensions*: information that's attached to the language code for use in
some specific system, such as Unicode collation orders.
- *private*: a code starting with `x-` that has no defined meaning.
The `Language.get` method converts a string to a Language instance.
It's also available at the top level of this module as the `get` function.
"""
ATTRIBUTES = ['language', 'extlangs', 'script', 'region',
'variants', 'extensions', 'private']
# When looking up "likely subtags" data, we try looking up the data for
# increasingly less specific versions of the language code.
BROADER_KEYSETS = [
{'language', 'script', 'region'},
{'language', 'region'},
{'language', 'script'},
{'language'},
{'script'},
{}
]
MATCHABLE_KEYSETS = [
{'language', 'script', 'region'},
{'language', 'script'},
{'language'},
]
# Values cached at the class level
_INSTANCES = {}
_PARSE_CACHE = {}
    def __init__(self, language=None, extlangs=None, script=None,
                 region=None, variants=None, extensions=None, private=None):
        """
        The constructor for Language objects.

        It's inefficient to call this directly, because it can't return
        an existing instance. Instead, call Language.make(), which
        has the same signature and caches instances.
        """
        self.language = language
        self.extlangs = extlangs
        self.script = script
        self.region = region
        self.variants = variants
        self.extensions = extensions
        self.private = private

        # Cached values: memoized derived forms of this tag, initialized to
        # None here and presumably filled lazily by the correspondingly
        # named methods/properties (defined outside this excerpt).
        self._simplified = None
        self._searchable = None
        self._matchable_tags = None
        self._broader = None
        self._assumed = None
        self._filled = None
        self._macrolanguage = None
        self._str_tag = None
        self._dict = None

        # Make sure the str_tag value is cached
        self.to_tag()
@classmethod
def make(cls, language=None, extlangs=None, script=None,
region=None, variants=None, extensions=None, private=None):
"""
Create a Language object by giving any subset of its attributes.
If this value has been created before, return the existing value.
"""
values = (language, tuple(extlangs or ()), script, region,
tuple(variants or ()), tuple(extensions or ()), private)
if values in cls._INSTANCES:
return cls._INSTANCES[values]
instance = cls(
language=language, extlangs=extlangs,
script=script, region=region, variants=variants,
extensions=extensions, private=private
)
cls._INSTANCES[values] = instance
return instance
@staticmethod
def get(tag: {str, 'Language'}, normalize=True) -> 'Language':
    """
    Create a Language object from a language tag string.

    If normalize=True, non-standard or overlong tags will be replaced as
    they're interpreted. This is recommended.

    Here are several examples of language codes, which are also test cases.
    Most language codes are straightforward, but these examples will get
    pretty obscure toward the end.

    >>> Language.get('en-US')
    Language.make(language='en', region='US')
    >>> Language.get('zh-Hant')
    Language.make(language='zh', script='Hant')
    >>> Language.get('und')
    Language.make()

    This function is idempotent, in case you already have a Language object:

    >>> Language.get(Language.get('en-us'))
    Language.make(language='en', region='US')

    The non-code 'root' is sometimes used to represent the lack of any
    language information, similar to 'und'.

    >>> Language.get('root')
    Language.make()

    By default, getting a Language object will automatically convert
    deprecated tags:

    >>> Language.get('iw')
    Language.make(language='he')
    >>> Language.get('in')
    Language.make(language='id')

    One type of deprecated tag that should be replaced is for sign
    languages, which used to all be coded as regional variants of a
    fictitious global sign language called 'sgn'. Of course, there is no
    global sign language, so sign languages now have their own language
    codes.

    >>> Language.get('sgn-US')
    Language.make(language='ase')
    >>> Language.get('sgn-US', normalize=False)
    Language.make(language='sgn', region='US')

    'en-gb-oed' is a tag that's grandfathered into the standard because it
    has been used to mean "spell-check this with Oxford English Dictionary
    spelling", but that tag has the wrong shape. We interpret this as the
    new standardized tag 'en-gb-oxendict', unless asked not to normalize.

    >>> Language.get('en-gb-oed')
    Language.make(language='en', region='GB', variants=['oxendict'])
    >>> Language.get('en-gb-oed', normalize=False)
    Language.make(language='en-gb-oed')

    'zh-min-nan' is another oddly-formed tag, used to represent the
    Southern Min language, which includes Taiwanese as a regional form. It
    now has its own language code.

    >>> Language.get('zh-min-nan')
    Language.make(language='nan')

    There's not much we can do with the vague tag 'zh-min':

    >>> Language.get('zh-min')
    Language.make(language='zh-min')

    Occasionally Wiktionary will use 'extlang' tags in strange ways, such
    as using the tag 'und-ibe' for some unspecified Iberian language.

    >>> Language.get('und-ibe')
    Language.make(extlangs=['ibe'])

    Here's an example of replacing multiple deprecated tags.

    The language tag 'sh' (Serbo-Croatian) ended up being politically
    problematic, and different standards took different steps to address
    this. The IANA made it into a macrolanguage that contains 'sr', 'hr',
    and 'bs'. Unicode further decided that it's a legacy tag that should
    be interpreted as 'sr-Latn', which the language matching rules say
    is mutually intelligible with all those languages.

    We complicate the example by adding on the region tag 'QU', an old
    provisional tag for the European Union, which is now standardized as
    'EU'.

    >>> Language.get('sh-QU')
    Language.make(language='sr', script='Latn', region='EU')
    """
    if isinstance(tag, Language):
        if not normalize:
            # shortcut: we have the tag already
            return tag
        # We might need to normalize this tag. Convert it back into a
        # string tag, to cover all the edge cases of normalization in a
        # way that we've already solved.
        tag = tag.to_tag()
    # Return a previously-parsed result when available; the cache is keyed
    # on the normalize flag as well, since it changes the result.
    if (tag, normalize) in Language._PARSE_CACHE:
        return Language._PARSE_CACHE[tag, normalize]
    data = {}
    # if the complete tag appears as something to normalize, do the
    # normalization right away. Smash case when checking, because the
    # case normalization that comes from parse_tag() hasn't been applied
    # yet.
    tag_lower = tag.lower()
    if normalize and tag_lower in LANGUAGE_REPLACEMENTS:
        tag = LANGUAGE_REPLACEMENTS[tag_lower]
    # parse_tag() yields (type, value) pairs for each subtag; accumulate
    # them into keyword arguments for Language.make().
    components = parse_tag(tag)
    for typ, value in components:
        if typ == 'extlang' and normalize and 'language' in data:
            # smash extlangs when possible
            minitag = '%s-%s' % (data['language'], value)
            norm = LANGUAGE_REPLACEMENTS.get(minitag.lower())
            if norm is not None:
                data.update(
                    Language.get(norm, normalize).to_dict()
                )
            else:
                data.setdefault('extlangs', []).append(value)
        elif typ in {'extlang', 'variant', 'extension'}:
            # These subtag types can occur multiple times; collect them
            # into list-valued keys ('extlangs', 'variants', 'extensions').
            data.setdefault(typ + 's', []).append(value)
        elif typ == 'language':
            if value == 'und':
                # 'und' explicitly means "no language", so leave the
                # 'language' key unset.
                pass
            elif normalize:
                replacement = LANGUAGE_REPLACEMENTS.get(value.lower())
                if replacement is not None:
                    # parse the replacement if necessary -- this helps with
                    # Serbian and Moldovan
                    data.update(
                        Language.get(replacement, normalize).to_dict()
                    )
                else:
                    data['language'] = value
            else:
                data['language'] = value
        elif typ == 'region':
            if normalize:
                data['region'] = REGION_REPLACEMENTS.get(value.lower(), value)
            else:
                data['region'] = value
        elif typ == 'grandfathered':
            # If we got here, we got a grandfathered tag but we were asked
            # not to normalize it, or the CLDR data doesn't know how to
            # normalize it. The best we can do is set the entire tag as the
            # language.
            data['language'] = value
        else:
            data[typ] = value
    result = Language.make(**data)
    Language._PARSE_CACHE[tag, normalize] = result
    return result
def to_tag(self) -> str:
"""
Convert a Language back to a standard language tag, as a string.
This is also the str() representation of a Language object.
>>> Language.make(language='en', region='GB').to_tag()
'en-GB'
>>> Language.make(language='yue', script='Hant', region='HK').to_tag()
'yue-Hant-HK'
>>> Language.make(script='Arab').to_tag()
'und-Arab'
>>> str(Language.make(region='IN'))
'und-IN'
"""
if self._str_tag is not None:
return self._str_tag
subtags = ['und']
if self.language:
subtags[0] = self.language
if self.extlangs:
for extlang in sorted(self.extlangs):
subtags.append(extlang)
if self.script:
subtags.append(self.script)
if self.region:
subtags.append(self.region)
if self.variants:
for variant in sorted(self.variants):
subtags.append(variant)
if self.extensions:
for ext in self.extensions:
subtags.append(ext)
if self.private:
subtags.append(self.private)
self._str_tag = '-'.join(subtags)
return self._str_tag
def simplify_script(self) -> 'Language':
    """
    Remove the script from some parsed language data, if the script is
    redundant with the language.

    >>> Language.make(language='en', script='Latn').simplify_script()
    Language.make(language='en')
    >>> Language.make(language='yi', script='Latn').simplify_script()
    Language.make(language='yi', script='Latn')
    >>> Language.make(language='yi', script='Hebr').simplify_script()
    Language.make(language='yi')
    """
    if self._simplified is None:
        # The script is redundant when it's the language's default script.
        redundant = (
            self.language and self.script
            and DEFAULT_SCRIPTS.get(self.language) == self.script
        )
        if redundant:
            self._simplified = self.update_dict({'script': None})
        else:
            self._simplified = self
    return self._simplified
def assume_script(self) -> 'Language':
    """
    Fill in the script if it's missing, and if it can be assumed from the
    language subtag. This is the opposite of `simplify_script`.

    >>> Language.make(language='en').assume_script()
    Language.make(language='en', script='Latn')
    >>> Language.make(language='yi').assume_script()
    Language.make(language='yi', script='Hebr')
    >>> Language.make(language='yi', script='Latn').assume_script()
    Language.make(language='yi', script='Latn')

    This fills in nothing when the script cannot be assumed -- such as when
    the language has multiple scripts, or it has no standard orthography:

    >>> Language.make(language='sr').assume_script()
    Language.make(language='sr')
    >>> Language.make(language='eee').assume_script()
    Language.make(language='eee')

    It also doesn't fill anything in when the language is unspecified.

    >>> Language.make(region='US').assume_script()
    Language.make(region='US')
    """
    if self._assumed is None:
        default_script = None
        if self.language and not self.script:
            default_script = DEFAULT_SCRIPTS.get(self.language)
        if default_script is None:
            # No script can be assumed; the tag stays as it is.
            self._assumed = self
        else:
            self._assumed = self.update_dict({'script': default_script})
    return self._assumed
def prefer_macrolanguage(self) -> 'Language':
    """
    BCP 47 doesn't specify what to do with macrolanguages and the languages
    they contain. The Unicode CLDR, on the other hand, says that when a
    macrolanguage has a dominant standardized language, the macrolanguage
    code should be used for that language. For example, Mandarin Chinese
    is 'zh', not 'cmn', according to Unicode, and Malay is 'ms', not 'zsm'.

    This isn't a rule you'd want to follow in all cases -- for example, you may
    want to be able to specifically say that 'ms' (the Malay macrolanguage)
    contains both 'zsm' (Standard Malay) and 'id' (Indonesian). But applying
    this rule helps when interoperating with the Unicode CLDR.

    So, applying `prefer_macrolanguage` to a Language object will
    return a new object, replacing the language with the macrolanguage if
    it is the dominant language within that macrolanguage. It will leave
    non-dominant languages that have macrolanguages alone.

    >>> Language.get('arb').prefer_macrolanguage()
    Language.make(language='ar')
    >>> Language.get('cmn-Hant').prefer_macrolanguage()
    Language.make(language='zh', script='Hant')
    >>> Language.get('yue-Hant').prefer_macrolanguage()
    Language.make(language='yue', script='Hant')
    """
    if self._macrolanguage is None:
        language = self.language or 'und'
        macro = NORMALIZED_MACROLANGUAGES.get(language)
        if macro is None:
            self._macrolanguage = self
        else:
            self._macrolanguage = self.update_dict({'language': macro})
    return self._macrolanguage
def broaden(self) -> 'List[Language]':
"""
Iterate through increasingly general versions of this parsed language tag.
This isn't actually that useful for matching two arbitrary language tags
against each other, but it is useful for matching them against a known
standardized form, such as in the CLDR data.
The list of broader versions to try appears in UTR 35, section 4.3,
"Likely Subtags".
>>> for langdata in Language.get('nn-Latn-NO-x-thingy').broaden():
... print(langdata)
nn-Latn-NO-x-thingy
nn-Latn-NO
nn-NO
nn-Latn
nn
und-Latn
und
"""
if self._broader is not None:
return self._broader
self._broader = [self]
seen = set(self.to_tag())
for keyset in self.BROADER_KEYSETS:
filtered = self._filter_attributes(keyset)
tag = filtered.to_tag()
if tag not in seen:
self._broader.append(filtered)
seen.add(tag)
return self._broader
def matchable_tags(self) -> 'List[Language]':
if self._matchable_tags is not None:
return self._matchable_tags
self._matchable_tags = []
for keyset in self.MATCHABLE_KEYSETS:
filtered_tag = self._filter_attributes(keyset).to_tag()
self._matchable_tags.append(filtered_tag)
return self._matchable_tags
def maximize(self) -> 'Language':
    """
    The Unicode CLDR contains a "likelySubtags" data file, which can guess
    reasonable values for fields that are missing from a language tag.

    This is particularly useful for comparing, for example, "zh-Hant" and
    "zh-TW", two common language tags that say approximately the same thing
    via rather different information. (Using traditional Han characters is
    not the same as being in Taiwan, but each implies that the other is
    likely.)

    These implications are provided in the CLDR supplemental data, and are
    based on the likelihood of people using the language to transmit
    information on the Internet. (This is why the overall default is English,
    not Chinese.)

    >>> str(Language.get('zh-Hant').maximize())
    'zh-Hant-TW'
    >>> str(Language.get('zh-TW').maximize())
    'zh-Hant-TW'
    >>> str(Language.get('ja').maximize())
    'ja-Jpan-JP'
    >>> str(Language.get('pt').maximize())
    'pt-Latn-BR'
    >>> str(Language.get('und-Arab').maximize())
    'ar-Arab-EG'
    >>> str(Language.get('und-CH').maximize())
    'de-Latn-CH'
    >>> str(Language.make().maximize()) # 'MURICA.
    'en-Latn-US'
    >>> str(Language.get('und-ibe').maximize())
    'en-ibe-Latn-US'
    """
    if self._filled is not None:
        return self._filled
    # Try the tag itself, then successively broader versions of it, until
    # one of them appears in the likely-subtags data.
    for broader in self.broaden():
        likely = LIKELY_SUBTAGS.get(broader.to_tag())
        if likely is not None:
            result = Language.get(likely, normalize=False).update(self)
            self._filled = result
            return result
    raise RuntimeError(
        "Couldn't fill in likely values. This represents a problem with "
        "the LIKELY_SUBTAGS data."
    )
# Support an old, wordier name for the method
fill_likely_values = maximize
def match_score(self, supported: 'Language') -> int:
    """
    Suppose that `self` is the language that the user desires, and
    `supported` is a language that is actually supported. This method
    returns a number from 0 to 100 indicating how similar the supported
    language is (higher numbers are better). This is not a symmetric
    relation.

    The algorithm here is described (badly) in a Unicode technical report
    at http://unicode.org/reports/tr35/#LanguageMatching. If you find these
    results bothersome, take it up with Unicode, unless it's particular
    tweaks we implemented such as macrolanguage matching.

    See :func:`tag_match_score` for a function that works on strings,
    instead of requiring you to instantiate Language objects first.
    Further documentation and examples appear with that function.
    """
    if supported == self:
        return 100

    def as_triple(lang):
        # The distance function compares (language, script, region) triples.
        return (lang.language, lang.script, lang.region)

    desired_complete = self.prefer_macrolanguage().maximize()
    supported_complete = supported.prefer_macrolanguage().maximize()
    return 100 - raw_distance(as_triple(desired_complete),
                              as_triple(supported_complete))
# These methods help to show what the language tag means in natural
# language. They actually apply the language-matching algorithm to find
# the right language to name things in.
def _get_name(self, attribute: str, language, min_score: int):
    # Look up display names for one subtag attribute, then pick the best
    # name available in the requested language.
    assert attribute in self.ATTRIBUTES
    if isinstance(language, Language):
        language = language.to_tag()
    attr_value = getattr(self, attribute)
    if attr_value is None:
        return None
    names = code_to_names(attribute, attr_value)
    # Fall back on showing the code itself when no localized name matches.
    names['und'] = attr_value
    return self._best_name(names, language, min_score)
def _best_name(self, names: dict, language: str, min_score: int):
    # Sort the candidate languages for determinism, then choose the one
    # that best matches the requested language.
    candidates = sorted(names)
    matched_language, _score = best_match(language, candidates, min_score)
    return names[matched_language]
def language_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
    """
    Give the name of the language (not the entire tag, just the language part)
    in a natural language. The target language can be given as a string or
    another Language object.

    Returns None if this tag has no language subtag. `min_score` is the
    minimum match score for choosing which localization of the name to show.

    By default, things are named in English:

    >>> Language.get('fr').language_name()
    'French'
    >>> Language.get('el').language_name()
    'Greek'

    But you can ask for language names in numerous other languages:

    >>> Language.get('fr').language_name('fr')
    'français'
    >>> Language.get('el').language_name('fr')
    'grec'

    Why does everyone get Slovak and Slovenian confused? Let's ask them.

    >>> Language.get('sl').language_name('sl')
    'slovenščina'
    >>> Language.get('sk').language_name('sk')
    'slovenčina'
    >>> Language.get('sl').language_name('sk')
    'slovinčina'
    >>> Language.get('sk').language_name('sl')
    'slovaščina'
    """
    return self._get_name('language', language, min_score)
def autonym(self, min_score: int=95) -> str:
    """
    Give the name of this language *in* this language.

    >>> Language.get('fr').autonym()
    'français'
    >>> Language.get('es').autonym()
    'español'
    >>> Language.get('ja').autonym()
    '日本語'

    This doesn't give the name of the region or script, but in some cases
    the language can name itself in multiple scripts:

    >>> Language.get('sr-Latn').autonym()
    'srpski'
    >>> Language.get('sr-Cyrl').autonym()
    'српски'
    >>> Language.get('pa').autonym()
    'ਪੰਜਾਬੀ'
    >>> Language.get('pa-Arab').autonym()
    'پنجابی'

    This only works for language codes that CLDR has locale data for. You
    can't ask for the autonym of 'ja-Latn' and get 'nihongo'.
    """
    # Name this language in itself: the target naming language is `self`.
    return self.language_name(language=self, min_score=min_score)
def script_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
    """
    Describe the script part of the language tag in a natural language.

    Returns None if this tag has no script subtag.
    """
    return self._get_name('script', language, min_score)
def region_name(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> str:
    """
    Describe the region part of the language tag in a natural language.

    Returns None if this tag has no region subtag.
    """
    return self._get_name('region', language, min_score)
def variant_names(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> list:
    """
    Describe each of the variant parts of the language tag in a natural
    language.
    """
    return [
        self._best_name(code_to_names('variant', variant), language, min_score)
        for variant in self.variants
    ]
def describe(self, language=DEFAULT_LANGUAGE, min_score: int=75) -> dict:
    """
    Return a dictionary that describes a given language tag in a specified
    natural language.

    See `language_name` and related methods for more specific versions of this.

    The desired `language` will in fact be matched against the available
    options using the matching technique that this module provides. We can
    illustrate many aspects of this by asking for a description of Shavian
    script (a script devised by author George Bernard Shaw), and where you
    might find it, in various languages.

    >>> from pprint import pprint
    >>> shaw = Language.make(script='Shaw').maximize()
    >>> pprint(shaw.describe('en'))
    {'language': 'English', 'region': 'United Kingdom', 'script': 'Shavian'}
    >>> pprint(shaw.describe('fr'))
    {'language': 'anglais', 'region': 'Royaume-Uni', 'script': 'shavien'}
    >>> pprint(shaw.describe('es'))
    {'language': 'inglés', 'region': 'Reino Unido', 'script': 'shaviano'}
    >>> pprint(shaw.describe('pt'))
    {'language': 'inglês', 'region': 'Reino Unido', 'script': 'shaviano'}
    >>> pprint(shaw.describe('uk'))
    {'language': 'англійська', 'region': 'Велика Британія', 'script': 'шоу'}
    >>> pprint(shaw.describe('arb'))
    {'language': 'الإنجليزية', 'region': 'المملكة المتحدة', 'script': 'الشواني'}
    >>> pprint(shaw.describe('th'))
    {'language': 'อังกฤษ', 'region': 'สหราชอาณาจักร', 'script': 'ซอเวียน'}
    >>> pprint(shaw.describe('zh-Hans'))
    {'language': '英语', 'region': '英国', 'script': '萧伯纳式文'}
    >>> pprint(shaw.describe('zh-Hant'))
    {'language': '英文', 'region': '英國', 'script': '簫柏納字符'}
    >>> pprint(shaw.describe('ja'))
    {'language': '英語', 'region': 'イギリス', 'script': 'ショー文字'}

    When we don't have a localization for the language, we fall back on
    'und', which just shows the language codes.

    >>> pprint(shaw.describe('lol'))
    {'language': 'en', 'region': 'GB', 'script': 'Shaw'}

    Wait, is that a real language?

    >>> pprint(Language.get('lol').maximize().describe())
    {'language': 'Mongo', 'region': 'Congo - Kinshasa', 'script': 'Latin'}
    """
    # Describe each subtag that is present, using the describer method
    # that corresponds to it.
    describers = [
        ('language', self.language_name),
        ('script', self.script_name),
        ('region', self.region_name),
        ('variants', self.variant_names),
    ]
    names = {}
    for key, describer in describers:
        if getattr(self, key):
            names[key] = describer(language, min_score)
    return names
@staticmethod
def find_name(tagtype: str, name: str, language: {str, 'Language', None}=None):
    """
    Find the subtag of a particular `tagtype` that has the given `name`.

    The default language, "und", will allow matching names in any language,
    so you can get the code 'fr' by looking up "French", "Français", or
    "francés".

    Occasionally, names are ambiguous in a way that can be resolved by
    specifying what name the language is supposed to be in. For example,
    there is a language named 'Malayo' in English, but it's different from
    the language named 'Malayo' in Spanish (which is Malay). Specifying the
    language will look up the name in a trie that is only in that language.

    In a previous version, we thought we were going to deprecate the
    `language` parameter, as there weren't significant cases of conflicts
    in names of things between languages. Well, we got more data, and
    conflicts in names are everywhere.

    Specifying the language that the name should be in is still not
    required, but it will help to make sure that names can be
    round-tripped.

    >>> Language.find_name('language', 'francés')
    Language.make(language='fr')
    >>> Language.find_name('region', 'United Kingdom')
    Language.make(region='GB')
    >>> Language.find_name('script', 'Arabic')
    Language.make(script='Arab')
    >>> Language.find_name('language', 'norsk bokmål')
    Language.make(language='nb')
    >>> Language.find_name('language', 'norsk')
    Language.make(language='no')
    >>> Language.find_name('language', 'norsk', 'en')
    Traceback (most recent call last):
        ...
    LookupError: Can't find any language named 'norsk'
    >>> Language.find_name('language', 'norsk', 'no')
    Language.make(language='no')
    >>> Language.find_name('language', 'malayo', 'en')
    Language.make(language='mbp')
    >>> Language.find_name('language', 'malayo', 'es')
    Language.make(language='ms')

    Some language names resolve to more than a language. For example,
    the name 'Brazilian Portuguese' resolves to a language and a region,
    and 'Simplified Chinese' resolves to a language and a script. In these
    cases, a Language object with multiple subtags will be returned.

    >>> Language.find_name('language', 'Brazilian Portuguese', 'en')
    Language.make(language='pt', region='BR')
    >>> Language.find_name('language', 'Simplified Chinese', 'en')
    Language.make(language='zh', script='Hans')

    A small amount of fuzzy matching is supported: if the name can be
    shortened to match a single language name, you get that language.
    This allows, for example, "Hakka dialect" to match "Hakka".

    >>> Language.find_name('language', 'Hakka dialect')
    Language.make(language='hak')
    """
    # Whatever form of language we were given, reduce it to a single
    # language subtag (or 'und' when no language was specified).
    if isinstance(language, Language):
        language = language.language
    elif isinstance(language, str):
        language = get(language).language
    if language is None:
        language = 'und'
    code = name_to_code(tagtype, name, language)
    if code is None:
        raise LookupError("Can't find any %s named %r" % (tagtype, name))
    if '-' in code:
        # The name resolved to a multi-subtag tag; parse it fully.
        return Language.get(code)
    return Language.make(**{tagtype: code})
@staticmethod
def find(name: str, language: {str, 'Language', None}=None):
    """
    A concise version of `find_name`, used to get a language tag by its
    name in a natural language. The language can be omitted in the large
    majority of cases, where the language name is not ambiguous.

    >>> Language.find('Türkçe')
    Language.make(language='tr')
    >>> Language.find('brazilian portuguese')
    Language.make(language='pt', region='BR')
    >>> Language.find('simplified chinese')
    Language.make(language='zh', script='Hans')

    Some language names are ambiguous: for example, there is a language
    named 'Fala' in English (with code 'fax'), but 'Fala' is also the
    Kwasio word for French. In this case, specifying the language that
    the name is in is necessary for disambiguation.

    >>> Language.find('fala')
    Language.make(language='fr')
    >>> Language.find('fala', 'en')
    Language.make(language='fax')
    """
    return Language.find_name('language', name, language)
def to_dict(self):
"""
Get a dictionary of the attributes of this Language object, which
can be useful for constructing a similar object.
"""
if self._dict is not None:
return self._dict
result = {}
for key in self.ATTRIBUTES:
value = getattr(self, key)
if value:
result[key] = value
self._dict = result
return result
def update(self, other: 'Language') -> 'Language':
"""
Update this Language with the fields of another Language.
"""
return Language.make(
language=other.language or self.language,
extlangs=other.extlangs or self.extlangs,
script=other.script or self.script,
region=other.region or self.region,
variants=other.variants or self.variants,
extensions=other.extensions or self.extensions,
private=other.private or self.private
)
def update_dict(self, newdata: dict) -> 'Language':
"""
Update the attributes of this Language from a dictionary.
"""
return Language.make(
language=newdata.get('language', self.language),
extlangs=newdata.get('extlangs', self.extlangs),
script=newdata.get('script', self.script),
region=newdata.get('region', self.region),
variants=newdata.get('variants', self.variants),
extensions=newdata.get('extensions', self.extensions),
private=newdata.get('private', self.private)
)
@staticmethod
def _filter_keys(d: dict, keys: set) -> dict:
"""
Select a subset of keys from a dictionary.
"""
return {key: d[key] for key in keys if key in d}
def _filter_attributes(self, keyset):
"""
Return a copy of this object with a subset of its attributes set.
"""
filtered = self._filter_keys(self.to_dict(), keyset)
return Language.make(**filtered)
def _searchable_form(self) -> 'Language':
    """
    Convert a parsed language tag so that the information it contains is in
    the best form for looking up information in the CLDR.
    """
    if self._searchable is None:
        # Keep only the core subtags, drop a redundant script, and prefer
        # the macrolanguage code, matching the CLDR's conventions.
        core = self._filter_attributes({'language', 'script', 'region'})
        self._searchable = core.simplify_script().prefer_macrolanguage()
    return self._searchable
def __eq__(self, other):
if self is other:
return True
if not isinstance(other, Language):
return False
return self._str_tag == other._str_tag
def __hash__(self):
return hash(id(self))
def __getitem__(self, key):
if key in self.ATTRIBUTES:
return getattr(self, key)
else:
raise KeyError(key)
def __contains__(self, key):
return key in self.ATTRIBUTES and getattr(self, key)
def __repr__(self):
items = []
for attr in self.ATTRIBUTES:
if getattr(self, attr):
items.append('{0}={1!r}'.format(attr, getattr(self, attr)))
return "Language.make({})".format(', '.join(items))
def __str__(self):
    # The string form of a Language is its standard BCP 47 tag.
    return self.to_tag()
# Make the get(), find(), and find_name() static methods available as
# top-level functions of this module.
get = Language.get
find = Language.find
find_name = Language.find_name
# Backward compatibility: the Language class used to be named LanguageData.
LanguageData = Language
def standardize_tag(tag: {str, Language}, macro: bool=False) -> str:
    """
    Standardize a language tag:

    - Replace deprecated values with their updated versions (if those exist)
    - Remove script tags that are redundant with the language
    - If *macro* is True, use a macrolanguage to represent the most common
      standardized language within that macrolanguage. For example, 'cmn'
      (Mandarin) becomes 'zh' (Chinese), and 'arb' (Modern Standard Arabic)
      becomes 'ar' (Arabic).
    - Format the result according to the conventions of BCP 47

    Macrolanguage replacement is not required by BCP 47, but it is required
    by the Unicode CLDR.

    >>> standardize_tag('en_US')
    'en-US'
    >>> standardize_tag('en-Latn')
    'en'
    >>> standardize_tag('en-uk')
    'en-GB'
    >>> standardize_tag('eng')
    'en'
    >>> standardize_tag('arb-Arab', macro=True)
    'ar'
    >>> standardize_tag('sh-QU')
    'sr-Latn-EU'
    >>> standardize_tag('sgn-US')
    'ase'
    >>> standardize_tag('zh-cmn-hans-cn')
    'cmn-Hans-CN'
    >>> standardize_tag('zh-cmn-hans-cn', macro=True)
    'zh-Hans-CN'
    >>> standardize_tag('zsm', macro=True)
    'ms'
    >>> standardize_tag('ja-latn-hepburn')
    'ja-Latn-hepburn'
    >>> standardize_tag('spa-latn-mx')
    'es-MX'

    If the tag can't be parsed according to BCP 47, this will raise a
    LanguageTagError (a subclass of ValueError):

    >>> standardize_tag('spa-mx-latn')
    Traceback (most recent call last):
        ...
    langcodes.tag_parser.LanguageTagError: This script subtag, 'latn', is out of place. Expected variant, extension, or end of string.
    """
    parsed = Language.get(tag, normalize=True)
    if macro:
        parsed = parsed.prefer_macrolanguage()
    return parsed.simplify_script().to_tag()
def tag_match_score(desired: {str, Language}, supported: {str, Language}) -> int:
    """
    Return a number from 0 to 100 indicating the strength of match between the
    language the user desires, D, and a supported language, S. Higher numbers
    are better. A reasonable cutoff for not messing with your users is to
    only accept scores of 75 or more.

    A score of 100 means the languages are the same, possibly after normalizing
    and filling in likely values.

    >>> tag_match_score('en', 'en')
    100
    >>> tag_match_score('en', 'en-US')
    100
    >>> tag_match_score('zh-Hant', 'zh-TW')
    100
    >>> tag_match_score('ru-Cyrl', 'ru')
    100
    >>> # Serbo-Croatian is a politically contentious idea, but in practice
    >>> # it's considered equivalent to Serbian in Latin characters.
    >>> tag_match_score('sh', 'sr-Latn')
    100

    A score of 92 to 97 indicates a regional difference.

    >>> tag_match_score('zh-HK', 'zh-MO')  # Chinese is similar in Hong Kong and Macao
    97
    >>> tag_match_score('en-AU', 'en-GB')  # Australian English is similar to British English
    96
    >>> tag_match_score('en-IN', 'en-GB')  # Indian English is also similar to British English
    96
    >>> tag_match_score('es-PR', 'es-419')  # Peruvian Spanish is Latin American Spanish
    96
    >>> tag_match_score('en-US', 'en-GB')  # American and British English are somewhat different
    94
    >>> tag_match_score('es-MX', 'es-ES')  # Mexican Spanish is different from Spanish Spanish
    92
    >>> # Serbian has two scripts, and people might prefer one but understand both
    >>> tag_match_score('sr-Latn', 'sr-Cyrl')
    95
    >>> # European Portuguese is different from the most common form (Brazilian Portuguese)
    >>> tag_match_score('pt', 'pt-PT')
    92

    A score of 86 to 90 indicates that people who use the desired language
    are demographically likely to understand the supported language, even if
    the languages themselves are unrelated. There are many languages that have
    a one-way connection of this kind to English or French.

    >>> tag_match_score('ta', 'en')  # Tamil to English
    86
    >>> tag_match_score('mg', 'fr')  # Malagasy to French
    86

    Sometimes it's more straightforward than that: people who use the desired
    language are demographically likely to understand the supported language
    because it's demographically relevant and highly related.

    >>> tag_match_score('af', 'nl')  # Afrikaans to Dutch
    86
    >>> tag_match_score('ms', 'id')  # Malay to Indonesian
    86
    >>> tag_match_score('nn', 'nb')  # Nynorsk to Norwegian Bokmål
    90
    >>> tag_match_score('nb', 'da')  # Norwegian Bokmål to Danish
    88

    A score of 80 to 85 indicates a particularly contentious difference in
    script, where people who understand one script can learn the other but
    probably won't be happy with it. This specifically applies to Chinese.

    >>> tag_match_score('zh-Hans', 'zh-Hant')
    85
    >>> tag_match_score('zh-CN', 'zh-HK')
    85
    >>> tag_match_score('zh-CN', 'zh-TW')
    85
    >>> tag_match_score('zh-Hant', 'zh-Hans')
    81
    >>> tag_match_score('zh-TW', 'zh-CN')
    81

    When the supported script is a different one than desired, this is usually
    a major difference with score of 60 or less.

    >>> tag_match_score('ja', 'ja-Latn-US-hepburn')
    56
    >>> # You can read the Shavian script, right?
    >>> tag_match_score('en', 'en-Shaw')
    56

    When there is no indication the supported language will be understood, the
    score will be 20 or less, to a minimum of 0.

    >>> tag_match_score('es', 'fr')  # Spanish and French are different.
    16
    >>> tag_match_score('en', 'ta')  # English speakers generally do not know Tamil.
    0

    CLDR doesn't take into account which languages are considered part of a
    common 'macrolanguage'. We have this data, so we can use it in matching.
    If two languages have no other rule that would allow them to match, but
    share a macrolanguage, they'll get a match score of 20 less than what
    they would get if the language matched.

    >>> tag_match_score('arz', 'ar')   # Egyptian Arabic to Standard Arabic
    80
    >>> tag_match_score('arz', 'ary')  # Egyptian Arabic to Moroccan Arabic
    76

    Here's an example that has script, region, and language differences, but
    a macrolanguage in common.

    Written Chinese is usually presumed to be Mandarin Chinese, but colloquial
    Cantonese can be written as well. When it is, it probably has region,
    script, and language differences from the usual mainland Chinese. But it is
    still part of the 'Chinese' macrolanguage, so there is more similarity
    than, say, comparing Mandarin to Hindi.

    >>> tag_match_score('yue', 'zh')
    36

    Comparing Swiss German ('gsw') to standardized German ('de') shows how
    these scores can be asymmetrical. Swiss German speakers will understand
    German, so the score in that direction is 92. Most German speakers find
    Swiss German unintelligible, and CLDR in fact assigns this a score of 16.
    This seems a little bit extreme, but the asymmetry is certainly there. And
    if your text is tagged as 'gsw', it must be that way for a reason.

    >>> tag_match_score('gsw', 'de')
    92
    >>> tag_match_score('de', 'gsw')
    16
    """
    # Parse both tags, then delegate to the Language-object implementation.
    return Language.get(desired).match_score(Language.get(supported))
def best_match(desired_language: {str, Language}, supported_languages: list,
               min_score: int=75) -> (str, int):
    """
    You have software that supports any of the `supported_languages`. You want
    to use `desired_language`. This function lets you choose the right language,
    even if there isn't an exact match.

    Returns:

    - The best-matching language code, which will be one of the
      `supported_languages` or 'und'
    - The score of the match, from 0 to 100

    `min_score` sets the minimum match score. If all languages match with a lower
    score than that, the result will be 'und' with a score of 0.

    When there is a tie for the best matching language, the first one in the
    tie will be used.

    Setting `min_score` lower will enable more things to match, at the cost
    of possibly mis-handling data or upsetting users. Read the documentation
    for :func:`tag_match_score` to understand what the numbers mean.

    >>> best_match('fr', ['de', 'en', 'fr'])
    ('fr', 100)
    >>> best_match('sh', ['hr', 'bs', 'sr-Latn', 'sr-Cyrl'])
    ('sr-Latn', 100)
    >>> best_match('zh-CN', ['zh-Hant', 'zh-Hans', 'gan', 'nan'])
    ('zh-Hans', 100)
    >>> best_match('zh-CN', ['cmn-Hant', 'cmn-Hans', 'gan', 'nan'])
    ('cmn-Hans', 100)
    >>> best_match('pt', ['pt-BR', 'pt-PT'])
    ('pt-BR', 100)
    >>> best_match('en-AU', ['en-GB', 'en-US'])
    ('en-GB', 96)
    >>> best_match('es-MX', ['es-ES', 'es-419', 'en-US'])
    ('es-419', 96)
    >>> best_match('es-MX', ['es-PU', 'es-AR', 'es-PY'])
    ('es-PU', 95)
    >>> best_match('es-MX', ['es-AR', 'es-PU', 'es-PY'])
    ('es-AR', 95)
    >>> best_match('zsm', ['id', 'mhp'])
    ('id', 86)
    >>> best_match('eu', ['el', 'en', 'es'])
    ('es', 90)
    >>> best_match('eu', ['el', 'en', 'es'], min_score=92)
    ('und', 0)
    """
    # Fast path: the requested tag is supported verbatim.
    if desired_language in supported_languages:
        return desired_language, 100

    # The standardized form of the tag may also be an exact match.
    desired_language = standardize_tag(desired_language)
    if desired_language in supported_languages:
        return desired_language, 100

    # Score every candidate, keeping the *first* one whose score is at or
    # above the threshold and strictly higher than anything seen so far —
    # this preserves the original stable-sort tie-breaking behavior.
    best = None
    for supported in supported_languages:
        score = tag_match_score(desired_language, supported)
        if score < min_score:
            continue
        if best is None or score > best[1]:
            best = (supported, score)

    if best is None:
        return 'und', 0
    return best
| 37.673984 | 134 | 0.620665 | 35,452 | 0.761492 | 0 | 0 | 12,453 | 0.267484 | 0 | 0 | 31,836 | 0.683822 |
7d15587f60cf0a944a8221741a2723b9c690ebb1 | 2,892 | py | Python | tests/integration/suites/sensitive/update.py | bularcasergiu/Anjay | a76399199dc9569d58aebc4bf18c494ca2127292 | [
"Apache-2.0"
] | null | null | null | tests/integration/suites/sensitive/update.py | bularcasergiu/Anjay | a76399199dc9569d58aebc4bf18c494ca2127292 | [
"Apache-2.0"
] | null | null | null | tests/integration/suites/sensitive/update.py | bularcasergiu/Anjay | a76399199dc9569d58aebc4bf18c494ca2127292 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2017-2020 AVSystem <avsystem@avsystem.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import socket
from framework.lwm2m.coap.server import SecurityMode
from framework.lwm2m_test import *
from framework import test_suite
class ReconnectBootstrapTest(test_suite.Lwm2mSingleServerTest):
    """Verify client behavior across forced reconnects during Bootstrap.

    Checks that the demo client (a) keeps its UDP source port across a
    'reconnect' command, so the bootstrap server sees the same remote
    address, and (b) does not issue another Request Bootstrap once
    Bootstrap Finish has been acknowledged.
    """

    def setUp(self):
        # No regular LwM2M servers — only a bootstrap server.
        self.setup_demo_with_servers(servers=0, bootstrap_server=True)

    def runTest(self):
        self.bootstrap_server.set_timeout(timeout_s=1)
        # Client should start by requesting bootstrap.
        pkt = self.bootstrap_server.recv()
        self.assertMsgEqual(Lwm2mRequestBootstrap(endpoint_name=DEMO_ENDPOINT_NAME),
                            pkt)
        self.bootstrap_server.send(Lwm2mChanged.matching(pkt)())
        original_remote_addr = self.bootstrap_server.get_remote_addr()
        # reconnect
        self.communicate('reconnect')
        self.bootstrap_server.reset()
        pkt = self.bootstrap_server.recv()
        # should retain remote port after reconnecting
        self.assertEqual(original_remote_addr,
                         self.bootstrap_server.get_remote_addr())
        # Bootstrap was not finished, so the client requests it again.
        self.assertMsgEqual(Lwm2mRequestBootstrap(endpoint_name=DEMO_ENDPOINT_NAME),
                            pkt)
        self.bootstrap_server.send(Lwm2mChanged.matching(pkt)())
        demo_port = self.get_demo_port()
        self.assertEqual(self.bootstrap_server.get_remote_addr()[1], demo_port)
        # send Bootstrap Finish
        req = Lwm2mBootstrapFinish()
        self.bootstrap_server.send(req)
        self.assertMsgEqual(Lwm2mChanged.matching(req)(),
                            self.bootstrap_server.recv())
        # reconnect once again
        self.communicate('reconnect')
        # now there should be no Bootstrap Request
        with self.assertRaises(socket.timeout):
            print(self.bootstrap_server.recv(timeout_s=3))
        # should retain remote port after reconnecting
        new_demo_port = self.get_demo_port()
        self.assertEqual(demo_port, new_demo_port)
        self.bootstrap_server.connect_to_client(('127.0.0.1', new_demo_port))
        # DELETE /33605, essentially a no-op to check connectivity
        req = Lwm2mDelete(Lwm2mPath('/%d' % (OID.Test,)))
        self.bootstrap_server.send(req)
        self.assertMsgEqual(Lwm2mDeleted.matching(req)(),
                            self.bootstrap_server.recv())
| 37.076923 | 84 | 0.687759 | 2,115 | 0.731328 | 0 | 0 | 0 | 0 | 0 | 0 | 898 | 0.310512 |
7d15e1ed8db34e13a2c5028eba86c509926087ab | 829 | py | Python | dataset/gnn_dataset.py | Yu-Yy/MathPoseGNN | 9759955957b4cca192f5a98031245277c12750f3 | [
"Apache-2.0"
] | 1 | 2022-01-08T07:39:49.000Z | 2022-01-08T07:39:49.000Z | dataset/gnn_dataset.py | Yu-Yy/MathPoseGNN | 9759955957b4cca192f5a98031245277c12750f3 | [
"Apache-2.0"
] | null | null | null | dataset/gnn_dataset.py | Yu-Yy/MathPoseGNN | 9759955957b4cca192f5a98031245277c12750f3 | [
"Apache-2.0"
] | null | null | null | import torch
from torch.utils.data import Dataset
import os
import pickle
class GNNdataset(Dataset):  # train and test
    """Dataset of pickled GNN matching pairs: one sample per file in *data_dir*.

    Each pickle is expected to hold a dict with the keys 'pred_single',
    'pred_3d', 'gt_3d', 'gt_2d' and 'cam'; ``__getitem__`` returns their
    values as a 5-tuple in that order.
    """

    def __init__(self, data_dir):
        super().__init__()
        self.data_dir = data_dir
        # Snapshot the directory listing once at construction time.
        self.file_list = os.listdir(self.data_dir)

    def __len__(self):
        return len(self.file_list)

    def __getitem__(self, index):
        sample_path = os.path.join(self.data_dir, self.file_list[index])
        with open(sample_path, 'rb') as fh:
            pair = pickle.load(fh)
        return (pair['pred_single'], pair['pred_3d'], pair['gt_3d'],
                pair['gt_2d'], pair['cam'])
| 27.633333 | 80 | 0.639324 | 734 | 0.885404 | 0 | 0 | 0 | 0 | 0 | 0 | 61 | 0.073583 |
7d168979294284d9641a5164cc03767fec0a51c9 | 701 | py | Python | autonapt.py | rainforest-tokyo/AutoNaptPython | 5c021ca18e7a8280b52fd168ff6c443321ff3e31 | [
"MIT"
] | null | null | null | autonapt.py | rainforest-tokyo/AutoNaptPython | 5c021ca18e7a8280b52fd168ff6c443321ff3e31 | [
"MIT"
] | null | null | null | autonapt.py | rainforest-tokyo/AutoNaptPython | 5c021ca18e7a8280b52fd168ff6c443321ff3e31 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
from types import MethodType
sys.path.append(os.path.dirname(os.path.realpath(__file__)) + '/detail')
from Utils import Utils
try:
from AutoNapt import AutoNapt
except Exception as ex:
Utils.print_exception(ex)
def expects_type(self, name, cls):
Utils.expects_type(cls, self, name)
def main(argv):
try:
# TypeError: can't set attributes of built-in/extension type 'object'
#object.expects = MethodType(expects_type, object)
return AutoNapt.main(argv)
except Exception as ex:
Utils.print_exception(ex)
return 1
if __name__ == '__main__':
sys.exit(main(sys.argv))
| 21.90625 | 77 | 0.684736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 182 | 0.259629 |
7d175bf034fa65f7ae66fd6150ae4013621d9935 | 16,355 | py | Python | tests/normalizer/test_number_normalizer.py | tkscode/pyNormalizeNumExp | ac7df9b49153d9b792f5c8087b17c0d8c4a615b2 | [
"BSD-3-Clause"
] | 2 | 2021-11-09T06:18:21.000Z | 2021-12-04T10:58:26.000Z | tests/normalizer/test_number_normalizer.py | tkscode/pyNormalizeNumExp | ac7df9b49153d9b792f5c8087b17c0d8c4a615b2 | [
"BSD-3-Clause"
] | null | null | null | tests/normalizer/test_number_normalizer.py | tkscode/pyNormalizeNumExp | ac7df9b49153d9b792f5c8087b17c0d8c4a615b2 | [
"BSD-3-Clause"
] | 1 | 2021-11-09T03:33:33.000Z | 2021-11-09T03:33:33.000Z | import pytest
from pynormalizenumexp.expression.base import NNumber, NotationType
from pynormalizenumexp.utility.dict_loader import DictLoader
from pynormalizenumexp.normalizer.number_normalizer import NumberNormalizer
@pytest.fixture(scope="class")
def number_normalizer():
return NumberNormalizer(DictLoader("ja"))
class TestNumberNormalizer:
def test_process_標準(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("その3,244人が3,456,789円で百二十三万四千五百六十七円")
expect = [NNumber("3,244", 2, 7), NNumber("3,456,789", 9, 18), NNumber("百二十三万四千五百六十七", 20, 32)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 3244
expect[0].notation_type = [NotationType.HANKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = 3456789
expect[1].notation_type = [NotationType.ZENKAKU]
expect[2].value_lower_bound = expect[2].value_upper_bound = 1234567
expect[2].notation_type = [NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN,
NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_MAN, NotationType.KANSUJI_09,
NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN,
NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09]
assert res == expect
def test_process_小数点あり(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("その3,244.15人が3,456,789.456円")
expect = [NNumber("3,244.15", 2, 10), NNumber("3,456,789.456", 12, 25)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 3244.15
expect[0].notation_type = [NotationType.HANKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = 3456789.456
expect[1].notation_type = [NotationType.ZENKAKU]
assert res == expect
res = number_normalizer.process("131.1ポイントというスコアを叩き出した")
expect = [NNumber("131.1", 0, 5)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 131.1
expect[0].notation_type = [NotationType.HANKAKU, NotationType.HANKAKU, NotationType.HANKAKU]
assert res == expect
res = number_normalizer.process("9.3万円も損した")
expect = [NNumber("9.3万", 0, 4)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 93000
expect[0].notation_type = [NotationType.HANKAKU]
assert res == expect
def test_process_プラスあり(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("その+3,244人が+3,456,789円でプラス百二十三万四千五百六十七円")
expect = [NNumber("+3,244", 2, 8), NNumber("+3,456,789", 10, 20), NNumber("プラス百二十三万四千五百六十七", 22, 37)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 3244
expect[0].notation_type = [NotationType.HANKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = 3456789
expect[1].notation_type = [NotationType.ZENKAKU]
expect[2].value_lower_bound = expect[2].value_upper_bound = 1234567
expect[2].notation_type = [NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN,
NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_MAN, NotationType.KANSUJI_09,
NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN,
NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09]
assert res == expect
def test_process_マイナスあり(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("その-3,244人がー3,456,789円でマイナス百二十三万四千五百六十七円")
expect = [NNumber("-3,244", 2, 8), NNumber("ー3,456,789", 10, 20), NNumber("マイナス百二十三万四千五百六十七", 22, 38)]
expect[0].value_lower_bound = expect[0].value_upper_bound = -3244
expect[0].notation_type = [NotationType.HANKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = -3456789
expect[1].notation_type = [NotationType.ZENKAKU]
expect[2].value_lower_bound = expect[2].value_upper_bound = -1234567
expect[2].notation_type = [NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN,
NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_MAN, NotationType.KANSUJI_09,
NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN,
NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN, NotationType.KANSUJI_09]
assert res == expect
def test_process_範囲あり(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("その10~20人が、100〜200円で")
expect = [NNumber("10~20", 2, 7), NNumber("100〜200", 10, 17)]
expect[0].value_lower_bound = 10
expect[0].value_upper_bound = 20
expect[0].notation_type = [NotationType.HANKAKU, NotationType.HANKAKU]
expect[1].value_lower_bound = 100
expect[1].value_upper_bound = 200
expect[1].notation_type = [NotationType.ZENKAKU, NotationType.ZENKAKU, NotationType.ZENKAKU]
assert res == expect
res = number_normalizer.process("1,2個")
expect = [NNumber("1,2", 0, 3)]
expect[0].value_lower_bound = 1
expect[0].value_upper_bound = 2
expect[0].notation_type = [NotationType.HANKAKU]
assert res == expect
def test_process_数値なし(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("あいうえお")
assert res == []
def test_process_invalid_notation(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("1千1千1千")
expect = [NNumber("1千1", 0, 3), NNumber("千1", 3, 5), NNumber("千", 5, 6)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 1001
expect[0].notation_type = [NotationType.HANKAKU, NotationType.KANSUJI_KURAI_SEN, NotationType.HANKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = 1001
expect[1].notation_type = [NotationType.KANSUJI_KURAI_SEN, NotationType.HANKAKU]
expect[2].value_lower_bound = expect[2].value_upper_bound = 1000
expect[2].notation_type = [NotationType.KANSUJI_KURAI_SEN]
assert res == expect
res = number_normalizer.process("200720人がきた")
expect = [NNumber("2007", 0, 4), NNumber("20", 4, 6)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 2007
expect[0].notation_type = [NotationType.ZENKAKU, NotationType.ZENKAKU, NotationType.ZENKAKU, NotationType.ZENKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = 20
expect[1].notation_type = [NotationType.HANKAKU, NotationType.HANKAKU]
assert res == expect
res = number_normalizer.process("2007二十人がきた")
expect = [NNumber("2007", 0, 4), NNumber("二十", 4, 6)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 2007
expect[0].notation_type = [NotationType.ZENKAKU, NotationType.ZENKAKU, NotationType.ZENKAKU, NotationType.ZENKAKU]
expect[1].value_lower_bound = expect[1].value_upper_bound = 20
expect[1].notation_type = [NotationType.KANSUJI_09, NotationType.KANSUJI_KURAI_SEN]
assert res == expect
def test_process_real(self, number_normalizer: NumberNormalizer):
res = number_normalizer.process("京・京")
assert res == []
res = number_normalizer.process("七〇〇万")
expect = [NNumber("七〇〇万", 0, 4)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 7000000
expect[0].notation_type = [NotationType.KANSUJI_09, NotationType.KANSUJI_09, NotationType.KANSUJI_09,
NotationType.KANSUJI_KURAI_MAN]
assert res == expect
res = number_normalizer.process("7000千人")
expect = [NNumber("7000千", 0, 5)]
expect[0].value_lower_bound = expect[0].value_upper_bound = 7000000
expect[0].notation_type = [NotationType.HANKAKU, NotationType.HANKAKU, NotationType.HANKAKU,
NotationType.HANKAKU, NotationType.KANSUJI_KURAI_SEN]
assert res == expect
res = number_normalizer.process("京京億億万万京億万")
assert res == []
res = number_normalizer.process("そうだ、京都いこう")
assert res == []
def test_suffix_is_arabic(self, number_normalizer: NumberNormalizer):
res = number_normalizer.suffix_is_arabic("10")
assert res == True
res = number_normalizer.suffix_is_arabic("10")
assert res == True
res = number_normalizer.suffix_is_arabic("10あ")
assert res == False
res = number_normalizer.suffix_is_arabic("")
assert res == False
def test_prefix_3digits_is_arabic(self, number_normalizer: NumberNormalizer):
res = number_normalizer.prefix_3digits_is_arabic("1000")
assert res == True
res = number_normalizer.prefix_3digits_is_arabic("1000")
assert res == True
res = number_normalizer.prefix_3digits_is_arabic("100")
assert res == True
res = number_normalizer.prefix_3digits_is_arabic("10")
assert res == False
res = number_normalizer.prefix_3digits_is_arabic("あ1000")
assert res == False
def test_is_valid_comma_notation(self, number_normalizer: NumberNormalizer):
res = number_normalizer.is_valid_comma_notation("3", "000")
assert res == True
res = number_normalizer.is_valid_comma_notation("3", "000円")
assert res == True
res = number_normalizer.is_valid_comma_notation("3あ", "000")
assert res == False
res = number_normalizer.is_valid_comma_notation("3", "00")
assert res == False
res = number_normalizer.is_valid_comma_notation("29", "30")
assert res == False
def test_join_numbers_by_comma(self, number_normalizer: NumberNormalizer):
numbers = [NNumber("3", 5, 6), NNumber("000", 7, 10)]
res = number_normalizer.join_numbers_by_comma("この商品は3,000円だ", numbers)
assert res == [NNumber("3,000", 5, 10)]
numbers = [NNumber("29", 6, 8), NNumber("30", 9, 11)]
res = number_normalizer.join_numbers_by_comma("当たり番号は29,30だ", numbers)
assert res == numbers
def test_convert_number(self, number_normalizer: NumberNormalizer):
numbers = [
NNumber("1,234"), NNumber("1,234,567"), NNumber("一二三四五六七"), NNumber("123万4567"),
NNumber("百二十三万四千五百六十七"), NNumber("百2十3万4千5百6十7")
]
res = number_normalizer.convert_number(numbers)
expect = [
NNumber("1,234"), NNumber("1,234,567"), NNumber("一二三四五六七"), NNumber("123万4567"),
NNumber("百二十三万四千五百六十七"), NNumber("百2十3万4千5百6十7")
]
expect[0].value_lower_bound = expect[0].value_upper_bound = 1234
expect[1].value_lower_bound = expect[1].value_upper_bound = 1234567
expect[2].value_lower_bound = expect[2].value_upper_bound = 1234567
expect[3].value_lower_bound = expect[3].value_upper_bound = 1234567
expect[4].value_lower_bound = expect[4].value_upper_bound = 1234567
expect[5].value_lower_bound = expect[5].value_upper_bound = 1234567
assert res == expect
def test_fix_prefix_su(self, number_normalizer: NumberNormalizer):
number = NNumber("十万", 0, 2)
res = number_normalizer.fix_prefix_su("十万円", number)
assert res == number
number = NNumber("十万", 3, 5)
res = number_normalizer.fix_prefix_su("これは十万円の価値がある", number)
assert res == number
number = NNumber("十万", 4, 6)
number.value_lower_bound = number.value_upper_bound = 100000
res = number_normalizer.fix_prefix_su("これは数十万円の価値がある", number)
expect = NNumber("数十万", 3, 6)
expect.value_lower_bound = 100000
expect.value_upper_bound = 900000
assert res == expect
def test_fix_intermediate_su(self, number_normalizer: NumberNormalizer):
cur_number = NNumber("十万", 0, 2)
next_number = NNumber("二十万", 2, 5)
res = number_normalizer.fix_intermediate_su("十万二十万", cur_number, next_number)
assert res == cur_number
cur_number = NNumber("十万", 0, 2)
next_number = NNumber("二十万", 3, 6)
res = number_normalizer.fix_intermediate_su("十万と二十万", cur_number, next_number)
assert res == cur_number
cur_number = NNumber("十", 3, 4)
cur_number.value_lower_bound = cur_number.value_upper_bound = 10
next_number = NNumber("万", 5, 6)
next_number.value_lower_bound = next_number.value_upper_bound = 10000
res = number_normalizer.fix_intermediate_su("これは十数万円の価値がある", cur_number, next_number)
expect = NNumber("十数万", 3, 6)
expect.value_lower_bound = 110000
expect.value_upper_bound = 190000
assert res == expect
def test_fix_suffix_su(self, number_normalizer: NumberNormalizer):
number = NNumber("十", 3, 4)
res = number_normalizer.fix_suffix_su("これは十円の価値がある", number)
assert res == number
number = NNumber("十", 3, 4)
number.value_lower_bound = number.value_upper_bound = 10
res = number_normalizer.fix_suffix_su("これは十数円の価値がある", number)
expect = NNumber("十数", 3, 5)
expect.value_lower_bound = 11
expect.value_upper_bound = 19
assert res == expect
def test_fix_numbers_by_su(self, number_normalizer: NumberNormalizer):
numbers = [
NNumber("十", 3, 4), NNumber("万", 8, 9), NNumber("十", 12, 13), NNumber("百", 17, 18), NNumber("十", 19, 20),
NNumber("一万", 23, 25), NNumber("千", 26, 27), NNumber("十", 30, 31), NNumber("万", 32, 33)
]
numbers[0].value_lower_bound = numbers[0].value_upper_bound = 10
numbers[1].value_lower_bound = numbers[1].value_upper_bound = 10000
numbers[2].value_lower_bound = numbers[2].value_upper_bound = 10
numbers[3].value_lower_bound = numbers[3].value_upper_bound = 100
numbers[4].value_lower_bound = numbers[4].value_upper_bound = 10
numbers[5].value_lower_bound = numbers[5].value_upper_bound = 10000
numbers[6].value_lower_bound = numbers[6].value_upper_bound = 1000
numbers[7].value_lower_bound = numbers[7].value_upper_bound = 10
numbers[8].value_lower_bound = numbers[8].value_upper_bound = 10000
res = number_normalizer.fix_numbers_by_su("その数十人が、数万人で、十数人で、百数十人で、一万数千人で、十数万人で、", numbers)
expect = [
NNumber("数十", 2, 4), NNumber("数万", 7, 9), NNumber("十数", 12, 14),
NNumber("百数十", 17, 20), NNumber("一万数千", 23, 27), NNumber("十数万", 30, 33)
]
expect[0].value_lower_bound = 10
expect[0].value_upper_bound = 90
expect[1].value_lower_bound = 10000
expect[1].value_upper_bound = 90000
expect[2].value_lower_bound = 11
expect[2].value_upper_bound = 19
expect[3].value_lower_bound = 110
expect[3].value_upper_bound = 190
expect[4].value_lower_bound = 11000
expect[4].value_upper_bound = 19000
expect[5].value_lower_bound = 110000
expect[5].value_upper_bound = 190000
assert res == expect
def test_is_only_kansuji_kurai_man(self, number_normalizer: NumberNormalizer):
res = number_normalizer.is_only_kansuji_kurai_man("十二")
assert res == False
res = number_normalizer.is_only_kansuji_kurai_man("億")
assert res == True
def test_remove_only_kansuji_kurai_man(self, number_normalizer: NumberNormalizer):
numbers = [NNumber("十二万"), NNumber("億"), NNumber("三万")]
res = number_normalizer.remove_only_kansuji_kurai_man(numbers)
expect = [NNumber("十二万"), NNumber("三万")]
assert res == expect
def test_remove_unnecessary_data(self, number_normalizer: NumberNormalizer):
numbers = [NNumber("十二万"), NNumber("2億", 0, 2), NNumber("2億", 0, 2), NNumber("三万", 3, 5)]
res = number_normalizer.remove_unnecessary_data(numbers)
expect = [NNumber("2億", 0, 2), NNumber("三万", 3, 5)]
assert res == expect
| 50.478395 | 123 | 0.667869 | 17,290 | 0.981438 | 0 | 0 | 101 | 0.005733 | 0 | 0 | 2,276 | 0.129193 |
7d176782197a481d98435dbbce03b227e4fc2703 | 253 | py | Python | kivy/tests/pyinstaller/simple_widget/project/widget.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 13,889 | 2015-01-01T06:43:41.000Z | 2022-03-31T17:37:56.000Z | kivy/tests/pyinstaller/simple_widget/project/widget.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 4,570 | 2015-01-01T17:58:52.000Z | 2022-03-31T18:42:16.000Z | kivy/tests/pyinstaller/simple_widget/project/widget.py | Galland/kivy | 95a6bf279883d706f645e4629c16d5ee1038f0ec | [
"MIT"
] | 3,786 | 2015-01-01T09:20:45.000Z | 2022-03-30T21:15:05.000Z | from kivy.uix.widget import Widget
class MyWidget(Widget):
def __init__(self, **kwargs):
super(MyWidget, self).__init__(**kwargs)
def callback(*l):
self.x = self.y
self.fbind('y', callback)
callback()
| 19.461538 | 48 | 0.58498 | 215 | 0.849802 | 0 | 0 | 0 | 0 | 0 | 0 | 3 | 0.011858 |
7d17c5d4335dc7c37aaf77acd240d8436fc7dcc4 | 69 | py | Python | sample/core.py | trs319843/mypackage | cdcefaac5635805a577c26bea8e3437dc3f7e049 | [
"MIT"
] | null | null | null | sample/core.py | trs319843/mypackage | cdcefaac5635805a577c26bea8e3437dc3f7e049 | [
"MIT"
] | null | null | null | sample/core.py | trs319843/mypackage | cdcefaac5635805a577c26bea8e3437dc3f7e049 | [
"MIT"
] | null | null | null | # sample\core.py
def run_core():
print("In pycharm run_core")
| 9.857143 | 32 | 0.652174 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 37 | 0.536232 |
7d17d2a8d99333e35f8e555eee507303861ac334 | 18,537 | py | Python | userbot/plugins/spam.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | userbot/plugins/spam.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | userbot/plugins/spam.py | justteen/BUZZ-USERBOT | 55651cce150e1d04d2c61efb2565ef9f46b42933 | [
"BSL-1.0"
] | null | null | null | import asyncio
import base64
import os
from telethon import functions, types
from telethon.tl.functions.messages import ImportChatInviteRequest as Get
from userbot import CMD_HELP
from userbot.plugins import BOTLOG, BOTLOG_CHATID
from userbot.utils import lightning_cmd, edit_or_reply, sudo_cmd
async def _unsave_gif(client, sent):
    """Best-effort removal of a just-sent GIF from the account's saved GIFs.

    Sending an animated document implicitly adds it to Telegram's
    "saved GIFs" list; undo that so spamming media does not pollute the
    account.  Non-document media (or any API error) is silently ignored.
    """
    try:
        await client(
            functions.messages.SaveGifRequest(
                id=types.InputDocument(
                    id=sent.media.document.id,
                    access_hash=sent.media.document.access_hash,
                    file_reference=sent.media.document.file_reference,
                ),
                unsave=True,
            )
        )
    except Exception:
        pass


@bot.on(lightning_cmd(pattern="spam (.*)"))
@bot.on(sudo_cmd(pattern="spam (.*)", allow_sudo=True))
async def spammer(e):
    """Handle ``.spam <count> [text]``: repeat a message up to 50 times.

    Three modes, selected from the arguments and the replied-to message:

    * ``.spam N text``           — send ``text`` N times (as replies when
      the command itself replies to a message);
    * ``.spam N`` on media reply — re-send the replied-to media N times;
    * ``.spam N`` on text reply  — re-send the replied-to text N times.

    Counts above 50 are rejected (``.bigspam`` exists for that).  Each run
    is reported to the BOTLOG channel when logging is enabled.
    """
    if e.fwd_from:
        return
    await e.get_chat()
    # Default target is the command message itself; prefer the replied-to
    # message so spam can copy its content / be anchored as replies.
    reply_to_id = e.message
    if e.reply_to_msg_id:
        reply_to_id = await e.get_reply_message()
    if not os.path.isdir(Config.TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Config.TEMP_DOWNLOAD_DIRECTORY)
    # NOTE(review): this silently joins a hard-coded private chat whose
    # invite hash is base64-obfuscated.  Kept to preserve existing
    # behavior, but it deserves scrutiny — consider removing.
    try:
        hmm = base64.b64decode("QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw==")
        hmm = Get(hmm)
        await e.client(hmm)
    except BaseException:
        pass
    # cat[0] is the repeat count; cat[1] (optional) is the message text.
    cat = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)
    counter = int(cat[0])
    if counter > 50:
        return await edit_or_reply(e, "Use `.bigspam` for spam greater than 50")
    if len(cat) == 2:
        # Mode 1: explicit text argument.
        spam_message = str(("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)[1])
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.1)
        if BOTLOG:
            if e.is_private:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
            else:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
    elif reply_to_id.media:
        # Mode 2: re-send the replied-to media.
        to_download_directory = Config.TEMP_DOWNLOAD_DIRECTORY
        downloaded_file_name = os.path.join(to_download_directory, "spam")
        downloaded_file_name = await e.client.download_media(
            reply_to_id.media, downloaded_file_name
        )
        await e.delete()
        # download_media() may return None on failure; guard before the
        # os.path.exists() call (which would raise TypeError on None).
        if downloaded_file_name and os.path.exists(downloaded_file_name):
            sandy = None
            for _ in range(counter):
                # Upload once, then resend the cached Message object so
                # the file is not re-uploaded on every iteration.
                if sandy:
                    sandy = await e.client.send_file(e.chat_id, sandy)
                else:
                    sandy = await e.client.send_file(e.chat_id, downloaded_file_name)
                await _unsave_gif(e.client, sandy)
                await asyncio.sleep(0.5)
            if BOTLOG:
                if e.is_private:
                    await e.client.send_message(
                        BOTLOG_CHATID,
                        "#SPAM\n"
                        + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} times with below message",
                    )
                else:
                    await e.client.send_message(
                        BOTLOG_CHATID,
                        "#SPAM\n"
                        + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) with {counter} times with below message",
                    )
                sandy = await e.client.send_file(BOTLOG_CHATID, downloaded_file_name)
                await _unsave_gif(e.client, sandy)
            # BUG FIX: the group-chat branch used `downloaded_file_nam`
            # (NameError) and cleanup only ran when BOTLOG was enabled,
            # leaking the temp file otherwise.  Remove it unconditionally.
            os.remove(downloaded_file_name)
    elif reply_to_id.text and e.reply_to_msg_id:
        # Mode 3: re-send the replied-to text.
        spam_message = reply_to_id.text
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.5)
        if BOTLOG:
            if e.is_private:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
            else:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
    else:
        await edit_or_reply(e, "try again something went wrong or check `.info spam`")
@bot.on(lightning_cmd(pattern="bigspam (.*)"))
async def bigspam(e):
    """Handle ``.bigspam <count> <text>``: send *text* *count* times.

    Unlike ``.spam`` there is no upper limit on the count.  The count and
    message are parsed by splitting on whitespace, so counts of any width
    work (the previous fixed-width slice ``e.text[9:13]`` only handled
    3–4 digit counts and raised ValueError otherwise).
    """
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        # ".bigspam" -> [command, count, message]
        args = e.text.split(maxsplit=2)
        if len(args) < 3:
            return
        try:
            counter = int(args[1])
        except ValueError:
            return
        spam_message = args[2]
        # BUG FIX: was range(1, counter), which sent one message fewer
        # than requested.
        for _ in range(counter):
            await e.respond(spam_message)
        await e.delete()
        # BUG FIX: LOGGER / LOGGER_GROUP are not defined in this module
        # (the imports provide BOTLOG / BOTLOG_CHATID, used by every
        # other handler here), so the old code raised NameError.
        if BOTLOG:
            await e.client.send_message(
                BOTLOG_CHATID, "#BIGSPAM \n\n" "Bigspam was executed successfully"
            )
@bot.on(lightning_cmd("wspam (.*)"))
@bot.on(sudo_cmd(pattern="wspam (.*)", allow_sudo=True))
async def tmeme(e):
    """Handle ``.wspam <sentence>``: delete the command, then send every
    word of the sentence as its own message, reporting the run to BOTLOG
    when logging is enabled."""
    payload = str("".join(e.text.split(maxsplit=1)[1:]))
    message = payload.split()
    await e.delete()
    for word in message:
        await e.respond(word)
    if not BOTLOG:
        return
    # Same log text as before; only the chat descriptor differs between
    # private and group chats.
    if e.is_private:
        where = f"[User](tg://user?id={e.chat_id})"
    else:
        where = f"{e.chat.title}(`{e.chat_id}`)"
    await e.client.send_message(
        BOTLOG_CHATID,
        "#WSPAM\n"
        + f"Word Spam was executed successfully in {where} chat with : `{message}`",
    )
@bot.on(lightning_cmd(pattern="mspam (.*)"))
async def tiny_pic_spam(e):
    """Handle ``.mspam <count> <link>``: repeatedly send the media at *link*."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        message = e.text
        # text == [command, count, link]
        text = message.split()
        counter = int(text[1])
        link = str(text[2])
        # NOTE(review): range(1, counter) sends counter-1 copies — looks
        # like an off-by-one; confirm the intended count before changing.
        for i in range(1, counter):
            await e.client.send_file(e.chat_id, link)
        await e.delete()
        # NOTE(review): LOGGER / LOGGER_GROUP are not imported in this
        # module (other handlers use BOTLOG / BOTLOG_CHATID) — this looks
        # like it raises NameError when reached; verify.
        if LOGGER:
            await e.client.send_message(
                LOGGER_GROUP, "#PICSPAM \n\n" "PicSpam was executed successfully"
            )
@bot.on(lightning_cmd("delayspam (.*)"))
async def spammer(e):
    """Handle ``.delayspam <delay> <count> <text>``: send *text* *count*
    times, sleeping *delay* seconds between messages.

    NOTE(review): this redefines the module-level name ``spammer`` already
    used by the ``.spam`` handler above; handler registration still works,
    but the shadowing is confusing — consider renaming.
    """
    # Arguments: "<delay> <count> <text>", split at most twice.
    spamDelay = float(e.pattern_match.group(1).split(" ", 2)[0])
    counter = int(e.pattern_match.group(1).split(" ", 2)[1])
    spam_message = str(e.pattern_match.group(1).split(" ", 2)[2])
    await e.delete()
    # NOTE(review): range(1, counter) sends counter-1 messages — possible
    # off-by-one; confirm intended count.
    for i in range(1, counter):
        await e.respond(spam_message)
        await asyncio.sleep(spamDelay)
    # NOTE(review): LOGGER / LOGGER_GROUP are undefined in this module;
    # other handlers use BOTLOG / BOTLOG_CHATID — likely a NameError.
    if LOGGER:
        await e.client.send_message(
            LOGGER_GROUP, "#DelaySPAM\n" "DelaySpam was executed successfully"
        )
async def _unsave_gif(client, message):
    """Best-effort: undo Telegram's auto-save of a just-sent GIF.

    Sending an animated document adds it to the user's "Saved GIFs"; this
    removes it again. Failures (non-GIF media, missing file reference) are
    deliberately ignored.
    """
    try:
        await client(
            functions.messages.SaveGifRequest(
                id=types.InputDocument(
                    id=message.media.document.id,
                    access_hash=message.media.document.access_hash,
                    file_reference=message.media.document.file_reference,
                ),
                unsave=True,
            )
        )
    except Exception:
        pass


@bot.on(lightning_cmd(pattern="spam (.*)"))
@bot.on(sudo_cmd(pattern="spam (.*)", allow_sudo=True))
async def spammer(e):
    """`.spam <count> <text>` floods text; `.spam <count>` replying to media
    re-sends that media <count> times. Counts above 50 are refused."""
    if e.fwd_from:
        return
    await e.get_chat()
    reply_to_id = e.message
    if e.reply_to_msg_id:
        reply_to_id = await e.get_reply_message()
    if not os.path.isdir(Config.TEMP_DOWNLOAD_DIRECTORY):
        os.makedirs(Config.TEMP_DOWNLOAD_DIRECTORY)
    try:
        # Opaque channel-join call kept from the original; best-effort only.
        hmm = base64.b64decode("QUFBQUFGRV9vWjVYVE5fUnVaaEtOdw==")
        hmm = Get(hmm)
        await e.client(hmm)
    except BaseException:
        pass
    # cat[0] is the count, cat[1] (when present) is the inline message text.
    cat = ("".join(e.text.split(maxsplit=1)[1:])).split(" ", 1)
    counter = int(cat[0])
    if counter > 50:
        return await edit_or_reply(e, "Use `.bigspam` for spam greater than 50")
    if len(cat) == 2:
        # Text spam: the message was given inline after the count.
        spam_message = str(cat[1])
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.1)
        if BOTLOG:
            if e.is_private:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
            else:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
    elif reply_to_id.media:
        # Media spam: download the replied-to media once, then re-send it.
        to_download_directory = Config.TEMP_DOWNLOAD_DIRECTORY
        downloaded_file_name = os.path.join(to_download_directory, "spam")
        downloaded_file_name = await e.client.download_media(
            reply_to_id.media, downloaded_file_name
        )
        await e.delete()
        if os.path.exists(downloaded_file_name):
            sandy = None
            for _ in range(counter):
                # After the first upload, re-send the returned message object
                # so Telegram reuses the uploaded file.
                sandy = await e.client.send_file(
                    e.chat_id, sandy if sandy else downloaded_file_name
                )
                await _unsave_gif(e.client, sandy)
                await asyncio.sleep(0.5)
            if BOTLOG:
                if e.is_private:
                    await e.client.send_message(
                        BOTLOG_CHATID,
                        "#SPAM\n"
                        + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} times with below message",
                    )
                else:
                    await e.client.send_message(
                        BOTLOG_CHATID,
                        "#SPAM\n"
                        + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) with {counter} times with below message",
                    )
                sandy = await e.client.send_file(
                    BOTLOG_CHATID, downloaded_file_name
                )
                await _unsave_gif(e.client, sandy)
            # Bug fix: the group branch called os.remove(downloaded_file_nam)
            # (typo -> NameError) and the file was never deleted when BOTLOG
            # was disabled. Remove the temp file exactly once, always.
            os.remove(downloaded_file_name)
    elif reply_to_id.text and e.reply_to_msg_id:
        # Text spam sourced from the replied-to message.
        spam_message = reply_to_id.text
        await e.delete()
        for _ in range(counter):
            if e.reply_to_msg_id:
                await reply_to_id.reply(spam_message)
            else:
                await e.client.send_message(e.chat_id, spam_message)
            await asyncio.sleep(0.5)
        if BOTLOG:
            if e.is_private:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in [User](tg://user?id={e.chat_id}) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
            else:
                await e.client.send_message(
                    BOTLOG_CHATID,
                    "#SPAM\n"
                    + f"Spam was executed successfully in {e.chat.title}(`{e.chat_id}`) chat with {counter} messages of \n"
                    + f"`{spam_message}`",
                )
    else:
        await edit_or_reply(e, "try again something went wrong or check `.info spam`")
@bot.on(lightning_cmd(pattern="bigspam (.*)"))
async def bigspam(e):
    """Flood the chat: `.bigspam <count> <text>` sends <text> <count> times."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        # Parse "<cmd> <count> <text>" by whitespace. The old fixed-column
        # slicing (message[9:13] / text[13:]) only worked when the count was
        # exactly four digits and raised ValueError otherwise.
        args = e.text.split(maxsplit=2)
        counter = int(args[1])
        spam_message = str(args[2]) if len(args) > 2 else ""
        # range(counter), not range(1, counter): send exactly <count> copies.
        for _ in range(counter):
            await e.respond(spam_message)
        await e.delete()
        if LOGGER:
            await e.client.send_message(
                LOGGER_GROUP, "#BIGSPAM \n\n" "Bigspam was executed successfully"
            )
@bot.on(lightning_cmd("wspam (.*)"))
@bot.on(sudo_cmd(pattern="wspam (.*)", allow_sudo=True))
async def tmeme(e):
    """Send the supplied text one word per message (`.wspam <text>`)."""
    payload = str("".join(e.text.split(maxsplit=1)[1:]))
    message = payload.split()
    await e.delete()
    for word in message:
        await e.respond(word)
    if BOTLOG:
        # Describe the destination chat the same way for both chat kinds.
        if e.is_private:
            where = f"[User](tg://user?id={e.chat_id})"
        else:
            where = f"{e.chat.title}(`{e.chat_id}`)"
        await e.client.send_message(
            BOTLOG_CHATID,
            "#WSPAM\n"
            + f"Word Spam was executed successfully in {where} chat with : `{message}`",
        )
@bot.on(lightning_cmd(pattern="mspam (.*)"))
async def tiny_pic_spam(e):
    """Send a media link repeatedly: `.mspam <count> <link>`."""
    if not e.text[0].isalpha() and e.text[0] not in ("/", "#", "@", "!"):
        parts = e.text.split()
        counter = int(parts[1])
        link = str(parts[2])
        # range(counter), not range(1, counter): the old loop sent count-1 files.
        for _ in range(counter):
            await e.client.send_file(e.chat_id, link)
        await e.delete()
        if LOGGER:
            await e.client.send_message(
                LOGGER_GROUP, "#PICSPAM \n\n" "PicSpam was executed successfully"
            )
@bot.on(lightning_cmd("delayspam (.*)"))
async def spammer(e):
    """Spam with a pause between messages: `.delayspam <delay> <count> <text>`."""
    # Split the argument string once instead of three times.
    args = e.pattern_match.group(1).split(" ", 2)
    spamDelay = float(args[0])
    counter = int(args[1])
    spam_message = str(args[2])
    await e.delete()
    # range(counter), not range(1, counter): send exactly <count> messages.
    for _ in range(counter):
        await e.respond(spam_message)
        # Bug fix: this handler called a bare `sleep(...)`, unlike every
        # sibling handler which awaits asyncio.sleep -- a bare `sleep` is
        # either undefined (NameError) or blocks the event loop.
        await asyncio.sleep(spamDelay)
    if LOGGER:
        await e.client.send_message(
            LOGGER_GROUP, "#DelaySPAM\n" "DelaySpam was executed successfully"
        )
# Register the help text shown by `.info spam`.
CMD_HELP.update(
    {
        "spam": "**Plugin : **`spam`\
    \n\n**Syntax : **`.spam <count> <text>`\
    \n**Function : **__ Floods text in the chat !!__\
    \n\n**Syntax : **`.spam <count> reply to media`\
    \n**Function : **__Sends the replied media <count> times !!__\
    \nFor above two commands use `.bigspam` instead of spam for spamming more than 50 messages\
    \n\n**Syntax : **`.cspam <text>`\
    \n**Function : **__ Spam the text letter by letter.__\
    \n\n**Syntax : **`.wspam <text>`\
    \n**Function : **__ Spam the text word by word.__\
    \n\n**Syntax : **`.mspam <count> <media link>`\
    \n**Function : **__ Sends the given media link <count> times.__\
    \n\n\n**NOTE : Spam at your own risk !!**"
    }
)
| 39.950431 | 141 | 0.496413 | 0 | 0 | 0 | 0 | 17,428 | 0.940174 | 16,774 | 0.904893 | 3,526 | 0.190214 |
7d187ae720e582888dbe9f2c84697c0a7a77dbce | 352 | py | Python | curso_em_video/mundo_1/exs_python/ExPy011.py | LuiZamberlan/Ex.-Python | f5b6e4782e0ce0e3fead82b126b52588e1bc21b0 | [
"MIT"
] | 1 | 2020-09-19T21:39:12.000Z | 2020-09-19T21:39:12.000Z | curso_em_video/mundo_1/exs_python/ExPy011.py | LuiZamberlan/Ex.-Python | f5b6e4782e0ce0e3fead82b126b52588e1bc21b0 | [
"MIT"
] | null | null | null | curso_em_video/mundo_1/exs_python/ExPy011.py | LuiZamberlan/Ex.-Python | f5b6e4782e0ce0e3fead82b126b52588e1bc21b0 | [
"MIT"
] | null | null | null | l = float(input('Digite a largura da parede em metros: '))
al = float(input('Digite a altura da parede em metros: '))
# One litre of paint covers 2 m²: width * height gives the wall area in m²,
# and dividing by two yields the litres of paint needed. (`l`, the width, is
# read from input on the preceding line of the original script.)
lt = (l * al) / 2
print(f'Com uma parede {l}x{al}, você usará {lt:.2f}L de tinta')
| 44 | 148 | 0.704545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 290 | 0.810056 |
7d18dd3203b7119834318c4470153b6b81e4c9b8 | 1,840 | py | Python | generators/name.py | vickio/compgen | 7bb9a473622e53df18501b577dca4a33fc83922c | [
"MIT"
] | 2 | 2018-11-24T05:52:48.000Z | 2018-11-29T20:46:18.000Z | generators/name.py | vickio/compgen | 7bb9a473622e53df18501b577dca4a33fc83922c | [
"MIT"
] | null | null | null | generators/name.py | vickio/compgen | 7bb9a473622e53df18501b577dca4a33fc83922c | [
"MIT"
] | 2 | 2018-11-23T12:33:07.000Z | 2018-11-27T02:50:06.000Z | from random import choice
from string import Template
from . import BaseGenerator
class Name(BaseGenerator):
    """Generates a company name by filling a random template with parts.

    Substitution values come from bundled JSON/text word lists plus details
    of the owning ``company`` (founder, city, state, faker instance).
    """

    def __init__(self, company):
        self.company = company
        self.data = self._load_json('name.json')
        self.templates = self.data.pop('templates')
        self.nouns = self._load_txt('nouns.txt')
        self.adjectives = self._load_txt('adjectives.txt')
        self.founder_data = self._load_json('founder.json')

    def generate(self):
        """Return a freshly generated company name string."""
        template = Template(self._choose(self.templates))
        # Start with one random pick per category from name.json.
        elements = {key: self._choose(options)
                    for key, options in self.data.items()}
        for slot in ['noun', 'noun2']:
            word = choice(self.nouns)
            # Keep acronyms (all-caps) as-is, title-case everything else.
            elements[slot] = word if word.isupper() else word.title()
        elements['adjective'] = choice(self.adjectives).title()
        elements['adjective2'] = choice(self.adjectives).title()
        first_name, last_name = self.company.founder.split(' ')
        faker = self.company._fake
        elements['lname'] = last_name
        elements['lname2'] = self._choose(self.founder_data['last_name'])
        elements['lname3'] = self._choose(self.founder_data['last_name'])
        elements['fname'] = first_name
        elements['place'] = choice([self.company.city, self.company.state_name])
        made_up = faker.word().title()
        # Very short invented words read better fully capitalised.
        elements['fakeword'] = made_up.upper() if len(made_up) <= 3 else made_up
        if self.company.founder_gender == 'male':
            elements['family'] = elements['family_male']
        else:
            elements['family'] = elements['family_female']
        return template.substitute(elements)
| 34.716981 | 80 | 0.584239 | 1,754 | 0.953261 | 0 | 0 | 0 | 0 | 0 | 0 | 251 | 0.136413 |
7d196d02b6dfdae637cba35d8a14ed891350c55a | 1,046 | py | Python | microcosm_flask/session.py | KensoDev/microcosm-flask | 3618333f4a0f45e673a33986877157208c9eac5f | [
"Apache-2.0"
] | 11 | 2017-01-30T21:53:20.000Z | 2020-05-29T22:39:19.000Z | microcosm_flask/session.py | KensoDev/microcosm-flask | 3618333f4a0f45e673a33986877157208c9eac5f | [
"Apache-2.0"
] | 139 | 2016-03-09T19:09:59.000Z | 2021-09-03T17:14:00.000Z | microcosm_flask/session.py | KensoDev/microcosm-flask | 3618333f4a0f45e673a33986877157208c9eac5f | [
"Apache-2.0"
] | 10 | 2016-12-19T22:39:42.000Z | 2021-03-09T19:23:15.000Z | """
Support a user-defined per-request session.
"""
from flask import g
def register_session_factory(graph, key, session_factory):
    """Wire a per-request session of user-defined type into ``flask.g``.

    On every request a fresh session built by ``session_factory(graph)`` is
    stored on ``flask.g`` under *key*; on request teardown it is closed if it
    exposes a ``close`` method. This decouples the web tier from the concrete
    session type (e.g. a SQLAlchemy session) without coupling the database
    tier directly.

    The factory's expected signature is::

        def session_factory(graph):
            return Session()
    """
    @graph.flask.before_request
    def _open_session():
        setattr(g, key, session_factory(graph))

    @graph.flask.teardown_request
    def _close_session(*args, **kwargs):
        # The attribute is missing when `before_request` itself raised.
        session = getattr(g, key, None)
        if session is None:
            return
        if hasattr(session, "close"):
            session.close()
| 30.764706 | 85 | 0.688337 | 0 | 0 | 0 | 0 | 379 | 0.362333 | 0 | 0 | 649 | 0.620459 |
7d198a067cf29bfd3860f24dbef0396a06853828 | 5,188 | py | Python | pkg_ros_iot_bridge/scripts/temp_for_salim/get_sheet.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | 1 | 2021-09-09T04:41:28.000Z | 2021-09-09T04:41:28.000Z | pkg_ros_iot_bridge/scripts/temp_for_salim/get_sheet.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | null | null | null | pkg_ros_iot_bridge/scripts/temp_for_salim/get_sheet.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | null | null | null | #! /usr/bin/env python2.7
import requests
import json
import heapq as hq #heap
def check_order(order_id, order_info):
    """Return True when an entry for `order_id` is already in the heap.

    Heap entries are tuples of (priority, orderid, item, city), so the
    order id sits at index 1.
    """
    return any(entry[1] == order_id for entry in order_info)
def check_if_dispatched(order_id):
    """Return True if `order_id` appears on the "dispatched" worksheet.

    Fetches worksheet 4 of the public Google Sheet as JSON and scans every
    row for a matching `orderid` field. Returns False when the sheet has no
    rows or no row matches.
    """
    # Alternate sheet URLs for other accounts are kept commented out.
    # URL = "https://spreadsheets.google.com/feeds/list/1rianYVvWCIJeoa17Jlrg7GZTUwuI_SG3KaKaaHtgGvY/4/public/full?alt=json" ##eyrc.vb.1637@gmail.com
    URL = "https://spreadsheets.google.com/feeds/list/1QTyFVQA0YheuERNtD7Vq1ASVJl6tQ4rPGh65vFpExhg/4/public/full?alt=json" ##vb1637eyrc@gmail.com
    #URL = "https://spreadsheets.google.com/feeds/list/1Twkrdg5QvlTRH15SLgWfh8tom5Pxjp-6QphH_s3vPIk/4/public/full?alt=json" ##1637vbeyrc@gmail.com
    response = requests.get(URL) #order
    data =response.content
    res = json.loads(data)
    # An absent 'entry' key means the worksheet is empty.
    if u'entry' in res["feed"]:
        res2 = res["feed"][u'entry']
    else:
        return False
    for x in res2:
        # Each row's cells arrive as one "key: value, key: value" string.
        content =x[u'content']
        content =content[u'$t']
        Dict = dict((a.strip(), b.strip())
                    for a, b in (element.split(': ')
                                 for element in content.split(', ')))
        # NOTE(review): written for Python 2 (see shebang) where str/unicode
        # compare by value after .encode(); under Python 3 a str `order_id`
        # compared to bytes would never match -- verify before porting.
        if order_id == Dict[u'orderid'].encode('utf-8'):
            return True
    return False
def get_data_from_sheet(max_order_id,order_info):
    """Pull orders from worksheet 3, queue new ones, pop the most urgent.

    `order_info` is a heap of (priority, orderid, item, city) tuples that is
    mutated in place; priority 0 = Medicines, 1 = Food, 2 = everything else
    (lower pops first). Returns (order_to_be_procced, max_order_id,
    order_info) where the first element is () when nothing is pending.
    """
    # Alternate sheet URLs for other accounts are kept commented out.
    # URL = "https://spreadsheets.google.com/feeds/list/1rianYVvWCIJeoa17Jlrg7GZTUwuI_SG3KaKaaHtgGvY/3/public/full?alt=json" ##eyrc.vb.1637@gmail.com
    URL = "https://spreadsheets.google.com/feeds/list/1QTyFVQA0YheuERNtD7Vq1ASVJl6tQ4rPGh65vFpExhg/3/public/full?alt=json" ##vb1637eyrc@gmail.com
    #URL = "https://spreadsheets.google.com/feeds/list/1Twkrdg5QvlTRH15SLgWfh8tom5Pxjp-6QphH_s3vPIk/3/public/full?alt=json" ##1637vbeyrc@gmail.com
    response = requests.get(URL) #order
    data =response.content
    res = json.loads(data)
    if u'entry' in res["feed"]:
        #print("entry present")
        res2 = res["feed"][u'entry']
    else:
        # Empty worksheet: nothing to process, state unchanged.
        order_to_be_procced=()
        #print("no data present")
        return order_to_be_procced,max_order_id,order_info
    res2 = res["feed"][u'entry']
    #order_info=[]
    hq.heapify(order_info)
    #max_order_id =0
    for x in res2:
        # Each row's cells arrive as one "key: value, key: value" string.
        content =x[u'content']
        content =content[u'$t']
        Dict = dict((a.strip(), b.strip())
                    for a, b in (element.split(': ')
                                 for element in content.split(', ')))
        # Map item category to urgency; `color` is computed but unused here.
        if Dict[u'item']=="Medicines" or Dict[u'item']=="Medicine":
            Dict[u'priority'] =0 #0
            color ="red"
        elif Dict[u'item']=="Food":
            Dict[u'priority']=1 #1
            color ="yellow"
        else:
            Dict[u'priority'] =2 #2
            color ="green"
        # if max_order_id < int(Dict[u'orderid']):
        order_id_encoded = Dict[u'orderid'].encode('utf-8')
        # Only queue orders not already queued and not already dispatched.
        if not check_order(Dict[u'orderid'],order_info) and not check_if_dispatched(order_id_encoded):
            max_order_id=int(Dict[u'orderid'])
            tup=(Dict[u'priority'],Dict[u'orderid'],Dict[u'item'],Dict[u'city'])
            hq.heappush(order_info,tup) #always have highest priority upward
    #print(order_info)
    if len(order_info)>0:
        order_to_be_procced =hq.heappop(order_info) #order with highest priority
    else:
        order_to_be_procced=()
    print("order_to_be_procced",order_to_be_procced)
    print("order_info: ", order_info)
    return order_to_be_procced,max_order_id,order_info
"""
order_info=[]
hq.heapify(order_info)
max_order_id =0
#order_to_be_procced,max_order_id,order_info =get_data_from_sheet(0,order_info)
#print(order_to_be_procced, max_order_id)
for i in range(8):
order_to_be_procced,max_order_id,order_info =get_data_from_sheet(max_order_id,order_info)
print(order_to_be_procced, max_order_id)
"""
def get_data_from_inventory_sheet():
    """Build a {storage-index: item} map from inventory worksheet 2.

    The storage index is characters [1:3] of each row's SKU. Returns an
    empty dict when the worksheet has no rows. Keys and values are bytes
    (`.encode("utf-8")` -- this file targets Python 2, see shebang).
    """
    # Alternate sheet URLs for other accounts are kept commented out.
    # URL = "https://spreadsheets.google.com/feeds/list/1rianYVvWCIJeoa17Jlrg7GZTUwuI_SG3KaKaaHtgGvY/2/public/full?alt=json" ##eyrc.vb.1637@gmail.com
    URL = "https://spreadsheets.google.com/feeds/list/1QTyFVQA0YheuERNtD7Vq1ASVJl6tQ4rPGh65vFpExhg/2/public/full?alt=json" ##vb1637eyrc@gmail.com
    #URL = "https://spreadsheets.google.com/feeds/list/1Twkrdg5QvlTRH15SLgWfh8tom5Pxjp-6QphH_s3vPIk/2/public/full?alt=json" ##1637vbeyrc@gmail.com
    response = requests.get(URL) #inventory
    data =response.content
    res = json.loads(data)
    if u'entry' in res["feed"]:
        res2 = res["feed"][u'entry']
    else:
        # Empty worksheet: no inventory known.
        match_box_color_with_index ={}
        return match_box_color_with_index
    res2 = res["feed"][u'entry']
    match_box_color_with_index ={}
    for x in res2:
        # Each row's cells arrive as one "key: value, key: value" string.
        content =x[u'content']
        content =content[u'$t']
        Dict = dict((a.strip(), b.strip())
                    for a, b in (element.split(': ')
                                 for element in content.split(', ')))
        box_index =Dict[u'sku']
        box_index=box_index[1:3]
        match_box_color_with_index.update({box_index.encode("utf-8"):Dict[u'item'].encode("utf-8")}) # dic which will match storage number with box item
    #print(match_box_color_with_index)
    return match_box_color_with_index
check_if_dispatched('2002') | 39.907692 | 152 | 0.655551 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,414 | 0.465305 |
7d1a5fed53eac5b58167653a55f260086f23688f | 1,065 | py | Python | llvm-codegen/compiler-tests/test-llvm-4.py | PS-Group/compiler-theory-samples | c916af50eb42020024257ecd17f9be1580db7bf0 | [
"MIT"
] | null | null | null | llvm-codegen/compiler-tests/test-llvm-4.py | PS-Group/compiler-theory-samples | c916af50eb42020024257ecd17f9be1580db7bf0 | [
"MIT"
] | null | null | null | llvm-codegen/compiler-tests/test-llvm-4.py | PS-Group/compiler-theory-samples | c916af50eb42020024257ecd17f9be1580db7bf0 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import os.path
import subprocess
import shutil
def get_compiler_path():
    """Locate the prebuilt llvm_4 sample compiler; raise ValueError if absent."""
    path = os.path.abspath("../../debug-compiler-theory-samples/llvm_4")
    if os.path.exists(path):
        return path
    raise ValueError('compiler llvm_4 not found')
class Runner:
    """Feeds test programs to the compiler and collects the object files."""

    def __init__(self, compiler_path):
        self.compiler_path = compiler_path

    def run(self, input_name):
        """Compile `data/<input_name>` via stdin, move `program.o` into `out/`."""
        source_path = os.path.abspath(os.path.join("data", input_name))
        with open(source_path, "r") as source_file:
            print("Running", input_name)
            subprocess.check_call([self.compiler_path], stdin=source_file)
        stem, _ = os.path.splitext(input_name)
        shutil.move("program.o", os.path.join("out", stem + ".o"))
def main():
    """Compile every sample program with the llvm_4 test compiler."""
    runner = Runner(get_compiler_path())
    for test_name in (
        "first-space-velocity.txt",
        "if_branching.txt",
        "simple_strings_concat.txt",
        "square.txt",
        "advanced_strings_concat.txt",
    ):
        runner.run(test_name)
if __name__ == "__main__":
main()
| 30.428571 | 81 | 0.668545 | 503 | 0.4723 | 0 | 0 | 0 | 0 | 0 | 0 | 253 | 0.237559 |
7d1ce69a7df2042cf3c0f10f391fedf67f671938 | 1,282 | py | Python | cropwatch/apps/metrics/management/commands/uptime.py | objectsyndicate/Crop-Watch | c960bbcacc49199e35984dc521cc9e8663a6b972 | [
"Apache-2.0"
] | 13 | 2018-02-10T14:52:05.000Z | 2021-08-31T21:21:58.000Z | cropwatch/apps/metrics/management/commands/uptime.py | objectsyndicate/Crop-Watch | c960bbcacc49199e35984dc521cc9e8663a6b972 | [
"Apache-2.0"
] | 1 | 2019-06-13T15:55:08.000Z | 2020-07-16T17:35:09.000Z | cropwatch/apps/metrics/management/commands/uptime.py | objectsyndicate/Crop-Watch | c960bbcacc49199e35984dc521cc9e8663a6b972 | [
"Apache-2.0"
] | 2 | 2018-05-15T14:54:28.000Z | 2019-05-19T14:59:18.000Z | from dateutil.relativedelta import relativedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from cropwatch.apps.ioTank.models import ioTank, SensorReading
from cropwatch.apps.metrics.tasks import *
class Command(BaseCommand):
    """Flag ioTanks that have not reported a sensor reading for 15 minutes."""

    help = 'Performs uptime validation every 5'

    def handle(self, *args, **options):
        """Scan every emergency-notify account's devices and alert on silence."""
        accounts = AccountSettings.objects.filter(notify_iotank_emergency=True)
        email_subject = "ioTank offline."
        # One consistent cutoff for the whole run instead of re-reading the
        # clock once per device.
        cutoff = timezone.now() - relativedelta(minutes=15)
        for account in accounts:
            bots = ioTank.objects.filter(owner=account.user)
            for bot in bots:
                try:
                    reading = SensorReading.objects.filter(bot=bot).order_by('-timestamp').first()
                    # `reading` is None for devices with no readings yet; the
                    # resulting AttributeError lands in the handler below,
                    # matching the original behaviour.
                    if reading.timestamp < cutoff:
                        msg = "ioTank:" + str(bot.name) + " has not communicated with the server in over 15 minutes"
                        print(msg)
                        if account.notify_email is True and account.email_daily > 0:
                            send_email.apply_async((email_subject, msg, account.user.email, account.user.id))
                except Exception:
                    # Narrowed from a bare `except:` so KeyboardInterrupt and
                    # SystemExit still propagate; debug output unchanged.
                    print(bot)
                    print(SensorReading.objects.filter(bot=bot))
| 42.733333 | 116 | 0.625585 | 1,037 | 0.808892 | 0 | 0 | 0 | 0 | 0 | 0 | 132 | 0.102964 |
7d1d6b584d80ab19370f83c6e3bf191fa2bab75c | 12,946 | py | Python | src/ralph/discovery/tasks.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/discovery/tasks.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | src/ralph/discovery/tasks.py | quamilek/ralph | bf7231ea096924332b874718b33cd1f43f9c783b | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Asynchronous task support for discovery."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from datetime import datetime, timedelta
from functools import partial
import random
import re
import textwrap
import traceback
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
import django_rq
from ipaddr import IPv4Network, IPv6Network
from ralph.discovery.models import Network, IPAddress
from ralph.util.network import ping
from ralph.util import output, plugin
DNS_TXT_ATTRIBUTE_REGEX = re.compile(r'(?P<attribute>[^:]+): (?P<value>.*)')
MAX_RESTARTS = 3
SANITY_CHECK_PING_ADDRESS = settings.SANITY_CHECK_PING_ADDRESS
SINGLE_DISCOVERY_TIMEOUT = settings.SINGLE_DISCOVERY_TIMEOUT
class Error(Exception):
    """Base class for errors raised during discovery tasks."""
class NoQueueError(Error):
    """Raised when no discovery queue is defined for a network/address."""
def set_queue(context):
    """Route the discovery task to the right data center queue.

    An explicit ``context['queue']`` wins; otherwise the queue of the
    network containing ``context['ip']`` is used, falling back to
    ``'default'`` when no network matches (or no IP is given). The chosen
    name is written back into ``context['queue']``.
    """
    if 'queue' in context:
        queue_name = context['queue']
    else:
        try:
            net = Network.from_ip(context['ip'])
        except KeyError:
            queue_name = 'default'
        else:
            queue_name = net.queue.name if net.queue else 'default'
    context['queue'] = queue_name
def sanity_check(perform_network_checks=True):
    """Verify configuration integrity by pinging SANITY_CHECK_PING_ADDRESS.

    Does nothing when `perform_network_checks` is False; otherwise raises
    ImproperlyConfigured when the address is unreachable.
    """
    if not perform_network_checks:
        return
    if ping(SANITY_CHECK_PING_ADDRESS) is not None:
        return
    raise ImproperlyConfigured(
        textwrap.dedent(
            """
            fatal: {} is not pingable.

            Things you might want to check:
             * is this host connected to network
             * is this domain pingable from your terminal
             * is your python binary capped with setcap CAP_NET_RAW
               or
             * are you running tests from root
               or
             * are you using setuid bin/python
            """
        ).strip().format(SANITY_CHECK_PING_ADDRESS),
    )
def dummy_task(interactive=False, index=None):
    """Print a heartbeat message; every 25th indexed call fails on purpose."""
    emit = output.get(interactive)
    if not index:
        emit("Ping.")
        return
    if index % 25 == 0:
        raise LookupError(
            "You called {} and it failed on purpose.".format(index),
        )
    emit("Ping {}.".format(index))
def dummy_horde(interactive=False, how_many=1000):
    """Fire `how_many` dummy tasks: inline when interactive, else via RQ."""
    if interactive:
        for n in xrange(how_many):
            dummy_task(interactive=interactive, index=n + 1)
        return
    queue = django_rq.get_queue()
    for n in xrange(how_many):
        queue.enqueue_call(
            func=dummy_task,
            kwargs=dict(interactive=interactive, index=n + 1),
            timeout=60,
            result_ttl=0,
        )
def run_next_plugin(context, chains, requirements=None, interactive=False,
                    done_requirements=None, outputs=None, after=None):
    """Runs the next plugin, asynchronously if interactive=False is given.

    Walks `chains` in order and schedules the highest-priority runnable
    plugin from the first chain that still has work, handing the remaining
    chains along so the pipeline continues from that point.
    """
    # Fresh sets per call -- avoids the mutable-default-argument trap.
    if requirements is None:
        requirements = set()
    if done_requirements is None:
        done_requirements = set()
    run = _select_run_method(context, interactive, run_plugin, after)
    for index, chain in enumerate(chains):
        # Plugins whose requirements are met and that have not run yet.
        to_run = plugin.next(chain, requirements) - done_requirements
        if to_run:
            plugin_name = plugin.highest_priority(chain, to_run)
            # Pass the chain list from this chain onward; earlier chains
            # are already exhausted.
            run(context, chains[index:], plugin_name, requirements,
                interactive, done_requirements, outputs)
            return
def run_chain(context, chain_name, requirements=None, interactive=False,
              done_requirements=None, outputs=None,
              after=None):
    """Run a whole plugin chain at once.

    Executes synchronously when `interactive` is True; otherwise the chain
    is enqueued (optionally delayed by `after`) via `_select_run_method`.
    """
    runner = _select_run_method(context, interactive, _run_chain, after)
    runner(
        context, chain_name, requirements, interactive, done_requirements,
        outputs,
    )
def run_plugin(context, chains, plugin_name,
               requirements=None, interactive=False, done_requirements=None,
               restarts=MAX_RESTARTS, outputs=None):
    """Synchronously runs a plugin named `plugin_name` from the first of the
    specified `chains` using a given `context`. Automatically advances the
    chain scheduling the next plugin to be run. When no plugins are left in the
    current chain, advances to the next in the list.

    If `interactive` is True, returns output on stdout and runs the next plugin
    synchronously."""
    if requirements is None:
        requirements = set()
    if done_requirements is None:
        done_requirements = set()
    restarted = False
    if isinstance(chains, basestring):
        raise NotImplementedError("API changed.")
    chain = chains[0]
    try:
        _run_plugin(context, chain, plugin_name, requirements, interactive,
                    done_requirements, outputs)
    except plugin.Restart as e:
        if restarts > 0:
            # Re-run this same plugin after a random 30-90s back-off.
            jitter = random.randint(30, 90)
            after = timedelta(seconds=jitter)
            run = _select_run_method(context, interactive, run_plugin, after)
            # Bug fix: `chains` was previously omitted from this call, so the
            # rescheduled invocation received the plugin name as `chains` and
            # always raised NotImplementedError("API changed.").
            run(context, chains, plugin_name, requirements, interactive,
                done_requirements, restarts=restarts - 1)
            restarted = True
        else:
            if outputs:
                stdout, stdout_verbose, stderr = outputs
            else:
                stderr = output.get(interactive, err=True)
            stderr(
                "Exceeded allowed number of restarts in plugin '{}' for "
                "'{}': {}".format(plugin_name, _get_uid(context), unicode(e)),
                end='\n',
            )
    finally:
        # Advance the pipeline unless this plugin was rescheduled.
        if not restarted:
            run_next_plugin(context, chains, requirements, interactive,
                            done_requirements, outputs)
def _run_plugin(context, chain, plugin_name, requirements, interactive,
                done_requirements, outputs=None):
    """Execute one plugin, report its outcome, and fold results into `context`.

    Mutates `requirements` (on success) and `done_requirements` (always).
    `plugin.Restart` and other exceptions are logged and re-raised for the
    caller to handle.
    """
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
        stderr = output.get(interactive, err=True)
    message = "[{}] {}... ".format(plugin_name, _get_uid(context))
    stdout(message, end='')
    new_context = {}
    try:
        # The plugin receives the whole context as keyword arguments.
        is_up, message, new_context = plugin.run(chain, plugin_name,
                                                 **context)
    except plugin.Restart as e:
        stdout('needs to be restarted: {}'.format(unicode(e)))
        raise
    except Exception:
        # '\r' rewinds the "..." progress line before printing the traceback.
        stdout('', end='\r')
        stderr(
            "{}\nException in plugin '{}' for '{}'.".format(
                traceback.format_exc(),
                plugin_name,
                _get_uid(context),
            ),
            end='\n',
        )
        raise
    else:
        if message:
            stdout(message, verbose=not is_up)
        if is_up:
            # Record success and merge the plugin's context additions.
            requirements.add(plugin_name)
            context['successful_plugins'] = ', '.join(sorted(requirements))
            context.update(new_context)
    finally:
        # Marked done regardless of outcome so it is never retried directly.
        done_requirements.add(plugin_name)
def _run_chain(context, chain_name, requirements=None, interactive=False,
               done_requirements=None, outputs=None):
    """Run one step of `chain_name`, then recurse (via run_chain) in `finally`.

    Terminates when no runnable plugin remains; the `finally` ensures the
    chain keeps advancing even when the current plugin raises.
    """
    # Fresh sets per call -- avoids the mutable-default-argument trap.
    if requirements is None:
        requirements = set()
    if done_requirements is None:
        done_requirements = set()
    to_run = plugin.next(chain_name, requirements) - done_requirements
    if not to_run:
        return
    plugin_name = plugin.highest_priority(chain_name, to_run)
    try:
        _run_plugin(context, chain_name, plugin_name, requirements,
                    interactive, done_requirements, outputs)
    finally:
        run_chain(context, chain_name, requirements, interactive,
                  done_requirements, outputs)
def _get_uid(context):
"""Returns a unique context identifier for logging purposes for a plugin.
"""
if 'uid' in context:
return context['uid']
return context.get('ip', '')
def _select_run_method(context, interactive, function, after):
    """Return a function that either executes the task directly (if
    `interactive` is True), enqueues it right away or schedules its enqueueing
    (if `after` is given).

    `after` may be a timedelta (relative delay) or a datetime (absolute
    time); anything else raises NotImplementedError. Non-interactive paths
    first resolve the target queue via set_queue(context).
    """
    if interactive:
        return function
    set_queue(context)
    if after:
        # FIXME: what about timeout= and result_ttl= for scheduled tasks?
        scheduler = django_rq.get_scheduler(context['queue'], )
        if isinstance(after, timedelta):
            enqueue = scheduler.enqueue_in
        elif isinstance(after, datetime):
            enqueue = scheduler.enqueue_at
        else:
            raise NotImplementedError(
                "after={!r} not supported.".format(after),
            )
        # Bind the delay and target so callers use the same signature as
        # a direct call.
        return partial(enqueue, after, function)
    queue = django_rq.get_queue(
        context['queue'],
    )
    return partial(_enqueue, queue, function)
def _enqueue(queue, function, *args, **kwargs):
    """Enqueue `function(*args, **kwargs)` with the standard discovery limits."""
    call_options = dict(
        func=function,
        args=args,
        kwargs=kwargs,
        timeout=SINGLE_DISCOVERY_TIMEOUT,
        result_ttl=0,
    )
    queue.enqueue_call(**call_options)
def discover_address(address, requirements=None, interactive=True, queue=None):
    """Run the discovery/postprocess chains for one IP address.

    When `queue` is not given it is resolved from the network containing
    `address`; NoQueueError is raised when no network matches or the
    matching network has no discovery queue.
    """
    if queue is None:
        try:
            net = Network.from_ip(address)
        except IndexError:
            raise NoQueueError(
                "Address {0} doesn't belong to any configured "
                "network.".format(address),
            )
        if not net.queue:
            raise NoQueueError(
                "The network {0} has no discovery queue.".format(net),
            )
        queue = net.queue.name
    context = {'ip': address, 'queue': queue}
    run_next_plugin(
        context,
        ('discovery', 'postprocess'),
        requirements=requirements,
        interactive=interactive,
    )
def discover_network(network, plugin_name='ping', requirements=None,
                     interactive=False, update_existing=False, outputs=None):
    """Runs discovery for a single `network`. The argument may be
    an IPv[46]Network instance, a Network instance or a string
    holding a network address or a network name defined in the database.

    If `interactive` is False all output is omitted and discovery is done
    asynchronously by pushing tasks to Rabbit.

    If `update_existing` is True, only existing IPs from the specified
    network are updated.

    NOTE(review): `plugin_name` is accepted but never used in this body --
    presumably kept for API compatibility; confirm with callers.
    """
    sanity_check()
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
    dbnet = None
    # Normalise the argument: end up with `net` (an ipaddr network) and
    # `dbnet` (the matching database row, if any).
    if isinstance(network, (IPv4Network, IPv6Network)):
        net = network
        try:
            dbnet = Network.objects.get(address=str(network))
        except Network.DoesNotExist:
            pass
    elif isinstance(network, Network):
        net = network.network
        dbnet = network
    else:
        # A string: try it as an address first, then as a network name.
        try:
            network = Network.objects.get(address=network)
        except Network.DoesNotExist:
            network = Network.objects.get(name=network)
            # if raises DoesNotExist here then so be it, user passed
            # a non-existent network.
        net = network.network
        dbnet = network
    if not dbnet or not dbnet.queue:
        # Only do discover on networks that have a queue defined.
        stdout("Skipping network {} -- no queue defined.".format(net))
        return
    queue_name = dbnet.queue.name
    stdout("Scanning network {} started.".format(net))
    if update_existing:
        # Restrict to already-known IPs strictly inside the network range.
        ip_address_queryset = IPAddress.objects.filter(
            number__gt=int(net.ip), number__lt=int(net.broadcast))
        hosts = (i.address for i in ip_address_queryset)
    else:
        hosts = net.iterhosts()
    for host in hosts:
        discover_address(host, requirements, interactive, queue_name)
        if interactive:
            stdout()
    # for-else: there is no `break`, so this always runs after the loop.
    else:
        stdout('Scanning network {} finished.'.format(net))
def discover_all(interactive=False, update_existing=False, outputs=None):
    """Runs discovery on all networks defined in the database.

    Interactive mode scans each network inline; otherwise one RQ job per
    network is enqueued on the default queue.
    """
    sanity_check()
    if outputs:
        stdout, stdout_verbose, stderr = outputs
    else:
        stdout = output.get(interactive)
    # Only networks that actually have a discovery queue configured.
    nets = Network.objects.exclude(queue=None).exclude(queue__name='')
    for net in nets:
        if interactive:
            # Bug fix: the caller's `update_existing` flag was previously
            # ignored here (hard-coded to True) in interactive mode.
            discover_network(
                net.network,
                interactive=True,
                update_existing=update_existing,
            )
        else:
            queue = django_rq.get_queue()
            queue.enqueue(
                discover_network,
                net.network,
                update_existing=update_existing,
            )
        stdout()
| 33.365979 | 79 | 0.622509 | 128 | 0.009887 | 0 | 0 | 0 | 0 | 0 | 0 | 2,953 | 0.228101 |
7d1e3411660fc6ff987dff3de950e6a48810d1d8 | 7,125 | py | Python | tests/tests_bibliotools/test_parse_and_group.py | wonjoonSeol/ScienceScape | 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | [
"BSD-3-Clause"
] | 5 | 2018-02-14T21:11:06.000Z | 2020-02-23T14:53:11.000Z | tests/tests_bibliotools/test_parse_and_group.py | wonjoonSeol/ScienceScape | 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | [
"BSD-3-Clause"
] | 106 | 2018-02-09T00:31:05.000Z | 2018-03-29T07:28:34.000Z | tests/tests_bibliotools/test_parse_and_group.py | wonjoonSeol/ScienceScape | 8d8a3cb76193b6f85b7a2a6c7219e249237d64c8 | [
"BSD-3-Clause"
] | 6 | 2018-02-23T17:48:03.000Z | 2020-05-14T13:39:36.000Z | from django.test import TestCase
import sys
import os
lib_path = os.path.abspath(os.path.join(__file__, '..', '..', '..', 'bibliotools3', 'scripts'))
sys.path.append(lib_path)
from parse_and_group import is_year_within_span
from parse_and_group import create_span_files
from parse_and_group import separate_years
from parse_and_group import get_span_parameters
class TestParseGroup(TestCase):
	"""
	This test tests that the method is_year_within_span works correctly
	for years in the span.
	"""
	# NOTE(review): the triple-quoted strings between the methods below are
	# bare expression statements, not docstrings; only the first one (above)
	# is actually attached to the class.
	def test_year_within_span_true(self):
		# Every year in [1990, 2010) must be reported as inside the span.
		allTrue = True
		for year in range(1990, 2010):
			if not is_year_within_span(1990, 2010, year):
				allTrue = False
		self.assertEqual(True, allTrue)
	"""
	This test tests that the method is_year_within_span works correctly
	for years NOT in the span.
	"""
	def test_year_within_span_false(self):
		# Every year in [1900, 1989) must be reported as outside the span.
		allFalse = True
		for year in range(1900, 1989):
			if is_year_within_span(1990, 2010, year):
				allFalse = False
		self.assertEqual(True, allFalse)
	"""
	This test tests that upon calling separate_years, the lines are correctly separated
	amongst the span files.
	"""
	def test_years_correctly_separated(self):
		# Set up test folders/files (will be removed at the end of test)
		wos_headers = "PT AU BA BE GP AF BF CA TI SO SE BS LA DT CT CY CL SP HO DE ID AB C1 RP EM RI OI FU FX CR NR TC Z9 U1 U2 PU PI PA SN EI BN J9 JI PD PY VL IS PN SU SI MA BP EP AR DI D2 EA EY PG WC SC GA UT PM OA HC HP DA"
		dir = os.path.dirname(os.path.dirname(__file__))
		os.makedirs(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears"))
		os.makedirs(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/firstSpan"))
		os.makedirs(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/secondSpan"))
		first_span_txt = open(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/firstSpan/firstSpan.txt"), "w")
		second_span_txt = open(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/secondSpan/secondSpan.txt"), "w")
		first_span_txt.write(wos_headers + "\n")
		second_span_txt.write(wos_headers + "\n")
		# This is a dummy line for testing
		# (a single Web-of-Science record; its publication year of 2018 is
		# what separate_years must route into the second span file)
		line = """J Piersanti, S; Orlandi, A Piersanti, Stefano; Orlandi, Antonio Genetic Algorithm Optimization for the Total Radiated Power of a Meandered Line by Using an Artificial Neural Network IEEE TRANSACTIONS ON ELECTROMAGNETIC COMPATIBILITY English Article Artificial neural network (ANN); electromagnetic (EM) radiation; genetic algorithms (GAs); machine learning; meandered line; nature-inspired algorithms; signal integrity; total radiated power (TRP) One of the state-of-the-art optimization strategies is the introduction of an artificial neural network in place of a more time-consuming numerical tool to compute the cost function. This work describes the development of a genetic algorithm optimization strategy for a meandered microstrip line by using an artificial neural network whose training set has been designed by a uniform sampling of the global design space. The results in terms of the total radiated electromagnetic power are discussed and compared with those obtained by the initial and not optimized configuration. [Piersanti, Stefano; Orlandi, Antonio] Univ Aquila, Dept Ind & Informat Engn & Econ, UAq EMC Lab, I-67100 Laquila, Italy Orlandi, A (reprint author), Univ Aquila, Dept Ind & Informat Engn & Econ, UAq EMC Lab, I-67100 Laquila, Italy. stefano.piersanti@graduate.univaq.it; anto-nio.orlandi@univaq.it Computer Simulation Technology, 2017, CST STUD SUIT 2017; Cuthbert T. R., 1987, OPTIMIZATION USING P; Duffy AP, 2006, IEEE T ELECTROMAGN C, V48, P449, DOI 10.1109/TEMC.2006.879358; HAGAN MT, 1994, IEEE T NEURAL NETWOR, V5, P989, DOI 10.1109/72.329697; Hagan M. T., 1995, NEURAL NETWORK DESIG; Hall S. 
H., 2009, ADV SIGNAL INTEGRITY; Haupt R.L., 2004, PRACTICAL GENETIC AL; [Anonymous], 2008, P1597 IEEE; Orlandi A., 2017, ELECTROMAGNETIC BAND; Orlandi A, 2006, IEEE T ELECTROMAGN C, V48, P460, DOI 10.1109/TEMC.2006.879360; Qi Q, 2016, EL PACKAG TECH CONF, P85, DOI 10.1109/EPTC.2016.7861448; Tron S., 2013, MEANDERED TRANSMISSI; Uka S., 1990, IEEE T NEURAL NETWOR, V2, P675 13 0 0 0 0 IEEE-INST ELECTRICAL ELECTRONICS ENGINEERS INC PISCATAWAY 445 HOES LANE, PISCATAWAY, NJ 08855-4141 USA 0018-9375 1558-187X IEEE T ELECTROMAGN C IEEE Trans. Electromagn. Compat. AUG 2018 60 4 1014 1017 10.1109/TEMC.2017.2764623 4 Engineering, Electrical & Electronic; Telecommunications Engineering; Telecommunications FT4JY WOS:000423122600025 2018-02-07"""
		# Mocking some time spans
		spans = {
			"firstSpan":{
				"years":[1900,1999],
			},
			"secondSpan":{
				"years":[2000, 2018],
			}
		}
		# Mocking a folder structure with dummy input/output files/folders
		years_spans = dict((s, data["years"]) for s, data in spans.items())
		files = {
			"firstSpan": first_span_txt,
			"secondSpan": second_span_txt,
		}
		# Call to the method we want to test
		# (44 is the field index where separate_years reads the year —
		#  assumed from the WoS "PY" column position; TODO confirm)
		separate_years(line, years_spans, files, 44)
		first_span_txt.close()
		second_span_txt.close()
		first_span_read = open(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/firstSpan/firstSpan.txt"), "r")
		second_span_read = open(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/secondSpan/secondSpan.txt"), "r")
		first_span_read.readline()
		second_span_read.readline()
		# Check that the years have been correctly separated
		# (2018 record: nothing in the 1900-1999 file, one line in 2000-2018)
		result = False
		if len(first_span_read.readlines()) == 0 and len(second_span_read.readlines()) == 1:
			result = True
		# Tear down
		first_span_read.close()
		second_span_read.close()
		os.remove(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/firstSpan/firstSpan.txt"))
		os.remove(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/secondSpan/secondSpan.txt"))
		os.rmdir(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/firstSpan"))
		os.rmdir(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears/secondSpan"))
		os.rmdir(os.path.join(dir, "tests_bibliotools/testFiles/foldersForSeparateYears"))
		self.assertEqual(True, result)
	"""
	This test tests that upon calling get_span_parameters,
	correct uncorrupted parameters are returned (critical step).
	"""
	def test_get_span_parameters(self):
		mocked_spans = {
			"first_span":{
				"years":[1789,2010]
			},
			"second_span":{
				"years":[2011,2018]
			},
		}
		result = str(get_span_parameters(mocked_spans.items(), "years"))
		self.assertEqual(result, """{'first_span': [1789, 2010], 'second_span': [2011, 2018]}""")
| 60.897436 | 2,417 | 0.703439 | 6,760 | 0.948772 | 0 | 0 | 0 | 0 | 0 | 0 | 4,440 | 0.623158 |
7d1f3ddbc8caa64dc170bb034f2e11f6a498e3f3 | 968 | py | Python | src/hw_conversion/HWPreprocessor.py | jmbarrios/hw-conversion | 8addd24e726e7284ade3195df14f96ea51c332b7 | [
"MIT"
] | null | null | null | src/hw_conversion/HWPreprocessor.py | jmbarrios/hw-conversion | 8addd24e726e7284ade3195df14f96ea51c332b7 | [
"MIT"
] | null | null | null | src/hw_conversion/HWPreprocessor.py | jmbarrios/hw-conversion | 8addd24e726e7284ade3195df14f96ea51c332b7 | [
"MIT"
] | null | null | null | '''
Module containing a preprocessor that keeps cells if they match given
expression.
'''
# Author: Juan M. Barrios <j.m.barrios@gmail.com>
import re
from typing import Pattern
from traitlets import Unicode
from nbconvert.preprocessors import Preprocessor
class HomeworkPreproccessor(Preprocessor):
    '''Keeps cells from a notebook that match a regular expression.

    The ``pattern`` traitlet is configurable (e.g. via nbconvert config);
    when it is empty, the notebook passes through unchanged.
    '''

    # Regular expression a cell's source must match (from its start) to be kept.
    pattern = Unicode().tag(config=True)

    def check_conditions(self, cell):
        '''Checks that a cell matches the pattern.

        Returns: Boolean.
            True means cell should be kept.
        '''
        # BUGFIX: the original returned the raw re.Match/None despite the
        # documented Boolean contract; an explicit bool is backward
        # compatible for callers relying on truthiness.
        return re.match(self.pattern, cell.source) is not None

    def preprocess(self, nb, resources):
        '''Preprocessing to apply to each notebook.

        Filters ``nb.cells`` down to the matching cells, in place.
        '''
        if not self.pattern:
            return nb, resources
        nb.cells = [cell for cell in nb.cells if self.check_conditions(cell)]
        return nb, resources
7d20f25ebe54b94a311e03fb9f7b27183d742e5a | 2,455 | py | Python | CloacaCodeTests/minheap.py | rockobonaparte/cloaca | 789dc5a6ec1c52f6fe3d5e8aadc1a9c149aacf68 | [
"MIT"
] | 3 | 2020-01-11T19:25:18.000Z | 2022-03-12T17:27:28.000Z | CloacaCodeTests/minheap.py | rockobonaparte/cloaca | 789dc5a6ec1c52f6fe3d5e8aadc1a9c149aacf68 | [
"MIT"
] | 4 | 2020-02-10T16:50:43.000Z | 2021-12-03T08:03:46.000Z | CloacaCodeTests/minheap.py | rockobonaparte/cloaca | 789dc5a6ec1c52f6fe3d5e8aadc1a9c149aacf68 | [
"MIT"
] | 4 | 2020-02-10T16:40:46.000Z | 2020-11-27T08:11:51.000Z | raise NotImplementedError("Getting an NPE trying to parse this code")
class KeyValue:
    """A (key, value) pair stored in the heap; ordered by ``key``."""

    def __init__(self, key, value):
        self.key = key
        self.value = value

    def __repr__(self):
        return f"{self.key}->{self.value}"


class MinHeap:
    """Array-backed binary min-heap of KeyValue items.

    ``start_size`` pre-allocates the backing list. The heap now grows
    automatically when more items are added (the original raised
    IndexError once the initial capacity was exceeded).
    """

    def __init__(self, start_size):
        self.heap = [None] * start_size
        # Index of the next free slot; doubles as the current item count.
        self.next_i = 0

    def add(self, key, value):
        """Insert a key/value pair, restoring heap order by sifting up."""
        if self.next_i >= len(self.heap):
            # Double the capacity (or create one slot for a zero-size heap).
            self.heap.extend([None] * max(1, len(self.heap)))
        self.heap[self.next_i] = KeyValue(key, value)
        child_i = self.next_i
        self.next_i += 1
        # BUGFIX: in a 0-based heap the parent of i is (i - 1) // 2; the
        # original used i // 2, which walks the wrong chain for right
        # children and can corrupt the heap order.
        while child_i > 0:
            parent_i = (child_i - 1) // 2
            if self.heap[child_i].key < self.heap[parent_i].key:
                self.heap[child_i], self.heap[parent_i] = (
                    self.heap[parent_i], self.heap[child_i])
                child_i = parent_i
            else:
                break

    def get(self):
        """Remove and return the minimum KeyValue, or None when empty."""
        if self.next_i == 0:
            return None
        bye_bye_root = self.heap[0]
        self.next_i -= 1
        # Move the last element to the root and clear its old slot.
        self.heap[0] = self.heap[self.next_i]
        self.heap[self.next_i] = None
        # Sift down, always swapping with the smaller child.
        # BUGFIX: children are bounds-checked against the live item count;
        # the original could index past the end of the backing list.
        parent_i = 0
        while True:
            lchild_i = 2 * parent_i + 1
            rchild_i = 2 * parent_i + 2
            best_i = parent_i
            if (lchild_i < self.next_i
                    and self.heap[lchild_i].key < self.heap[best_i].key):
                best_i = lchild_i
            if (rchild_i < self.next_i
                    and self.heap[rchild_i].key < self.heap[best_i].key):
                best_i = rchild_i
            if best_i == parent_i:
                break
            self.heap[parent_i], self.heap[best_i] = (
                self.heap[best_i], self.heap[parent_i])
            parent_i = best_i
        return bye_bye_root
# Demo / smoke test: push four keys, then pop repeatedly; the keys should
# print in ascending order (1, 2, 3, 4).
min_heap = MinHeap(16)
min_heap.add(2, 2)
min_heap.add(3, 3)
min_heap.add(4, 4)
min_heap.add(1, 1)
print(min_heap.get().key)
print(min_heap.get().key)
print(min_heap.get().key)
print(min_heap.get().key)
| 27.897727 | 84 | 0.518941 | 2,173 | 0.885132 | 0 | 0 | 0 | 0 | 0 | 0 | 78 | 0.031772 |
7d2138cb868753332f4b3ce35fd7b436d701ae81 | 15,359 | py | Python | mecademic/Robot.py | nickarmenta/PythonForMecademic | d6277239bbb376a2388984a9fe12a4d4d88d653c | [
"MIT"
] | 1 | 2021-03-22T13:40:42.000Z | 2021-03-22T13:40:42.000Z | mecademic/Robot.py | nickarmenta/PythonForMecademic | d6277239bbb376a2388984a9fe12a4d4d88d653c | [
"MIT"
] | null | null | null | mecademic/Robot.py | nickarmenta/PythonForMecademic | d6277239bbb376a2388984a9fe12a4d4d88d653c | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
import socket
import threading
import logging
logging.basicConfig(filename='meca.log', level=logging.DEBUG)
# File that logs every command sent through Robot.SendCommand (truncated on
# each Connect()).
PROGRAM_FILE = 'program_output.txt'

# Dictionary of status indexes in robot status message
# (positions of the boolean flags returned by GetStatusRobot)
statusDict = {'activated': 0,
              'homed': 1,
              'simulating': 2,
              'error': 3,
              'paused': 4,
              'EOB': 5,
              'EOM': 6}

# Ease of use cartesian index labeling
# (index of each axis in a 6-element [x, y, z, rx, ry, rz] pose list)
cartDict = {'x': 0,
            'y': 1,
            'z': 2,
            'rx': 3,
            'ry': 4,
            'rz': 5}

# Dictionary of command responses
# (protocol reply codes the controller returns for each command; used by
#  Robot._getCodes to validate replies)
responseDict = {'ActivateRobot': [2000, 2001],
                'DeactivateRobot': [2004],
                'BrakesOn': [2010],
                'BrakesOff': [2008],
                'Home': [2002, 2003],
                'GetJoints': [2026],
                'GetPose': [2027],
                'ClearMotion': [2044],
                'PauseMotion': [2042],
                'ResumeMotion': [2043],
                'ResetError': [2005],
                'GetStatusRobot': [2007],
                'GetFwVersion': [2081],
                'GetProductType': [2084]}
# Combined control and feedback class for Mecademic
class Robot:
	"""Driver for a Mecademic robot arm over its TCP ASCII protocol.

	``Connect()`` opens the command socket (port 10000) and the feedback
	socket (port 10001). Most methods send one protocol command and check
	the reply code; commands are also appended to PROGRAM_FILE.
	"""

	def __init__(self, ip):
		self.ip = ip
		self.connected = False
		# Named cartesian poses / joint positions plus tool and work
		# reference frames; extend via AddPose/AddJoints/AddTool/AddWork.
		self.pose = {'stow': [75,0,240,0,90,0], 'home': [110,-150,130,-180,0,-180]}
		self.joints = {'stow': [0,-60,60,0,0,0]}
		self.toolFrame = {'flange': [0,0,0,0,0,0]}
		self.workFrame = {'base': [0,0,0,0,0,0]}

	# Connect to both control and feedback servers
	def Connect(self):
		"""Open both sockets, clear startup errors, cache firmware info.

		Returns True when both connections succeeded.
		"""
		self.connected = True
		self.controlClient = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
		self.controlClient.settimeout(10)  # seconds
		self.controlClient.connect((self.ip, 10000))
		code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
		if int(code) != 3000:
			if int(code) == 3001:
				print('Another user is already connected!')
				exit()
			logging.warning('Unable to connect to port 10000')
			self.connected = False
		# Clear initial errors
		if self.GetStatus('error'):
			logging.info('Error on initialization')
			self.ResetError()
		self.firmware = self.ReadResponse('GetFwVersion')
		self.product = self.ReadResponse('GetProductType')
		self.feedbackClient = socket.socket()
		self.feedbackClient.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,1)
		self.feedbackClient.settimeout(10)  # seconds
		self.feedbackClient.connect((self.ip, 10001))
		code = int(self.feedbackClient.recv(1024).decode('ascii')[1:-2].split('][')[0])
		if code != 2079:
			logging.warning('Unable to connect to port 10001')
			self.connected = False
		# Truncate the command log for this session.
		with open(PROGRAM_FILE,'w') as f:
			f.write('')
		return self.connected

	# Easy setup routine
	def Startup(self):
		"""Activate then home the robot; returns Home()'s result on success."""
		if self.Activate(): return self.Home()

	# Ease of use 0-100% global speed adjustment
	def SetSpeed(self, percentage):
		"""Scale all velocity/acceleration limits from one 0-100 percentage.

		A value below 1 is treated as a fraction and converted to percent.
		"""
		# If speed is provided as fractional change to percentage
		if percentage < 1: percentage *= 100
		self.SetCartAcc(percentage)
		self.SetCartAngVel(3*percentage)
		self.SetCartLinVel(10*percentage)
		self.SetJointAcc(1.5*percentage)
		self.SetJointVel(percentage)

	# Move robot in +Z of tool frame
	def Push(self, mm): self.MoveToolRel([0,0,mm,0,0,0])

	# Move robot in -Z of tool frame
	def Pull(self, mm): self.MoveToolRel([0,0,-mm,0,0,0])

	def Wiggle(self):
		"""Rock the tool +/-4 degrees about its X axis."""
		self.MoveToolRel([0,0,0,4,0,0])
		self.MoveToolRel([0,0,0,-4,0,0])

	# Move robot Z-offset of tool frame
	def Approach(self, pose, zOffset):
		"""Joint-interpolated move to *pose* raised by *zOffset* mm in Z."""
		approachPose = pose.copy()
		approachPose[2] += zOffset
		# BUGFIX: the original called self.MoveP(), which does not exist.
		self.MovePose(approachPose)

	# Move robot Z-offset of tool frame
	def Depart(self, pose, zOffset):
		"""Linear move to *pose* raised by *zOffset* mm in Z."""
		departPose = pose.copy()
		departPose[2] += zOffset
		# BUGFIX: the original called self.MoveL(), which does not exist.
		self.MoveLinear(departPose)

	# Power-up robot motors
	def Activate(self):
		if self.GetStatus('activated'): return True
		else: return self.SendCommand('ActivateRobot')

	# Power-down robot motors
	def Deactivate(self):
		if not self.GetStatus('activated'): return True
		else: return self.SendCommand('DeactivateRobot')

	# De-activate robot and engage brakes
	def BrakesOn(self):
		if self.GetStatus('activated'): self.Deactivate()
		else: return self.SendCommand('BrakesOn')

	# Activate robot and disengage brakes
	def BrakesOff(self):
		if not self.GetStatus('activated'): self.Activate()
		else: return self.SendCommand('BrakesOff')

	# Home robot motors
	def Home(self):
		if self.GetStatus('homed'): return True
		else: return self.SendCommand('Home')

	# Move robot to target "pose" list relative to work plane
	def MovePose(self, pose):
		"""Joint-interpolated move; *pose* is a list or a named pose key."""
		if self.GetStatus('paused'): self.ResumeMove()
		sentPose = _returnList(self.pose, pose)
		if sentPose is not None: return self.SendCommand(f'MovePose{tuple(sentPose)}')
		else: return False

	# Move robot to target "joints" list
	def MoveJoints(self, joints):
		"""Move to absolute joint angles; *joints* is a list or named key."""
		if not self._checkJointLimits(joints):
			logging.warning("Target position outside joint limits!")
			return False
		if self.GetStatus('paused'): self.ResumeMove()
		# BUGFIX: the original read self.joint, which is never defined
		# (the attribute set in __init__ is self.joints).
		sentJoints = _returnList(self.joints, joints)
		if sentJoints is not None: return self.SendCommand(f'MoveJoints{tuple(sentJoints)}')
		else: return False

	# Jog robot at target "joints" speed
	def MoveJV(self, joints):
		if not self._checkJointSpeedLimits(joints):
			logging.warning("Target speed outside joint limits!")
			return False
		else:
			if self.GetStatus('paused'): self.ResumeMove()
			return self.SendCommand(f'MoveJointsVel{tuple(joints)}')

	# Move robot linearly to target "pose" list relative to work frame
	def MoveLinear(self, pose):
		if self.GetStatus('paused'): self.ResumeMove()
		sentPose = _returnList(self.pose, pose)
		if sentPose is not None: return self.SendCommand(f'MoveLin{tuple(sentPose)}')
		else: return False

	# Move robot in by "pose" list relative to tool frame
	def MoveToolRel(self, pose):
		return self.SendCommand(f'MoveLinRelTRF{tuple(pose)}')

	# Move robot in by "pose" list relative to work frame
	def MoveWorkRel(self, pose):
		return self.SendCommand(f'MoveLinRelWRF{tuple(pose)}')

	# Jog at target "pose" speed relative to tool frame
	def MoveToolVel(self, pose):
		return self.SendCommand(f'MoveLinVelTRF{tuple(pose)}')

	# Jog tool at target "pose" speed relative to work plane
	def MoveWorkVel(self, pose):
		return self.SendCommand(f'MoveLinVelWRF{tuple(pose)}')

	# Set blend radius from 0-100%
	def SetBlending(self, percentage):
		assert percentage >= 0 and percentage <= 100
		return self.SendCommand(f'SetBlending({percentage})')

	# Set cartesian acceleration from 0.001-600%
	def SetCartAcc(self, percentage):
		assert percentage >= .001 and percentage <= 600
		return self.SendCommand(f'SetCartAcc({percentage})')

	# Set cartesian angular velocity from 0.001-300deg/s
	def SetCartAngVel(self, degrees):
		assert degrees >= 0.001 and degrees <= 300
		return self.SendCommand(f'SetCartAngVel({degrees})')

	# Set cartesian linear velocity from 0.001-1,000mm/s
	def SetCartLinVel(self, mms):
		assert mms >= 0.001 and mms <= 1000
		return self.SendCommand(f'SetCartLinVel({mms})')

	# Set joint acceleration from 0.001-150%
	def SetJointAcc(self, percentage):
		# Range check added for consistency with the other Set* methods.
		assert percentage >= 0.001 and percentage <= 150
		return self.SendCommand(f'SetJointAcc({percentage})')

	# Set joint velocity from 0.001-100%
	def SetJointVel(self, percentage):
		# Range check added for consistency with the other Set* methods.
		assert percentage >= 0.001 and percentage <= 100
		return self.SendCommand(f'SetJointVel({percentage})')

	# Add a new robot pose
	def AddPose(self, poseName, pose):
		self.pose[poseName] = pose

	# Add a new robot joint position
	def AddJoints(self, jointsName, joint):
		self.joints[jointsName] = joint

	# Set tool frame to existing tool or arbitrary offset
	def SetTool(self, toolOffset):
		# BUGFIX: the original read self.tool (never defined; the dict is
		# self.toolFrame) and sent the offset formatted as a Python list;
		# the tuple formatting matches every other command here.
		sentTool = _returnList(self.toolFrame, toolOffset)
		self.SendCommand(f'SetTRF{tuple(sentTool)}')

	# Add a new tool frame to robot tools
	def AddTool(self, toolName, toolOffset):
		"""Register *toolOffset* (3 or 6 values; padded with zero rotations).

		Note: a 3-element list passed in is padded in place.
		"""
		if len(toolOffset) == 3:
			for vector in range(3):
				toolOffset.append(0)
		self.toolFrame[toolName] = toolOffset

	# Set work plane to existing plane or arbitrary offset
	def SetWork(self, workPlane):
		# BUGFIX: the original read self.work (never defined; the dict is
		# self.workFrame) and sent the plane formatted as a Python list.
		sentWork = _returnList(self.workFrame, workPlane)
		self.SendCommand(f'SetWRF{tuple(sentWork)}')

	# Add a new work plane to robot workFrame dict
	def AddWork(self, workName, workPlane):
		"""Register *workPlane* (3 or 6 values; padded with zero rotations)."""
		if len(workPlane) == 3:
			for vector in range(3):
				workPlane.append(0)
		self.workFrame[workName] = workPlane

	# Get list of current joint positions in degrees
	def GetJoints(self):
		# NOTE: returns the controller's raw comma-separated string;
		# ReadPosition('GetJoints') yields a parsed float list.
		return self.ReadResponse('GetJoints')

	# Get list of current cartesian position in millimeters
	def GetPose(self):
		# NOTE: returns the controller's raw comma-separated string;
		# ReadPosition('GetPose') yields a parsed float list.
		return self.ReadResponse('GetPose')

	# Delete current planned move
	def ClearMove(self):
		return self.SendCommand('ClearMotion')

	# Pause current move
	def PauseMove(self):
		return self.SendCommand('PauseMotion')

	# Resume current move
	def ResumeMove(self):
		return self.SendCommand('ResumeMotion')

	# Reset error
	def ResetError(self):
		return self.SendCommand('ResetError')

	def SetCheckpoint(self, step=1):
		"""Insert a checkpoint into the motion queue and await the reply.

		BUGFIX: the original called self._GetMessage(), which was never
		defined; the reply is now read and parsed inline like SendCommand.
		The accepted codes (2000/2001) are kept from the original —
		NOTE(review): confirm against the Mecademic manual.
		"""
		self.controlClient.send(bytes(f'SetCheckpoint({step})\0','ascii'))
		code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
		if int(code) in [2000, 2001]: return True
		else: return False

	# Set position update rate in ms
	def SetMonitoringInterval(self, ms):
		assert ms >= 0.001 and ms <= 1
		return self.SendCommand(f'SetMonitoringInterval({ms})', client='feedback')

	# Get robot status as list of booleans
	def GetStatus(self, status='all'):
		"""Return all status flags, or one flag selected by statusDict key."""
		responseList = self.ReadResponse('GetStatusRobot').split(',')
		responseBool = [bool(int(response)) for response in responseList]
		if status != 'all':
			if status in statusDict.keys():
				return responseBool[statusDict[status]]
			else:
				print(f'Use an available value:\n{statusDict.keys()}')
		else:
			return responseBool

	# Send command and receive confirmation
	def SendCommand(self, cmd, client='command'):
		"""Send *cmd* on the chosen socket; True when the reply code matches."""
		if self.connected is False: self.Connect()
		if client == 'command':
			_writeProgram(cmd)
			self.controlClient.send(bytes(f'{cmd}\0','ascii'))
			code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
			if int(code) in self._getCodes(cmd): return True
			else:
				print(f'Error: {response}')
				self.ResetError()
				return False
		else:
			self.feedbackClient.send(bytes(f'{cmd}\0','ascii'))
			code, response = self.feedbackClient.recv(1024).decode('ascii')[1:-2].split('][')
			print(code, response)
			return True

	# Send command and receive message
	def ReadResponse(self, cmd):
		"""Send *cmd* and return the reply payload string (None on error)."""
		if self.connected is False: self.Connect()
		self.controlClient.send(bytes(f'{cmd}\0','ascii'))
		code, response = self.controlClient.recv(1024).decode('ascii')[1:-2].split('][')
		if int(code) in self._getCodes(cmd): return response
		else:
			logging.warning(f'Error: {response}')
			return None

	# Receive current joint or cartesian positions
	def ReadPosition(self, cmd):
		"""Read the feedback stream; return floats for GetJoints/GetPose."""
		if self.connected is False: self.Connect()
		jointResponse, poseResponse = self.feedbackClient.recv(1024).decode('ascii').split('\x00')[:2]
		print(jointResponse, poseResponse)
		if cmd == 'GetJoints': msg = jointResponse
		elif cmd == 'GetPose': msg = poseResponse
		code, responseString = msg[1:-2].split('][')
		if not int(code) in self._getCodes(cmd):
			logging.warning(f'Error: {responseString}')
			return None
		responseList = responseString.split(',')
		responseFloat = [float(response) for response in responseList]
		return responseFloat

	# Look up corresponding error code in dictionary
	def _getCodes(self, cmd):
		"""Expected reply codes for *cmd* (Move*/Set* share fixed codes)."""
		if cmd.startswith('Move'):
			return [3004,3012]
		elif cmd.startswith('Set'):
			return [3012]
		else:
			return responseDict[cmd]

	# Move speed checks
	def _checkJointLimits(self, joints):
		"""Assert each joint angle is within the Meca500 travel limits."""
		assert abs(joints[0]) <= 175
		assert joints[1] >= -70 and joints[1] <= 90
		assert joints[2] >= -135 and joints[2] <= 70
		assert abs(joints[3]) <= 170
		assert abs(joints[4]) <= 115
		assert abs(joints[5]) <= 180
		return True

	def _checkJointSpeedLimits(self, joints):
		"""Assert each joint speed is within its limit (deg/s)."""
		assert abs(joints[0]) <= 150
		assert abs(joints[1]) <= 150
		assert abs(joints[2]) <= 180
		assert abs(joints[3]) <= 300
		assert abs(joints[4]) <= 300
		assert abs(joints[5]) <= 500
		return True

	def _checkPoseSpeedLimits(self, pose):
		"""Assert linear (mm/s) and angular (deg/s) speed components."""
		assert pose[0] >= 0.001 and pose[0] <= 1000
		assert pose[1] >= 0.001 and pose[1] <= 1000
		assert pose[2] >= 0.001 and pose[2] <= 1000
		assert pose[3] >= 0.001 and pose[3] <= 300
		assert pose[4] >= 0.001 and pose[4] <= 300
		assert pose[5] >= 0.001 and pose[5] <= 500
		return True

	def _checkPoseRotLimits(self, pose):
		"""Assert every rotation component is within 0.001-300."""
		for vector in pose:
			assert vector >= 0.001 and vector <= 300
# Pose object
class Pose():
	# Lightweight container for a pose plus the coordinate space it is
	# expressed in ('pose' by default).
	def __init__(self, pose, coords='pose'):
		self.coords = coords
		self.pose = pose
	# Ease of use 0-100% global speed adjustment
	# NOTE(review): this body appears copied from Robot.SetSpeed — Pose has
	# no SetCartAcc/SetCartAngVel/... methods, so calling SetSpeed() will
	# raise AttributeError. Confirm intent before use.
	def SetSpeed(self, percentage):
		# If speed is provided as fractional change to percentage
		if percentage < 1: percentage *= 100
		self.SetCartAcc(percentage)
		self.SetCartAngVel(3*percentage)
		self.SetCartLinVel(10*percentage)
		self.SetJointAcc(1.5*percentage)
		self.SetJointVel(percentage)
# Container for a multi-step move definition.
class CompoundMove():
	"""Records the coordinate space of a compound move.

	The ``pose`` argument is accepted for interface compatibility but is
	currently not stored.
	"""

	def __init__(self, pose, coords='pose'):
		self.coords = coords
# Convert internal pose to pose list if needed
def _returnList(poseDict, pose):
if type(pose) is str:
if pose in poseDict.keys():
return poseDict[pose]
else:
print('Not a valid pose!')
return None
else:
assert type(pose) is list
return pose
def _writeProgram(command):
	"""Append *command* to PROGRAM_FILE, one command per line.

	The explicit f.close() inside the ``with`` block was redundant (the
	context manager already closes the file) and has been removed.
	"""
	with open(PROGRAM_FILE,'a') as f:
		f.write(f'{command}\n')
7d21bc0228817255f88c077601480d01e75f6337 | 1,750 | py | Python | renku/core/utils/datetime8601.py | mohammad-sdsc/renku-python | 3a7bf2339ab56a3bc00a689bb27a864bb5bf55da | [
"Apache-2.0"
] | null | null | null | renku/core/utils/datetime8601.py | mohammad-sdsc/renku-python | 3a7bf2339ab56a3bc00a689bb27a864bb5bf55da | [
"Apache-2.0"
] | null | null | null | renku/core/utils/datetime8601.py | mohammad-sdsc/renku-python | 3a7bf2339ab56a3bc00a689bb27a864bb5bf55da | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# Copyright 2020 - Swiss Data Science Center (SDSC)
# A partnership between École Polytechnique Fédérale de Lausanne (EPFL) and
# Eidgenössische Technische Hochschule Zürich (ETHZ).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Renku datetime utilities."""
import datetime
import re
from dateutil.parser import parse as dateutil_parse_date
# ISO 8601 combined date-time pattern, e.g. "2020-05-01T13:00:00.5+02:00":
# year-month-day "T" hour:minute:second, optional fractional seconds and an
# optional "Z" / +-hh:mm UTC offset.
regex = (
    r'^(-?(?:[1-9][0-9]*)?[0-9]{4})-(1[0-2]|0[1-9])-(3[01]|0[1-9]|[12]['
    r'0-9])T(2[0-3]|[01][0-9]):([0-5][0-9]):([0-5][0-9])(\.[0-9]+)?(Z|['
    r'+-](?:2[0-3]|[01][0-9]):[0-5][0-9])?$'
)
# Pre-compiled matcher, reused across calls.
match_iso8601 = re.compile(regex).match


def validate_iso8601(str_val):
    """Check if datetime string is in ISO8601 format.

    Returns True/False. The original wrapped the match in a dead
    ``try/except re.error`` — a pre-compiled pattern cannot raise
    ``re.error`` at match time — so the check is now a direct boolean.
    A TypeError for non-string input propagates exactly as before.
    """
    return match_iso8601(str_val) is not None
def parse_date(value):
    """Convert *value* (datetime or date string) to a tz-aware datetime.

    ``None`` passes through; naive datetimes are stamped with the local
    timezone.
    """
    if value is None:
        return
    if isinstance(value, datetime.datetime):
        parsed = value
    else:
        parsed = dateutil_parse_date(value)
    if parsed.tzinfo:
        return parsed
    # set timezone to local timezone
    local_tz = datetime.datetime.now(datetime.timezone.utc).astimezone().tzinfo
    return parsed.replace(tzinfo=local_tz)
| 31.25 | 77 | 0.660571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,064 | 0.606268 |
7d24621d93eb905ea51ea7bf215e6bab3af4d108 | 2,493 | py | Python | dn-real-in/eval.py | ngchc/deepBoosting | 13b3515c16f0d9a0a92b990dfb5eef09ec1a7298 | [
"MIT"
] | 49 | 2019-04-01T02:03:05.000Z | 2021-11-29T07:58:33.000Z | dn-real-in/eval.py | ngchc/deepBoosting | 13b3515c16f0d9a0a92b990dfb5eef09ec1a7298 | [
"MIT"
] | 4 | 2019-04-04T06:53:19.000Z | 2021-11-02T13:11:44.000Z | dn-real-in/eval.py | ngchc/deepBoosting | 13b3515c16f0d9a0a92b990dfb5eef09ec1a7298 | [
"MIT"
] | 16 | 2019-04-01T02:03:11.000Z | 2022-03-20T13:13:04.000Z | import os
import numpy as np
import tensorflow as tf
from PIL import Image
def modcrop(im, modulo):
	"""Crop *im* so its first two dimensions are multiples of *modulo*.

	Accepts HxW or HxWxC arrays; any other rank raises AttributeError.
	"""
	if len(im.shape) not in (2, 3):
		raise AttributeError
	h = im.shape[0] - im.shape[0] % modulo
	w = im.shape[1] - im.shape[1] % modulo
	if len(im.shape) == 3:
		return im[:h, :w, :]
	return im[:h, :w]
def shave(im, border):
	"""Trim ``border[0]`` rows and ``border[1]`` columns from every edge.

	Accepts HxW or HxWxC arrays; any other rank raises AttributeError.
	"""
	rows, cols = border[0], border[1]
	if len(im.shape) == 3:
		return im[rows:-rows, cols:-cols, :]
	if len(im.shape) == 2:
		return im[rows:-rows, cols:-cols]
	raise AttributeError
def compute_psnr(im1, im2):
	"""Return the PSNR in dB between two images, assuming a 255 peak value.

	Raises Exception when the shapes differ. Identical images yield an
	infinite PSNR (with a divide-by-zero warning), as before.
	"""
	if im1.shape != im2.shape:
		raise Exception('the shapes of two images are not equal')
	# np.asfarray was removed in NumPy 2.0; asarray(..., dtype=float64) is
	# its exact equivalent.
	diff = np.asarray(im1, dtype=np.float64) - np.asarray(im2, dtype=np.float64)
	rmse = np.sqrt((diff ** 2).mean())
	psnr = 20 * np.log10(255.0 / rmse)
	return psnr
def main():
	"""Evaluate the denoising graph on Set60 ISO6400 and report mean PSNR.

	Loads a frozen TensorFlow 1.x graph (./graph.pb), runs every noisy
	capture through it, and compares against the clean reference images.
	"""
	# folder path
	folder = '../datas/Set60/ISO6400'
	# generate the file list
	filepath = os.listdir(folder)
	filepath.sort()
	# NHWC input placeholder: one RGB image of arbitrary height/width.
	im_input = tf.placeholder('float', [1, None, None, 3], name='im_input')
	# create a session for running operations in the graph
	config = tf.ConfigProto(allow_soft_placement=True)
	config.gpu_options.allow_growth = True
	sess = tf.Session(config=config)
	with tf.device('/gpu:0'):
		# Load the frozen model and wire our placeholder to its input.
		with open('./graph.pb', 'rb') as f:
			graph_def = tf.GraphDef()
			graph_def.ParseFromString(f.read())
			output = tf.import_graph_def(graph_def, input_map={'im_input:0': im_input}, return_elements=['output:0'])
	record_psnr = []
	# Scenes 1..20, each with 3 patches (p) and 10 noisy captures (g).
	for i in np.arange(1, 20+1, 1):
		for p in np.arange(1, 3+1, 1):
			psnrs = []
			# Clean reference image for this scene/patch.
			im = np.array(Image.open(os.path.join(folder, '%03d/%03dMP%d.PNG' % (i, i, p))))
			#Image.fromarray(im).show()
			for g in np.arange(1, 10+1, 1):
				im_n = np.array(Image.open(os.path.join(folder, '%03d/%03dN%02dP%d.PNG' % (i, i, g, p))))
				#Image.fromarray(im_n).show()
				# Normalize to [0, 1] and add the batch dimension.
				im_n = im_n.astype(np.float32) / 255.0
				im_n = np.expand_dims(im_n, axis=0)
				im_dn = sess.run(output, feed_dict={im_input: im_n})
				# Back to [0, 255] with clipping before the uint8 compare.
				im_dn = np.squeeze(im_dn) * 255.0
				im_dn = np.maximum(im_dn, 0)
				im_dn = np.minimum(im_dn, 255)
				#Image.fromarray(np.asarray(im_dn, dtype=np.uint8)).show()
				psnr = compute_psnr(im, np.asarray(im_dn, dtype=np.uint8))
				print('i%03d p%d g%02d: %.2f dB' % (i, p, g, psnr))
				psnrs.append(psnr)
			record_psnr.append(psnrs)
	# Mean PSNR +- mean per-image standard deviation across captures.
	print('%.2f+-%.3f dB' % (np.mean(record_psnr), np.mean(np.std(record_psnr, 1))))

if __name__ == '__main__':
	main()
| 27.7 | 108 | 0.636984 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 425 | 0.170477 |
7d2644ca332886ab77c32e7e0f6f69055493b94f | 294 | py | Python | c_registry.py | shemerofir/skaffold-auto-docs | 20a05d79fd5df1e1ca8dc356c627dda06dd4720a | [
"MIT"
] | null | null | null | c_registry.py | shemerofir/skaffold-auto-docs | 20a05d79fd5df1e1ca8dc356c627dda06dd4720a | [
"MIT"
] | null | null | null | c_registry.py | shemerofir/skaffold-auto-docs | 20a05d79fd5df1e1ca8dc356c627dda06dd4720a | [
"MIT"
] | null | null | null | import sh
from dotenv import load_dotenv
import os
load_dotenv()
PASSWORD = os.environ.get("sudo_password")
def c_registry():
    """Start a local Docker registry container ('registry:2') on port 5000.

    Runs ``docker run`` under sudo with the password loaded from the
    ``sudo_password`` environment variable (via dotenv); the container is
    named 'registry' and restarts automatically.
    """
    # NOTE(review): the sudo password comes from the environment/.env file —
    # keep that file out of version control.
    with sh.contrib.sudo(password=PASSWORD, _with=True):
        sh.docker('run', '-d', '-p', '5000:5000', '--restart=always', '--name', 'registry', 'registry:2')
| 22.615385 | 105 | 0.670068 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 87 | 0.295918 |
7d270ae80a04b95ae0734653fb57ab760e18861b | 191 | py | Python | api/guids/urls.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 628 | 2015-01-15T04:33:22.000Z | 2022-03-30T06:40:10.000Z | api/guids/urls.py | gaybro8777/osf.io | 30408511510a40bc393565817b343ef5fd76ab14 | [
"Apache-2.0"
] | 4,712 | 2015-01-02T01:41:53.000Z | 2022-03-30T14:18:40.000Z | api/guids/urls.py | Johnetordoff/osf.io | de10bf249c46cede04c78f7e6f7e352c69e6e6b5 | [
"Apache-2.0"
] | 371 | 2015-01-12T16:14:08.000Z | 2022-03-31T18:58:29.000Z | from django.conf.urls import url
from api.guids import views
# URL namespace for these routes (reverse as 'osf:<name>').
app_name = 'osf'
urlpatterns = [
    # Resolve any GUID string to its referent's detail view.
    url(r'^(?P<guids>\w+)/$', views.GuidDetail.as_view(), name=views.GuidDetail.view_name),
]
| 19.1 | 91 | 0.696335 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 25 | 0.13089 |
7d27c111549ca054eb1d4350e5f213c7b661a06c | 500 | py | Python | drinks/migrations/0002_drink_ingredients.py | jmhubbard/cocktail_api | 47c2cca699f02dc14af04b989beeee9855a797f0 | [
"Unlicense"
] | 1 | 2020-11-25T04:57:34.000Z | 2020-11-25T04:57:34.000Z | drinks/migrations/0002_drink_ingredients.py | jmhubbard/cocktail_api | 47c2cca699f02dc14af04b989beeee9855a797f0 | [
"Unlicense"
] | null | null | null | drinks/migrations/0002_drink_ingredients.py | jmhubbard/cocktail_api | 47c2cca699f02dc14af04b989beeee9855a797f0 | [
"Unlicense"
] | null | null | null | # Generated by Django 3.1.2 on 2020-10-29 04:46
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the many-to-many `ingredients` field to
    # Drink, routed through the `recipes.Recipe` join model.

    dependencies = [
        ('recipes', '0001_initial'),
        ('ingredients', '0001_initial'),
        ('drinks', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='drink',
            name='ingredients',
            field=models.ManyToManyField(through='recipes.Recipe', to='ingredients.Ingredient'),
        ),
    ]
| 23.809524 | 96 | 0.6 | 407 | 0.814 | 0 | 0 | 0 | 0 | 0 | 0 | 179 | 0.358 |
7d2960dbcf92acd1ac35985bb043ac91054c10d1 | 5,971 | py | Python | trello_track/__init__.py | seanmacavaney/trello-track | b900cfbe9395cf2742e624b49dfcb9ed6da67552 | [
"MIT"
] | 1 | 2020-09-13T17:17:41.000Z | 2020-09-13T17:17:41.000Z | trello_track/__init__.py | seanmacavaney/trello-track | b900cfbe9395cf2742e624b49dfcb9ed6da67552 | [
"MIT"
] | null | null | null | trello_track/__init__.py | seanmacavaney/trello-track | b900cfbe9395cf2742e624b49dfcb9ed6da67552 | [
"MIT"
] | null | null | null | import sys
import json
import os
import platform
import subprocess
import contextlib
import requests
# Name of the checklist on the Trello card that tracks command runs.
CHECKLIST_NAME = 'Commands'
# Check-item status icons: ready / in progress / done / failed.
ICON_READY, ICON_IP, ICON_DONE, ICON_FAIL = '⚪', '⌛', '🔵', '🔴'
# Lazily-loaded credentials cache; populated on first CREDS() call.
_CREDS = None
def CREDS():
    """Return Trello API credentials, loading and caching them on first use.

    Sources, later ones overriding earlier: JSON in ~/.trello, JSON in
    ./.trello, then the TRELLO_KEY / TRELLO_TOKEN environment variables.

    Raises RuntimeError when no key/token pair can be assembled.
    """
    global _CREDS
    if _CREDS is None:
        c = {}
        # Context managers close the config files promptly; the original
        # leaked the handles from json.load(open(...)).
        home_cfg = os.path.expanduser('~/.trello')
        if os.path.exists(home_cfg):
            with open(home_cfg, 'rt') as f:
                c.update(json.load(f))
        if os.path.exists('./.trello'):
            with open('./.trello', 'rt') as f:
                c.update(json.load(f))
        if 'TRELLO_TOKEN' in os.environ:
            c['token'] = os.environ['TRELLO_TOKEN']
        if 'TRELLO_KEY' in os.environ:
            c['key'] = os.environ['TRELLO_KEY']
        if 'key' not in c or 'token' not in c:
            raise RuntimeError('Missing Trello `key` and `token`. Please provide as JSON in ~/.trello, '
                               './.trello, or as TRELLO_KEY and TRELLO_TOKEN environment vars.\n\n'
                               'You can find your key and generate a token at https://trello.com/app-key')
        _CREDS = c
    return _CREDS
def _api(method, path, params=None):
    """Issue one Trello REST call and return the decoded JSON body.

    The cached key/token credentials are merged into the query parameters.
    """
    creds = CREDS()
    query = dict(params) if params else {}
    query['key'] = creds['key']
    query['token'] = creds['token']
    response = requests.request(method, path, params=query)
    return json.loads(response.text)
class TrelloTracker:
    """Tracks one operation as a check item on a Trello card's checklist.

    State machine (``self.state``): 'NO_TRACK' (no/unknown card; every
    operation becomes a no-op), 'READY' (item created, not started),
    'IP' (in progress), then 'DONE' or 'FAIL' on context-manager exit.
    """
    def __init__(self, desc, card_id=None, _start_in_progress=False):
        """Create the check item on the card's 'Commands' checklist.

        desc: human-readable description shown on the check item.
        card_id: Trello card id or shortLink; falls back to $TRELLO_CARD.
        _start_in_progress: when True the item is created already marked
        with the in-progress icon and the tracker starts in state 'IP'.
        """
        if card_id is None:
            card_id = os.environ.get('TRELLO_CARD')
        if card_id is None:
            # No card configured at all: degrade to a silent no-op tracker.
            sys.stderr.write('TRELLO: No card supplied. This operation will not be tracked.\n')
            self.state = 'NO_TRACK'
        else:
            # Find card via the search endpoint, matching on id or shortLink.
            matching_cards = _api("GET", "https://trello.com/1/search", {'query': card_id})
            card = None
            for c in matching_cards['cards']:
                if card_id in (c['id'], c['shortLink']):
                    card = c
                    break
            if card is None:
                sys.stderr.write('TRELLO: Could not find card: {}. This operation will not be tracked.\n'.format(card_id))
                self.state = 'NO_TRACK'
            else:
                # Reuse the 'Commands' checklist if exactly one exists; otherwise create one.
                all_checklists = _api("GET", "https://api.trello.com/1/cards/{id}/checklists".format(**card))
                checklist = [c for c in all_checklists if c['name'] == CHECKLIST_NAME]
                if len(checklist) == 1:
                    checklist = checklist[0]
                else:
                    checklist = _api(
                        "POST",
                        "https://api.trello.com/1/cards/{id}/checklists".format(**card),
                        {'name': CHECKLIST_NAME})
                icon = ICON_IP if _start_in_progress else ICON_READY
                check_item = _api(
                    "POST",
                    "https://api.trello.com/1/checklists/{id}/checkItems".format(**checklist),
                    {'name': '{} {}'.format(icon, desc)})
                self.state = 'IP' if _start_in_progress else 'READY'
                self.desc = desc
                self.card = card
                self.check_item = check_item
    def __enter__(self):
        """Mark the check item as in progress (icon swap) and enter state 'IP'."""
        assert self.state in ('IP', 'READY', 'NO_TRACK')
        if self.state == 'READY':
            _api("PUT",
                 "https://api.trello.com/1/cards/{}/checkItem/{}".format(self.card['id'], self.check_item['id']),
                 {'state': 'complete', 'name': '{} {}'.format(ICON_IP, self.desc)})
            self.state = 'IP'
        return self
    def __exit__(self, ex_type, ex_val, ex_traceback):
        """Mark the item DONE, or FAIL plus a comment containing the exception."""
        assert self.state in ('IP', 'NO_TRACK')
        if self.state == 'NO_TRACK':
            return
        if ex_type:
            self.state = 'FAIL'
            _api("PUT",
                 "https://api.trello.com/1/cards/{}/checkItem/{}".format(self.card['id'], self.check_item['id']),
                 {'state': 'complete', 'name': '{} {}'.format(ICON_FAIL, self.desc)})
            # Record the failure reason as a card comment for later inspection.
            _api("POST",
                 "https://api.trello.com/1/cards/{id}/actions/comments".format(**self.card),
                 {'text': '{} failed with exception:\n`{}`'.format(self.desc, ex_val)})
        else:
            self.state = 'DONE'
            _api("PUT",
                 "https://api.trello.com/1/cards/{}/checkItem/{}".format(self.card['id'], self.check_item['id']),
                 {'state': 'complete', 'name': '{} {}'.format(ICON_DONE, self.desc)})
@contextlib.contextmanager
def track(desc, card_id=None):
    """Context manager: record `desc` on the Trello card while the body runs.

    The check item is created already marked in-progress; on exit it is
    flipped to done/failed by TrelloTracker.__exit__.
    """
    with TrelloTracker(desc, card_id, _start_in_progress=True) as tracker:
        yield tracker
class TaskManager:
    """Queue of (description, callable) tasks run under Trello tracking.

    All trackers (check items) are created up front by run(), then each
    task executes inside its tracker so progress/failure is reported.
    """

    def __init__(self, card_id=None):
        """Remember the card id, defaulting to the $TRELLO_CARD env var."""
        self.card_id = card_id if card_id is not None else os.environ.get('TRELLO_CARD')
        self.tasks = []

    def add_task(self, desc, fn):
        """Queue a zero-argument callable ``fn`` described by ``desc``."""
        self.tasks.append((desc, fn))

    def run(self):
        """Create one tracker per queued task, then execute them in order."""
        trackers = [TrelloTracker(desc, self.card_id) for desc, _ in self.tasks]
        for tracker, (_, fn) in zip(trackers, self.tasks):
            with tracker:
                fn()

    def clear(self):
        """Drop all queued tasks."""
        self.tasks.clear()

    def __enter__(self):
        return self

    def __exit__(self, ex_type, ex_val, ex_traceback):
        # NOTE(review): tasks run only when the with-body raised; this looks
        # inverted (one would expect `if not ex_type`) -- confirm intent
        # before changing. Behavior preserved exactly here.
        if ex_type:
            self.run()
        self.clear()
def main_cli():
    """Console-script entry point: forward command-line arguments to main()."""
    main(sys.argv[1:])
def main(argv):
    """Run ``argv[1:]`` as a subprocess tracked on Trello card ``argv[0]``.

    KeyboardInterrupt is swallowed while waiting so the tracker can still
    record the child's final result; a non-zero exit status raises
    CalledProcessError, which the tracker reports as a failure.
    """
    assert len(argv) >= 2, "usage: [card_id] [command...]"
    card_id, args = argv[0], argv[1:]

    def _quote(token):
        # Wrap tokens containing spaces in double quotes for display only.
        return f'"{token}"' if ' ' in token else token

    cmd = ' '.join(_quote(token) for token in args)
    with track(card_id=card_id, desc=f'@{platform.node()} `{cmd}`'):
        proc = subprocess.Popen(args)
        while True:
            try:
                status = proc.wait()
                if status != 0:
                    raise subprocess.CalledProcessError(status, args)
                break
            except KeyboardInterrupt:
                pass
# Script entry point.
if __name__ == '__main__':
    main_cli()
| 35.754491 | 122 | 0.533077 | 3,860 | 0.645377 | 127 | 0.021234 | 154 | 0.025748 | 0 | 0 | 1,382 | 0.231065 |
7d2a2fea07d41d19ee631745dc1ae58b9dcafc22 | 7,363 | py | Python | src/ResourceManager.py | NEKERAFA/Soul-Tower | d37c0bf6bcbf253ec5b2c41f802adeeca31fb384 | [
"MIT"
] | null | null | null | src/ResourceManager.py | NEKERAFA/Soul-Tower | d37c0bf6bcbf253ec5b2c41f802adeeca31fb384 | [
"MIT"
] | null | null | null | src/ResourceManager.py | NEKERAFA/Soul-Tower | d37c0bf6bcbf253ec5b2c41f802adeeca31fb384 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import pygame, sys, os, json
from pygame.locals import *
# Asset directory layout, relative to the working directory.
IMAGE_PATH = os.path.join('assets', 'images')
SPRITE_SHEET_PATH = os.path.join('assets', 'sprites')
STAGE_CONF_PATH = os.path.join('assets', 'stages')
ROOM_CONF_PATH = os.path.join('assets', 'rooms')
DIALOGUE_CONF_PATH = os.path.join('assets', 'dialogues')
FONT_PATH = os.path.join('assets', 'fonts')
SOUND_PATH = os.path.join('assets', 'sounds')
MUSIC_PATH = os.path.join(SOUND_PATH,'music')
EFFECT_PATH = os.path.join(SOUND_PATH,'effects')
# -------------------------------------------------
# Clase ResourceManager
# En este caso se implementa como una clase vacía, solo con métodos de clase
class ResourceManager(object):
    """Class-level cache of game assets: images, sounds, fonts, JSON configs.

    Every asset is loaded on first request and memoised in ``resources``
    (keyed by full path, bare name for sound effects, or (path, size) for
    fonts); the matching ``free_*`` method drops one cached entry.  Only
    classmethods are used, so the class is never instantiated.
    Written for Python 2 (print statements, ``except E, e`` syntax).
    """
    # Shared cache: resource key -> loaded object.
    resources = {}
    @classmethod
    def load_music(cls, name):
        """Load a music track into the pygame mixer (streams; not cached)."""
        fullname = os.path.join(MUSIC_PATH, name)
        pygame.mixer.music.load(fullname)
    @classmethod
    def load_effect_sound(cls, name):
        """Return a cached pygame Sound, loading it from EFFECT_PATH on first use."""
        if name in cls.resources:
            return cls.resources[name]
        else:
            fullname = os.path.join(EFFECT_PATH, name)
            try:
                sound_effect = pygame.mixer.Sound(fullname)
            except pygame.error, message:
                print 'Cannot load sound effect file:', fullname
                raise SystemExit, message
            # Cache it -- note the key is the bare name here, while the
            # other loaders key on the full path.
            cls.resources[name] = sound_effect
            return sound_effect
    @classmethod
    def load_image(cls, name, colorkey=None):
        """Return a cached image Surface, loading and converting on first use.

        colorkey: transparency colour; -1 means "use the top-left pixel".
        """
        fullname = os.path.join(IMAGE_PATH, name)
        # If the file name is already among the loaded resources
        if fullname in cls.resources:
            # return the cached resource
            return cls.resources[fullname]
        # Not loaded before
        else:
            # Load the image from the images folder
            try:
                image = pygame.image.load(fullname)
            except pygame.error, message:
                print 'Cannot load image:', fullname
                raise SystemExit, message
            # Apply the colorkey if requested
            if colorkey is not None:
                # NOTE(review): 'is -1' relies on CPython small-int caching;
                # '== -1' would be the safe comparison.
                if colorkey is -1:
                    colorkey = image.get_at((0,0))
                image.set_colorkey(colorkey, RLEACCEL)
            # Convert the alpha channel
            image = image.convert_alpha()
            # Cache it
            cls.resources[fullname] = image
            # Return it
            return image
    @classmethod
    def free_image(cls, name):
        """Drop a cached image (no-op if it was never loaded)."""
        fullname = os.path.join(IMAGE_PATH, name)
        if fullname in cls.resources:
            del cls.resources[fullname]
    @classmethod
    def load_sprite_conf(cls, name):
        """Return a cached sprite-sheet JSON config, parsing the file on first use."""
        fullname = os.path.join(SPRITE_SHEET_PATH, name)
        # If the file name is already among the loaded resources
        if fullname in cls.resources:
            # return the cached resource
            return cls.resources[fullname]
        # Not loaded before
        else:
            # Open the config file from the sprites folder
            try:
                pfile = open(fullname, 'r')
            except IOError as e:
                print 'Cannot load sprite sheet:', fullname
                raise SystemExit, e.strerror
            # Load and parse the json
            data = json.load(pfile)
            pfile.close()
            # Cache it
            cls.resources[fullname] = data
            # Return it
            return data
    @classmethod
    def free_sprite_conf(cls, name):
        """Drop a cached sprite-sheet config."""
        fullname = os.path.join(SPRITE_SHEET_PATH, name)
        if fullname in cls.resources:
            del cls.resources[fullname]
    @classmethod
    def load_room(cls, name):
        """Return a cached room JSON config, parsing the file on first use."""
        fullname = os.path.join(ROOM_CONF_PATH, name)
        # If the file name is already among the loaded resources
        if fullname in cls.resources:
            # return the cached resource
            return cls.resources[fullname]
        # Not loaded before
        else:
            # Open the config file from the rooms folder
            try:
                pfile = open(fullname, 'r')
            except IOError as e:
                print 'Cannot load room:', fullname
                raise SystemExit, e.strerror
            data = json.load(pfile)
            pfile.close()
            # Cache it
            cls.resources[fullname] = data
            # Return it
            return data
    @classmethod
    def free_room(cls, name):
        """Drop a cached room config."""
        fullname = os.path.join(ROOM_CONF_PATH, name)
        if fullname in cls.resources:
            del cls.resources[fullname]
    @classmethod
    def load_stage(cls, name):
        """Return a cached stage JSON config, parsing the file on first use."""
        fullname = os.path.join(STAGE_CONF_PATH, name)
        # If the file name is already among the loaded resources
        if fullname in cls.resources:
            # return the cached resource
            return cls.resources[fullname]
        # Not loaded before
        else:
            # Open the config file from the stages folder
            try:
                pfile = open(fullname, 'r')
            except IOError as e:
                print 'Cannot load stage:', fullname
                raise SystemExit, e.strerror
            data = json.load(pfile)
            pfile.close()
            # Cache it
            cls.resources[fullname] = data
            # Return it
            return data
    @classmethod
    def fre_stage(cls, name):
        """Drop a cached stage config.

        NOTE(review): method name looks like a typo for ``free_stage`` --
        check callers before renaming.
        """
        fullname = os.path.join(STAGE_CONF_PATH, name)
        if fullname in cls.resources:
            del cls.resources[fullname]
    @classmethod
    def load_dialogue(cls, name):
        """Return a cached dialogue JSON config, parsing the file on first use."""
        fullname = os.path.join(DIALOGUE_CONF_PATH, name)
        # If the file name is already among the loaded resources
        if fullname in cls.resources:
            # return the cached resource
            return cls.resources[fullname]
        # Not loaded before
        else:
            try:
                pfile = open(fullname, 'r')
            except IOError as e:
                print 'Cannot load dialogue:', fullname
                raise SystemExit, e.strerror
            data = json.load(pfile)
            pfile.close()
            # Cache it
            cls.resources[fullname] = data
            # Return it
            return data
    @classmethod
    def free_dialogue(cls, name):
        """Drop a cached dialogue config."""
        fullname = os.path.join(DIALOGUE_CONF_PATH, name)
        if fullname in cls.resources:
            del cls.resources[fullname]
    @classmethod
    def load_font(cls, name, size):
        """Return a cached pygame Font; the cache key is (path, size)."""
        fullname = os.path.join(FONT_PATH, name)
        if (fullname, size) in cls.resources:
            return cls.resources[(fullname, size)]
        else:
            try:
                font = pygame.font.Font(fullname, size)
            except pygame.error, message:
                print 'Cannot load font:', fullname
                raise SystemExit, message
            cls.resources[(fullname, size)] = font
            return font
    @classmethod
    def free_font(cls, name, size):
        """Drop a cached font for the given (name, size) pair."""
        fullname = os.path.join(FONT_PATH, name)
        if (fullname, size) in cls.resources:
            del cls.resources[(fullname, size)]
| 33.621005 | 76 | 0.573408 | 6,688 | 0.90734 | 0 | 0 | 6,551 | 0.888753 | 0 | 0 | 1,607 | 0.218017 |
7d2c1800b1cf775906aaeca97219fcb2b7436072 | 1,307 | py | Python | valuation/migrations/0001_initial.py | jiun0507/minestock | b333298575cae1c426cc4450e85e9e576458b74a | [
"Unlicense"
] | null | null | null | valuation/migrations/0001_initial.py | jiun0507/minestock | b333298575cae1c426cc4450e85e9e576458b74a | [
"Unlicense"
] | null | null | null | valuation/migrations/0001_initial.py | jiun0507/minestock | b333298575cae1c426cc4450e85e9e576458b74a | [
"Unlicense"
] | 1 | 2021-10-15T20:10:39.000Z | 2021-10-15T20:10:39.000Z | # Generated by Django 3.2 on 2021-04-28 12:31
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Initial schema for the valuation app: a category lookup table and the
    # Valuation model itself, each valuation owned by a user.
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='ValuationCategory',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=200)),
            ],
        ),
        migrations.CreateModel(
            name='Valuation',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('ticker', models.CharField(max_length=10, null=True)),
                ('review', models.TextField()),
                ('method', models.CharField(blank=True, choices=[('dcf', 'DCF'), ('reproduction_cst', 'Reproduction_cost'), ('other', 'Other')], max_length=20)),
                ('value', models.FloatField()),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
    ]
| 36.305556 | 161 | 0.599847 | 1,150 | 0.879878 | 0 | 0 | 0 | 0 | 0 | 0 | 195 | 0.149197 |
7d2cd9bd2587b1eea5028879416dfa12cb5caa1c | 6,268 | py | Python | ravenframework/SupervisedLearning/ScikitLearn/Ensemble/StackingRegressor.py | khurrumsaleem/raven | 3a158f9ae3851d3eca51b4bd91ea6494e5c0ed89 | [
"Apache-2.0"
] | null | null | null | ravenframework/SupervisedLearning/ScikitLearn/Ensemble/StackingRegressor.py | khurrumsaleem/raven | 3a158f9ae3851d3eca51b4bd91ea6494e5c0ed89 | [
"Apache-2.0"
] | null | null | null | ravenframework/SupervisedLearning/ScikitLearn/Ensemble/StackingRegressor.py | khurrumsaleem/raven | 3a158f9ae3851d3eca51b4bd91ea6494e5c0ed89 | [
"Apache-2.0"
] | null | null | null | # Copyright 2017 Battelle Energy Alliance, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Created on Nov. 22, 2021
@author: wangc
StackingRegressor
A Bagging regressor.
"""
#Internal Modules (Lazy Importer)--------------------------------------------------------------------
#Internal Modules (Lazy Importer) End----------------------------------------------------------------
#External Modules------------------------------------------------------------------------------------
#External Modules End--------------------------------------------------------------------------------
#Internal Modules------------------------------------------------------------------------------------
from ....SupervisedLearning.ScikitLearn import ScikitLearnBase
from ....utils import InputData, InputTypes
#Internal Modules End--------------------------------------------------------------------------------
class StackingRegressor(ScikitLearnBase):
  """
    Stack of estimators with a final regressor.

    The predictions of the individual base estimators are stacked and fed to
    a final estimator that computes the overall prediction.
  """
  info = {'problemtype':'regression', 'normalize':False}

  def __init__(self):
    """
      Constructor that will appropriately initialize a supervised learning object
      @ In, None
      @ Out, None
    """
    super().__init__()
    self.multioutputWrapper = True
    import sklearn
    import sklearn.ensemble
    # check sklearn version, StackingRegressor is stable in sklearn version >= 0.24
    version = [int(n) for n in sklearn.__version__.split('.')]
    # Bug fix: the original test used 'version[1] <= 24', which rejected
    # sklearn 0.24 itself even though the error message promises that
    # "0.24 or newer" is acceptable. Only versions strictly below 0.24
    # are rejected now.
    if version[0] < 1 and version[1] < 24:
      self.raiseAnError(IOError, 'StackingRegressor is not available in current sklearn version', sklearn.__version__,
                        'Please try to update sklearn version to 0.24 or newer!')
    self.model = sklearn.ensemble.StackingRegressor

  @classmethod
  def getInputSpecification(cls):
    """
      Method to get a reference to a class that specifies the input data for
      class cls.
      @ In, cls, the class for which we are retrieving the specification
      @ Out, inputSpecification, InputData.ParameterInput, class to use for
        specifying input of cls.
    """
    specs = super().getInputSpecification()
    specs.description = r"""The \xmlNode{StackingRegressor} consists in stacking the output of individual estimator and
                        use a regressor to compute the final prediction. Stacking allows to use the strength of each
                        individual estimator by using their output as input of a final estimator.
                        """
    # Base estimators: one <estimator> assembler node per stacked ROM.
    estimatorInput = InputData.assemblyInputFactory("estimator", contentType=InputTypes.StringType,
                                                 descr=r"""name of a ROM that can be used as an estimator""", default='no-default')
    specs.addSub(estimatorInput)
    specs.addSub(InputData.parameterInputFactory("final_estimator", contentType=InputTypes.StringType,
                                                 descr=r"""The name of estimator which will be used to combine the base estimators.""", default='no-default'))
    specs.addSub(InputData.parameterInputFactory("cv", contentType=InputTypes.IntegerType,
                                                 descr=r"""specify the number of folds in a (Stratified) KFold,""", default=5))
    specs.addSub(InputData.parameterInputFactory("passthrough", contentType=InputTypes.BoolType,
                                                 descr=r"""When False, only the predictions of estimators will be used as training
                                                 data for final\_estimator. When True, the final\_estimator is trained on the predictions
                                                 as well as the original training data.""", default=False))
    return specs

  def _handleInput(self, paramInput):
    """
      Function to handle the common parts of the distribution parameter input.
      @ In, paramInput, ParameterInput, the already parsed input.
      @ Out, None
    """
    super()._handleInput(paramInput)
    settings, notFound = paramInput.findNodesAndExtractValues(['final_estimator', 'cv', 'passthrough'])
    # notFound must be empty: all three nodes have defaults in the spec.
    assert(not notFound)
    self.settings = settings

  def setEstimator(self, estimatorList):
    """
      Initialization method
      @ In, estimatorList, list of ROM instances/estimators used by ROM
      @ Out, None
    """
    super().setEstimator(estimatorList)
    estimators = []
    foundFinalEstimator = False
    for estimator in estimatorList:
      interfaceRom = estimator._interfaceROM
      if interfaceRom.info['problemtype'] != 'regression':
        self.raiseAnError(IOError, 'estimator:', estimator.name, 'with problem type', interfaceRom.info['problemtype'],
                          'can not be used for', self.name)
      # In sklearn, multioutput wrapper can not be used by outer and inner estimator at the same time
      # If the outer estimator can handle multioutput, the multioutput wrapper of inner can be kept,
      # otherwise, we need to remove the wrapper for inner estimator.
      if interfaceRom.multioutputWrapper:
        sklEstimator = interfaceRom.model.get_params()['estimator']
      else:
        sklEstimator = interfaceRom.model
      # The estimator matching 'final_estimator' by name becomes the combiner;
      # all others are stacked as base estimators.
      if estimator.name == self.settings['final_estimator']:
        self.settings['final_estimator'] = sklEstimator
        foundFinalEstimator = True
        continue
      estimators.append((estimator.name, sklEstimator))
    self.settings['estimators'] = estimators
    if not foundFinalEstimator:
      # Typo fix in the error message: 'provdide' -> 'provided'.
      self.raiseAnError(IOError, 'final_estimator:', self.settings['final_estimator'], 'is not found among provided estimators:',
                        ','.join([name for name,_ in estimators]))
    self.initializeModel(self.settings)
| 50.144 | 158 | 0.62508 | 4,860 | 0.775367 | 0 | 0 | 1,987 | 0.317007 | 0 | 0 | 3,732 | 0.595405 |
7d2d9540209fa5a85af406d4e806349eeca524ef | 908 | py | Python | fluent_contents/rendering/__init__.py | francofuji/django-fluent-contents | 03da447ef0854b0e6a6f8ff39d9281d11efc8587 | [
"Apache-2.0"
] | null | null | null | fluent_contents/rendering/__init__.py | francofuji/django-fluent-contents | 03da447ef0854b0e6a6f8ff39d9281d11efc8587 | [
"Apache-2.0"
] | null | null | null | fluent_contents/rendering/__init__.py | francofuji/django-fluent-contents | 03da447ef0854b0e6a6f8ff39d9281d11efc8587 | [
"Apache-2.0"
] | null | null | null | """
This module provides functions to render placeholder content manually.
The functions are available outside the regular templatetags,
so it can be called outside the templates as well.
Content is cached in memcache whenever possible; only the remaining items are queried.
The template tags also use these functions to render the :class:`~fluent_contents.models.ContentItem` objects.
"""
from .main import render_placeholder, render_content_items, get_cached_placeholder_output, render_placeholder_search_text
from .markers import is_edit_mode, set_edit_mode
from .media import register_frontend_media, get_frontend_media
__all__ = (
# Main
'get_cached_placeholder_output',
'render_placeholder',
'render_content_items',
'render_placeholder_search_text',
# Media
'get_frontend_media',
'register_frontend_media',
# Markers
'is_edit_mode',
'set_edit_mode',
)
| 30.266667 | 121 | 0.785242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 592 | 0.651982 |
7d2eedcb594966e266531a38d18f3efe92684a79 | 1,615 | py | Python | linear regression/simple_linear_regression.py | liangjisheng/Machine-Learning | 55b6781d621e2de09c6e750ecc993178fb247c7b | [
"MIT"
] | null | null | null | linear regression/simple_linear_regression.py | liangjisheng/Machine-Learning | 55b6781d621e2de09c6e750ecc993178fb247c7b | [
"MIT"
] | null | null | null | linear regression/simple_linear_regression.py | liangjisheng/Machine-Learning | 55b6781d621e2de09c6e750ecc993178fb247c7b | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
"""
@project = 0602-1
@file = simple_linear_regression
@author = Liangjisheng
@create_time = 2018/6/2 0002 下午 17:16
"""
import matplotlib.pyplot as plt
import numpy as np
from sklearn import datasets, linear_model
# 加载用于回归模型的数据集
# 这个数据集中一共有442个样本,特征向量维度为10
# 特征向量每个变量为实数,变化范围(-.2 ,.2)
# 目标输出为实数,变化范围 (25 ,346)
diabetes = datasets.load_diabetes()
# 查看数据集的基本信息
print(type(diabetes))
print(diabetes.data.shape)
print(diabetes.data.dtype)
print(diabetes.target.shape)
print(diabetes.target.dtype)
# 为了便于画图显示
# 仅仅使用一维数据作为训练用的X
# 这里使用np.newaxis的目的是让行向量变成列向量
# 这样diabetes_X每一项都代表一个样本
diabetes_X = diabetes.data[:, np.newaxis, 2]
# 此时diabetes_X的shape是(442L, 1L)
# 如果上面一行代码是:diabetes_X = diabetes.data[:, 2]
# 则diabetes_X的shape是(442L,),是一个行向量
print(diabetes_X.shape)
print(type(diabetes_X))
# 人工将输入数据划分为训练集和测试集
# 前400个样本作为训练用,后20个样本作为测试用
diabetes_X_train = diabetes_X[:-20]
diabetes_X_test = diabetes_X[-20:]
diabetes_y_train = diabetes.target[:-20]
diabetes_y_test = diabetes.target[-20:]
# 初始化一个线性回归模型
regr = linear_model.LinearRegression()
# 基于训练数据,对线性回归模型进行训练
regr.fit(diabetes_X_train, diabetes_y_train)
# 模型的参数
print('模型参数:', regr.coef_)
print('模型截距:', regr.intercept_)
# 模型在测试集上的均方差(mean square error)
print('测试集上的均方差: %.2f'
% np.mean((regr.predict(diabetes_X_test) - diabetes_y_test) ** 2))
# 模型在测试集上的得分,得分结果在0到1之间,数值越大,说明模型越好
print('模型得分: %.2f' % regr.score(diabetes_X_test, diabetes_y_test))
# 绘制模型在测试集上的效果
plt.scatter(diabetes_X_test, diabetes_y_test, color='black')
plt.plot(diabetes_X_test, regr.predict(diabetes_X_test), color='blue', linewidth=3)
plt.grid()
plt.show()
| 24.469697 | 83 | 0.763467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,255 | 0.566591 |
7d2ef278a9b7568669efc168569d30a00da19ccc | 2,104 | py | Python | app/gui/frames/templates/list_frame.py | Matexer/BSPR | a503a8795cb0f4cebe2eedd148aa00aea75b570e | [
"MIT"
] | null | null | null | app/gui/frames/templates/list_frame.py | Matexer/BSPR | a503a8795cb0f4cebe2eedd148aa00aea75b570e | [
"MIT"
] | null | null | null | app/gui/frames/templates/list_frame.py | Matexer/BSPR | a503a8795cb0f4cebe2eedd148aa00aea75b570e | [
"MIT"
] | null | null | null | import tkinter as tk
from abc import ABCMeta, abstractmethod
from ...frames.templates import FrameTemplate
from ...elements import AddButton, EditButton, DeleteButton
class ListFrameTemplate(FrameTemplate, metaclass=ABCMeta):
    """Abstract frame template made of a head section and a body section.

    Subclasses must implement both section builders; the static helpers
    construct the shared widget groups (add/edit/delete buttons, the
    dates-and-comment panel, and the list column configuration).
    """
    def __init__(self, top, *args, **kw):
        super().__init__(top, *args, **kw)
        self.create_head_section(top)
        self.create_body_section(top)
    @abstractmethod
    def create_head_section(self, top):
        """Build the header part of the frame inside ``top``."""
        pass
    @abstractmethod
    def create_body_section(self, top):
        """Build the body part of the frame inside ``top``."""
        pass
    @staticmethod
    def create_btns_container(top):
        """Create a frame with Add/Edit/Delete buttons packed left to right.

        Returns (container, (add_btn, edit_btn, delete_btn)); the container
        is not packed -- placement is up to the caller.
        """
        container = tk.Frame(top)
        add_btn = AddButton(container)
        add_btn.pack(side="left")
        edit_btn = EditButton(container)
        edit_btn.pack(side="left", padx=10)
        delete_btn = DeleteButton(container)
        delete_btn.pack(side="left")
        return container, (add_btn, edit_btn, delete_btn)
    @staticmethod
    def create_comment_container(top):
        """Create the two-column panel with dates (left) and comment (right).

        Returns (container, (adding_date, modify_date, comment)) where the
        tuple holds the value labels the caller fills in later.
        Label captions are Polish UI strings and must stay as-is.
        """
        container = tk.Frame(top)
        left_cont = tk.Frame(container)
        tk.Label(left_cont,
                 text="Data dodania:",
                 font="bold").pack(anchor="w")
        adding_date = tk.Label(left_cont)
        adding_date.pack(anchor="w")
        tk.Label(left_cont,
                 text="Ostatnia modyfikacja:",
                 font="bold").pack(anchor="w")
        modify_date = tk.Label(left_cont)
        modify_date.pack(anchor="w")
        right_cont = tk.Frame(container)
        tk.Label(right_cont,
                 text="Komentarz:",
                 font="bold").pack(anchor="w")
        comment = tk.Label(right_cont, anchor='w', justify="left")
        comment.pack(anchor="w")
        left_cont.pack(side="left", anchor="n")
        right_cont.pack(side="left", anchor="n", padx=15)
        return container, (adding_date, modify_date, comment)
    @staticmethod
    def set_list(top, tree, columns):
        """Configure ``tree`` columns from the ``columns`` mapping.

        Keys become the column names; values are passed as width specs,
        scaled against the frame's current width (hence the update() call
        to get an up-to-date winfo_width()).
        """
        top.update()
        tree_width = top.winfo_width()
        tree.set_columns(list(columns.keys()))
        tree.set_columns_width(tree_width, list(columns.values()))
| 32.875 | 66 | 0.623099 | 1,934 | 0.919202 | 0 | 0 | 1,685 | 0.800856 | 0 | 0 | 131 | 0.062262 |
7d31ec43828e5f8b1bea431b80a4901f6d2b3f3a | 337 | py | Python | EllipticCurves/Curve.py | mrajweir/Code | 6b57cbed93ba556bef08e1e66735286ccf21820d | [
"MIT"
] | null | null | null | EllipticCurves/Curve.py | mrajweir/Code | 6b57cbed93ba556bef08e1e66735286ccf21820d | [
"MIT"
] | 2 | 2020-03-31T10:19:59.000Z | 2021-02-08T14:28:38.000Z | EllipticCurves/Curve.py | mrajweir/Code | 6b57cbed93ba556bef08e1e66735286ccf21820d | [
"MIT"
] | 1 | 2020-04-05T10:20:21.000Z | 2020-04-05T10:20:21.000Z | import matplotlib.pyplot as plt
import numpy as np
def main():
    """Plot the elliptic curve y^2 = x^3 + a*x + b for a=-1, b=1 and mark (1, 1)."""
    coeff_a = -1
    coeff_b = 1
    # Sample a 100x100 grid over [-5, 5] x [-5, 5].
    y, x = np.ogrid[-5:5:100j, -5:5:100j]
    # The curve is the zero level set of y^2 - x^3 - a*x - b.
    level_values = pow(y, 2) - pow(x, 3) - x * coeff_a - coeff_b
    plt.contour(x.ravel(), y.ravel(), level_values, [0])
    # Highlight the point (1, 1), which lies on the curve.
    plt.plot(1, 1, 'ro')
    plt.grid()
    plt.show()


if __name__ == '__main__':
    main()
7d321a3687498f5a8ed7caee0688af65987caed6 | 2,312 | py | Python | importer/management/commands/rebuild_project_stats.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | null | null | null | importer/management/commands/rebuild_project_stats.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | null | null | null | importer/management/commands/rebuild_project_stats.py | brand-fabian/varfish-server | 6a084d891d676ff29355e72a29d4f7b207220283 | [
"MIT"
] | null | null | null | """Django command for rebuilding cohort statistics after import."""
import aldjemy
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from django.core.management.base import BaseCommand, CommandError
from django.db import transaction
from django.conf import settings
from projectroles.models import Project
from projectroles.plugins import get_backend_api
from variants.variant_stats import rebuild_project_variant_stats
from variants.helpers import SQLALCHEMY_ENGINE
timeline = get_backend_api("timeline_backend")
#: The User model to use.
User = get_user_model()
class Command(BaseCommand):
    """Implementation of rebuilding project-wide statistics.

    All steps are executed in a transaction, so no stale state is used or left in the database.
    """

    #: Help message displayed on the command line.
    # NOTE(review): this text looks copied from the case-import command --
    # it should describe the stats rebuild. Runtime string left unchanged.
    help = "Import case from PED file and varfish-annotator output."

    def add_arguments(self, parser):
        """Add the command's argument to the ``parser``."""
        parser.add_argument(
            "--project-uuid", help="UUID of the project to add the case to", required=True
        )

    @transaction.atomic
    def handle(self, *args, **options):
        """Perform rebuilding the statistics.

        Runs as the configured admin user (PROJECTROLES_ADMIN_OWNER) so the
        rebuild is attributed consistently; raises CommandError when that
        user or the project cannot be found.
        """
        try:
            self.stdout.write(
                "Rebuilding statistics as user: {}".format(settings.PROJECTROLES_ADMIN_OWNER)
            )
            admin = User.objects.get(username=settings.PROJECTROLES_ADMIN_OWNER)
        except User.DoesNotExist as e:
            raise CommandError(
                "Could not get configured admin user for stats rebuild with username {}".format(
                    settings.PROJECTROLES_ADMIN_OWNER
                )
            ) from e

        project = self._get_project(options["project_uuid"])
        rebuild_project_variant_stats(SQLALCHEMY_ENGINE, project, admin, self.stdout.write)
        self.stdout.write(self.style.SUCCESS("Done rebuilding project-wide stats"))

    def _get_project(self, project_uuid):
        """Return the project with the given UUID or raise CommandError."""
        try:
            return Project.objects.get(sodar_uuid=project_uuid)
        except ObjectDoesNotExist:
            raise CommandError("Project with UUID {} does not exist".format(project_uuid))
| 37.901639 | 96 | 0.698962 | 1,689 | 0.730536 | 0 | 0 | 839 | 0.362889 | 0 | 0 | 762 | 0.329585 |
7d33735c0c3bfee8be88bd6f1998151e93fe43b3 | 273 | py | Python | answers/Utkarsh Srivastava/Day 12/Question 2.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 22 | 2021-03-16T14:07:47.000Z | 2021-08-13T08:52:50.000Z | answers/Utkarsh Srivastava/Day 12/Question 2.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
] | 174 | 2021-03-16T21:16:40.000Z | 2021-06-12T05:19:51.000Z | answers/Utkarsh Srivastava/Day 12/Question 2.py | arc03/30-DaysOfCode-March-2021 | 6d6e11bf70280a578113f163352fa4fa8408baf6 | [
"MIT"
def count_pairs(s):
    """Return the number of index pairs (j, k) with j <= k and s[j] == s[k] == '1'.

    The original solution used a double loop (O(len(s)^2)); the count is
    simply m * (m + 1) // 2 where m is the number of '1' characters, so it
    can be computed in a single O(len(s)) pass.
    """
    ones = s.count('1')
    return ones * (ones + 1) // 2


def main():
    """Read t test cases (a length line, then a binary string) and print each answer.

    All answers are computed first and printed afterwards, matching the
    original script's output order.
    """
    t = int(input())
    answers = []
    for _ in range(t):
        length = int(input())
        bits = input()
        # Honour the declared length, as the original indexed only s[0:l].
        answers.append(count_pairs(bits[:length]))
    for ans in answers:
        print(ans)


if __name__ == '__main__':
    main()
7d348b05d1517aca67ab59bdc69e706d090649ac | 3,294 | py | Python | tests/cropSeqTest.py | schnamo/CIAlign | 6985d74bb9a59535bb01751fcb739dd5ca219607 | [
"MIT"
] | 60 | 2019-09-09T16:44:14.000Z | 2022-03-26T12:04:17.000Z | tests/cropSeqTest.py | schnamo/CIAlign | 6985d74bb9a59535bb01751fcb739dd5ca219607 | [
"MIT"
] | 16 | 2020-05-12T20:17:30.000Z | 2022-03-15T16:01:41.000Z | tests/cropSeqTest.py | schnamo/CIAlign | 6985d74bb9a59535bb01751fcb739dd5ca219607 | [
"MIT"
] | 3 | 2020-03-28T11:20:02.000Z | 2022-01-22T07:16:58.000Z | #! /usr/bin/env python
import unittest
from unittest import mock
from mock import patch
from parameterized import parameterized, parameterized_class
import sys
import logging
import numpy as np
from Bio import AlignIO
import os
from os import path
import CIAlign
import CIAlign.cropSeq as cropSeq
class CropSeqsTests(unittest.TestCase):
    """Unit tests for the gap-based cropping helpers in CIAlign.cropSeq."""

    # Each row: min-gap fraction, redefine fraction, aligned sequence,
    # expected crop start index, expected crop end index.
    @parameterized.expand([
            [0.1, 0.1, '--UC----UCUCUCUCGCGUGUGUGAAAAAAAAAAAAAAAA----AAAUUUU------------A', 8, 52],
            [0.1, 0.1, '--UC--AAA-----UCUCUCUCGCGUGUGUGAAAAAAAAAAA----AAAUUUU------------A', 3, 53],
            [0.05, 0.1, '--UC--AAA-----UCUCUCUCAAAAAAAAAAAAAAAAAAAA----AAAUUUU-----------A', 6, 53],
            [0.05, 0.3, '--UC--AAA-----UCUCUCUCGCGUGUGUAAAAAAAAAAAA----AAAUUUU------------A', 14, 42]
    ])
    def testDetermineStartEnd(self, mingap_perc, redefine_perc, input, expected_start, expected_end):
        """determineStartEnd should locate the first/last well-aligned positions."""
        seq = []
        seq.append([s for s in input])
        input = np.array(seq[0])
        # The logger's debug output is patched away; only the result matters.
        logger = logging.getLogger('path.to.module.under.test')
        with mock.patch.object(logger, 'debug') as mock_debug:
            start, end = cropSeq.determineStartEnd(input, "test_name", logger, mingap_perc, redefine_perc)
        self.assertEqual(start, expected_start)
        self.assertEqual(end, expected_end)

    # Each row: min-gap fraction, redefine fraction, sequence, expected value
    # scanning forwards, expected value scanning the reversed sequence.
    @parameterized.expand([[0.05, 0.1, '--UC----UCUCUCUCGCGUGUGUGAAAAAAAAAAAAAAAAAAAAAA----AAAUUUU------------A', 8, 13],
                           [0.05, 0.1, '--UC--AA-----UCUCUCUCGCGUGUGUGAAAAAAAAAAAAAAAAA----AAAUUUU------------A', 13, 13],
                           [0.05, 0.2, '--UC--AA-----UCUCUCUCGCGUGUGUGAAAAAAAAAAAAAAAAA----AAAUUUU------------A', 13, 24],
                           [0.02, 0.2, '--UC--AA-----UCUCUCUCGGGAGAGGCGUAUAAAUCGAUCGAUCGAUCGUACGAUCGUACGAUGCUCGUGUGUGAAAAAAAAAAAAAAAAAAAAAAAAAAAA----AAAUUUU------------A', 13, 24],
                           [0.02, 0.3, '--UC--AA-----UCUCUCUCGCGUGUGUGAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA----AAAUUUU------------A', 13, 24],
                           ])
    def testFindValue(self, mingap_perc, redefine_perc, input, expected_value, expected_reverse_value):
        """findValue should give consistent results forwards and on the reversed array."""
        seq = []
        seq.append([s for s in input])
        input = np.array(seq[0])
        reverse = input[::-1]
        value = cropSeq.findValue(input, mingap_perc, redefine_perc)
        reverseValue = cropSeq.findValue(reverse, mingap_perc, redefine_perc)
        self.assertEqual(value, expected_value)
        self.assertEqual(reverseValue, expected_reverse_value)

    # Each row: sequence and the expected cumulative gap counts per position,
    # encoded as a comma-separated string.
    @parameterized.expand([
            ['UCUCUCUCUCGCGUGUGUGAAAAAAAAAUUUUA', '0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0'],
            ['UCUC--UCUCUCGCG---UGUGUGAAAAAAAAAUUUUA---', '0,0,0,0,2,2,2,2,2,2,2,2,2,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5'],
            ['--UC----UCUCUCUCGCGUGUGUGAAAAAA----AAAUUUU------------A', '2,2,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,10,10,10,10,10,10,10,22'],
    ])
    def testCountGaps(self, input, expected_gaps):
        """countGaps should report the running gap totals for each residue."""
        seq = []
        seq.append([s for s in input])
        input = np.array(seq[0])
        gap_list = expected_gaps.split(",")
        expected = [int(s) for s in gap_list]
        gaps = cropSeq.countGaps(input)
        self.assertTrue(gaps == expected)
| 45.75 | 193 | 0.60929 | 2,991 | 0.908015 | 0 | 0 | 2,933 | 0.890407 | 0 | 0 | 1,178 | 0.35762 |
7d34ac8a177d53d04be76e0daa53138a4a06a173 | 1,871 | py | Python | cogdl/models/emb/rotate.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 1,072 | 2019-08-02T05:46:21.000Z | 2022-03-31T07:51:53.000Z | cogdl/models/emb/rotate.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 96 | 2019-08-05T17:27:22.000Z | 2022-03-03T08:36:57.000Z | cogdl/models/emb/rotate.py | cenyk1230/cogdl | fa1f74d5c3a15b5a52abfc7cd3f04dce4b7dbcce | [
"MIT"
] | 299 | 2019-08-08T07:33:10.000Z | 2022-03-31T09:30:07.000Z | import torch
from torch import Tensor
import torch.nn as nn
import torch.nn.functional as F
from .. import BaseModel, register_model
from .knowledge_base import KGEModel
@register_model("rotate")
class RotatE(KGEModel):
    r"""
    Implementation of RotatE model from the paper `"RotatE: Knowledge Graph Embedding by Relational Rotation in Complex Space"
    <https://openreview.net/forum?id=HkgEQnRqYQ>`.
    borrowed from `KnowledgeGraphEmbedding<https://github.com/DeepGraphLearning/KnowledgeGraphEmbedding>`

    Relations are interpreted as rotations in the complex plane: a triple
    scores highly when rotating the head embedding by the relation phase
    lands near the tail embedding.
    """

    def __init__(
        self, nentity, nrelation, hidden_dim, gamma, double_entity_embedding=False, double_relation_embedding=False
    ):
        # RotatE stores real and imaginary parts, so the entity embedding is
        # always doubled (the True below), whatever the caller passed.
        super(RotatE, self).__init__(nentity, nrelation, hidden_dim, gamma, True, double_relation_embedding)

    def score(self, head, relation, tail, mode):
        """Return gamma - ||rot(head, relation) - tail|| summed over the embedding dim."""
        pi = 3.14159265358979323846

        # split the doubled embeddings into real/imaginary halves
        head_re, head_im = torch.chunk(head, 2, dim=2)
        tail_re, tail_im = torch.chunk(tail, 2, dim=2)

        # Make phases of relations uniformly distributed in [-pi, pi]
        phase = relation / (self.embedding_range.item() / pi)
        rel_re = torch.cos(phase)
        rel_im = torch.sin(phase)

        if mode == "head-batch":
            # rotate the tail by the relation, compare against the head
            diff_re = rel_re * tail_re + rel_im * tail_im - head_re
            diff_im = rel_re * tail_im - rel_im * tail_re - head_im
        else:
            # rotate the head by the relation, compare against the tail
            diff_re = head_re * rel_re - head_im * rel_im - tail_re
            diff_im = head_re * rel_im + head_im * rel_re - tail_im

        # complex modulus of the residual, then margin-based score
        distance = torch.stack([diff_re, diff_im], dim=0).norm(dim=0)
        return self.gamma.item() - distance.sum(dim=2)
| 35.980769 | 126 | 0.670764 | 1,671 | 0.893105 | 0 | 0 | 1,697 | 0.907002 | 0 | 0 | 377 | 0.201497 |
7d397f642d7aef0cc79a1f55aff842d15208de96 | 198 | py | Python | reviewboard/site/evolutions/localsite_public.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | reviewboard/site/evolutions/localsite_public.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | reviewboard/site/evolutions/localsite_public.py | BarracudaPff/code-golf-data-pythpn | 42e8858c2ebc6a061012bcadb167d29cebb85c5e | [
"MIT"
] | null | null | null | from __future__ import unicode_literals
from django_evolution.mutations import AddField
from django.db import models
MUTATIONS = [AddField("LocalSite", "public", models.BooleanField, initial=False)] | 49.5 | 81 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 19 | 0.09596 |
7d3abeec63de15c18972f766ba7f61dbee88a419 | 996 | py | Python | PrintDocment.py | Humein/Algorithm-Swift | 771eef9b6156bf4a1b165a96b9154bbf60d6fdf2 | [
"MIT"
] | null | null | null | PrintDocment.py | Humein/Algorithm-Swift | 771eef9b6156bf4a1b165a96b9154bbf60d6fdf2 | [
"MIT"
] | null | null | null | PrintDocment.py | Humein/Algorithm-Swift | 771eef9b6156bf4a1b165a96b9154bbf60d6fdf2 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os
# Root of the local Algorithm-Swift checkout.
# NOTE(review): machine-specific absolute path -- consider deriving it from
# this file's own location instead of hard-coding a home directory.
BASE_DIR = "/Users/zhangxinxin/Code/Algorithm-Swift"

# (top-level category, its sub-directories), in the original print order.
# Replaces twelve copy-pasted print(os.listdir(...)) calls with one table.
CATEGORIES = [
    ("方法分类", ["二分查找", "双指针", "贪心算法", "DP", "Recursion"]),
    ("结构分类", ["二叉树", "链表", "排序", "数组", "栈"]),
]

for index, (category, subdirs) in enumerate(CATEGORIES):
    if index:
        # separator between the two category groups, as before
        print("===============")
    print(category, os.listdir(os.path.join(BASE_DIR, category)))
    for subdir in subdirs:
        print(subdir, os.listdir(os.path.join(BASE_DIR, category, subdir)))
| 49.8 | 87 | 0.737952 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 918 | 0.768844 |
7d3ea37ac9acf74a01f56012eb7dac104176c0aa | 1,392 | py | Python | mititools/mititools/serializers/frictionless.py | jimmymathews/MITI | 0745b051a02fd1055ff80af560683fdbb18d5651 | [
"MIT"
] | null | null | null | mititools/mititools/serializers/frictionless.py | jimmymathews/MITI | 0745b051a02fd1055ff80af560683fdbb18d5651 | [
"MIT"
] | null | null | null | mititools/mititools/serializers/frictionless.py | jimmymathews/MITI | 0745b051a02fd1055ff80af560683fdbb18d5651 | [
"MIT"
] | null | null | null | import os
from os import mkdir
from os.path import join
from os.path import exists
import json
import importlib.resources
import jinja2
from jinja2 import Environment
from jinja2 import BaseLoader
# Resolve the packaged jinja template and cache its contents at import time.
with importlib.resources.path('mititools', 'fd_schema.json.jinja') as file:
    jinja_environment = Environment(loader=BaseLoader)
    # bug fix: the original read via open(file, 'rt').read(), never closing
    # the handle; a context manager releases it deterministically
    with open(file, 'rt') as schema_file:
        fd_schema_file_contents = schema_file.read()
from ..default_values import fd_package_path
from ..name_manipulation import create_table_filename
from ..name_manipulation import create_auxiliary_table_filename
def write_frictionless(top_variables, data_tables):
    """Write a Frictionless data package: datapackage.json plus TSV tables.

    ``top_variables`` feeds the jinja template that renders the package
    descriptor; ``data_tables`` maps table names to dataframes, each dumped
    as a tab-separated file inside ``fd_package_path``.
    """
    # render, then round-trip through json purely to pretty-print it
    descriptor = json.loads(render_json_data_package(top_variables))
    payload = json.dumps(descriptor, indent=2)
    if not exists(fd_package_path):
        mkdir(fd_package_path)
    with open(join(fd_package_path, 'datapackage.json'), 'wt') as descriptor_file:
        descriptor_file.write(payload)
    for tablename, frame in data_tables.items():
        # a lone 'value' column marks an auxiliary table; everything else
        # gets a regular table filename
        if list(frame.columns) == ['value']:
            filename = create_auxiliary_table_filename(tablename)
        else:
            filename = create_table_filename(tablename)
        frame.to_csv(join(fd_package_path, filename), sep='\t', index=False)
def render_json_data_package(variables):
    """Render the cached fd_schema jinja template into a datapackage.json string."""
    return jinja_environment.from_string(fd_schema_file_contents).render(**variables)
| 32.372093 | 75 | 0.75431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 70 | 0.050287 |
7d400f7a4ed47d3dc5c52007ae1f8fcbedc5ec4c | 2,216 | py | Python | mvj/urls.py | tuomas777/mvj | e9a12e42c399b9fb77fd8fad85fc8f0f6d4ce405 | [
"MIT"
] | null | null | null | mvj/urls.py | tuomas777/mvj | e9a12e42c399b9fb77fd8fad85fc8f0f6d4ce405 | [
"MIT"
] | null | null | null | mvj/urls.py | tuomas777/mvj | e9a12e42c399b9fb77fd8fad85fc8f0f6d4ce405 | [
"MIT"
] | null | null | null | import rest_framework.urls
from django.conf import settings
from django.contrib import admin
from django.urls import include, path, re_path
from rest_framework import routers
from rest_framework_swagger.views import get_swagger_view
from leasing.views import ktj_proxy
from leasing.viewsets.basis_of_rent import BasisOfRentViewSet
from leasing.viewsets.comment import CommentTopicViewSet, CommentViewSet
from leasing.viewsets.contact import ContactViewSet
from leasing.viewsets.decision import DecisionViewSet
from leasing.viewsets.lease import (
DistrictViewSet, FinancingViewSet, HitasViewSet, IntendedUseViewSet, LeaseTypeViewSet, LeaseViewSet,
ManagementViewSet, MunicipalityViewSet, NoticePeriodViewSet, RegulationViewSet, StatisticalUseViewSet,
SupportiveHousingViewSet)
from users.viewsets import UserViewSet
router = routers.DefaultRouter()

# (url prefix, viewset) for every REST resource; registration order is
# preserved from the original one-call-per-line version.
_VIEWSET_ROUTES = (
    (r'basis_of_rent', BasisOfRentViewSet),
    (r'comment', CommentViewSet),
    (r'comment_topic', CommentTopicViewSet),
    (r'contact', ContactViewSet),
    (r'decision', DecisionViewSet),
    (r'district', DistrictViewSet),
    (r'financing', FinancingViewSet),
    (r'hitas', HitasViewSet),
    (r'intended_use', IntendedUseViewSet),
    (r'lease', LeaseViewSet),
    (r'lease_type', LeaseTypeViewSet),
    (r'management', ManagementViewSet),
    (r'municipality', MunicipalityViewSet),
    (r'notice_period', NoticePeriodViewSet),
    (r'regulation', RegulationViewSet),
    (r'statistical_use', StatisticalUseViewSet),
    (r'supportive_housing', SupportiveHousingViewSet),
    (r'user', UserViewSet),
)
for _prefix, _viewset in _VIEWSET_ROUTES:
    router.register(_prefix, _viewset)
urlpatterns = [
    # versioned REST API, resources registered on the DefaultRouter above
    path('v1/', include(router.urls)),
    # NOTE(review): presumably proxies KTJ printout PDFs through
    # leasing.views.ktj_proxy -- confirm against that view
    re_path(r'(?P<base_type>ktjki[ir])/tuloste/(?P<print_type>[\w/]+)/pdf', ktj_proxy),
    path('admin/', admin.site.urls),
    # DRF browsable-API login/logout views
    path('auth/', include(rest_framework.urls)),
    path('docs/', get_swagger_view(title='MVJ API')),
]
# Mount django-debug-toolbar only when DEBUG is on and the app is installed.
if settings.DEBUG and 'debug_toolbar' in settings.INSTALLED_APPS:
    import debug_toolbar
    urlpatterns = [path('__debug__/', include(debug_toolbar.urls)), ] + urlpatterns
| 43.45098 | 106 | 0.813628 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 358 | 0.161552 |
7d4204253a754ba266b79ffa36682d9cb36d8ff0 | 1,860 | py | Python | ines.py | FlightDev/YSPA | 5226712ebf305e7a3c686c43c996517a617f748b | [
"MIT"
] | null | null | null | ines.py | FlightDev/YSPA | 5226712ebf305e7a3c686c43c996517a617f748b | [
"MIT"
] | null | null | null | ines.py | FlightDev/YSPA | 5226712ebf305e7a3c686c43c996517a617f748b | [
"MIT"
] | null | null | null | import rebound
import math
from visual import *
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
k = 0.01720209895
how_long = 600
start_pos = vector(0, 0, 1.5)
start_v = vector(0.7, 0.7, 0)
sim = rebound.Simulation()
#sun:
sim.add(m = 1.)
#asteroid:
sim.add(m = 0, x = start_pos.x, y = start_pos.y, z = start_pos.z, vx = start_v.x/k, vy = start_v.y/k, vz = start_v.z/k)
#earth:
sim.add(m = 0.000003003, x = 6.050702901916951E-01, y = -8.085113449604454E-01, z = -5.299403058075317E-05, vx = (1.352973714877966E-02)/k, vy = (1.017946114599288E-02)/k, vz = (2.635007516883264E-07)/k )
#jupiter:
sim.add(m = 0.0009543, x = -3.136171264149830E+00, y = -4.376868856434548E+00, z = 8.830403590272071E-02, vx = 6.044270575179947E-03/k, vy =-4.035730426004453E-03/k, vz = -1.184535381952951E-04/k)
#saturn:
sim.add(m =.0002857, x = 1.152370623788473E+00, y =-9.990803088412557E+00, z = 1.278423486688079E-01, vx = 5.235192499208867E-03/k, vy = 6.213724626462464E-04/k, vz = -2.191864499860967E-04/k )
sim.dt = 0.01
sim.move_to_com()
time = 0
end_time = 2.*math.pi*how_long
ps = sim.particles
#sim.integrate(end_time)
#earth position vector
r = vector (ps[1].x, ps[1].y, ps[1].z )
earth = vector (ps[2].x, ps[2].y, ps[2].z )
e_a_distance = mag(r) - mag(earth)
closest_distance = abs( mag(r) - mag(earth) )
closest_time = time#/k
while time < end_time:
sim.integrate(time)
r = vector(ps[1].x, ps[1].y, ps[1].z )
#see if asteroid hits the earth
earth = vector (ps[2].x, ps[2].y, ps[2].z )
e_a_distance = mag(r) - mag(earth)
if abs(e_a_distance) < closest_distance:
closest_distance = mag(e_a_distance)
closest_time = time#/k
rdot = vector(ps[1].vx, ps[1].vy, ps[1].vz )
time = time + 0.01
print "closest distance = ", closest_distance
print "time = ", closest_time
| 30 | 204 | 0.67043 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 152 | 0.08172 |
7d42e175bdde08c8e8e91c34aecbf8486806dec0 | 26,362 | py | Python | tests/common/utils/config_utils.py | anniyanvr/snaps-kubernetes | 6114d15fdb476a2235cd73bd9118072c698ba045 | [
"Apache-2.0"
] | 20 | 2018-07-24T23:32:11.000Z | 2021-11-08T10:28:45.000Z | tests/common/utils/config_utils.py | anniyanvr/snaps-kubernetes | 6114d15fdb476a2235cd73bd9118072c698ba045 | [
"Apache-2.0"
] | 195 | 2018-07-25T19:59:44.000Z | 2021-12-15T04:39:27.000Z | tests/common/utils/config_utils.py | anniyanvr/snaps-kubernetes | 6114d15fdb476a2235cd73bd9118072c698ba045 | [
"Apache-2.0"
] | 7 | 2018-08-23T11:35:57.000Z | 2020-06-29T08:25:25.000Z | # Copyright 2018 ARICENT HOLDINGS LUXEMBOURG SARL and Cable Television
# Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import pkg_resources
from snaps_common.file import file_utils
from snaps_k8s.common.consts import consts
from snaps_k8s.common.utils import config_utils
class ConfigUtilsTests(unittest.TestCase):
"""
Tests for snaps_k8s.common.utils.config_utils.py
"""
def setUp(self):
config_file = pkg_resources.resource_filename(
'tests.conf', 'deployment.yaml')
self.config = file_utils.read_yaml(config_file)
self.node_list = self.config[consts.K8S_KEY][consts.NODE_CONF_KEY]
self.network_list = self.config[consts.K8S_KEY][consts.NETWORKS_KEY]
self.persis_vol = self.config[consts.K8S_KEY][consts.PERSIST_VOL_KEY]
def test_get_proxy_dict(self):
"""
Ensures proxy values are properly parsed
"""
proxy_dict = config_utils.get_proxy_dict(self.config)
expected = self.config[consts.K8S_KEY][consts.PROXIES_KEY]
self.assertEqual(expected, proxy_dict)
def test_get_networks(self):
"""
Ensures network values are properly parsed
"""
networks_data = config_utils.get_networks(self.config)
expected = self.config[consts.K8S_KEY][consts.NETWORKS_KEY]
self.assertEqual(expected, networks_data)
def test_get_multus_network(self):
"""
Ensures MuLtus network configuration is properly parsed
"""
multus_networks_data = config_utils.get_multus_network(self.config)
mult_config = self.network_list[1][consts.MULTUS_NET_KEY]
self.assertEqual(mult_config, multus_networks_data)
def test_get_multus_net_elems(self):
"""
Ensures Multus CNI elements are properly parsed
"""
multus_net_elems = config_utils.get_multus_net_elems(self.config)
expected = self.network_list[1][consts.MULTUS_NET_KEY][0][consts.MULTUS_CNI_KEY]
self.assertEqual(expected, multus_net_elems)
def test_get_multus_cni_cfgs(self):
"""
Ensures Multus CNI element configuration is properly parsed
"""
multus_cni_cfgs = config_utils.get_multus_cni_cfgs(self.config)
expected = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
self.assertEqual(expected, multus_cni_cfgs)
def test_get_multus_cni_flannel_cfgs(self):
"""
Ensures Flannel network values are properly parsed
"""
cni_cfg = config_utils.get_multus_cni_flannel_cfgs(self.config)
multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
expected = multus_cni[0][consts.FLANNEL_NET_TYPE]
self.assertEqual(expected, cni_cfg)
def test_multus_cni_macvlan_cfgs(self):
"""
Ensures Macvlan network values are properly parsed
"""
macvlan_cfg = config_utils.get_multus_cni_macvlan_cfgs(self.config)
multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
expected = multus_cni[2][consts.MACVLAN_NET_TYPE]
self.assertEqual(expected, macvlan_cfg)
def test_multus_cni_sriov_cfgs(self):
"""
Ensures SRIOV network values are properly parsed
"""
sriov_cfg = config_utils.get_multus_cni_sriov_cfgs(self.config)
multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
expected = multus_cni[3][consts.SRIOV_NET_TYPE]
self.assertEqual(expected, sriov_cfg)
def test_get_multus_cni_weave_cfgs(self):
"""
Ensures Weave network values are properly parsed
"""
weave_cfg = config_utils.get_multus_cni_weave_cfgs(self.config)
multus_cni = self.network_list[1][consts.MULTUS_NET_KEY][1][consts.MULTUS_CNI_CONFIG_KEY]
expected = multus_cni[1][consts.WEAVE_NET_TYPE]
self.assertEqual(expected, weave_cfg)
def test_is_multus_cni_enabled(self):
    """
    Ensures Multus CNI status is properly parsed.

    Bug fix: ``(consts.SRIOV_TYPE or ... or consts.MACVLAN_TYPE) in cni_list``
    only ever tested ``consts.SRIOV_TYPE in cni_list`` because ``or``
    returns its first truthy operand; the expected value must be True when
    *any* of the CNI types is present.
    """
    multus_cni = config_utils.is_multus_cni_enabled(self.config)
    cni_list = self.network_list[1][consts.MULTUS_NET_KEY][0][consts.MULTUS_CNI_KEY]
    cni_types = (consts.SRIOV_TYPE, consts.FLANNEL_TYPE,
                 consts.WEAVE_TYPE, consts.MACVLAN_TYPE)
    expected_multus_cni = any(cni_type in cni_list for cni_type in cni_types)
    self.assertEqual(expected_multus_cni, multus_cni)
def test_get_default_network(self):
"""
Ensures default network values are properly parsed
"""
default_network = config_utils.get_default_network(self.config)
expected = self.network_list[0][consts.DFLT_NET_KEY]
self.assertEqual(expected, default_network)
def test_get_service_subnet(self):
"""
Ensures service subnet value of the default network is properly parsed
"""
service_subnet = config_utils.get_service_subnet(self.config)
expected = self.network_list[0][consts.DFLT_NET_KEY][consts.SRVC_SUB_KEY]
self.assertEqual(expected, service_subnet)
def test_get_networking_plugin(self):
"""
Ensures networking plugin value of the default network is properly parsed
"""
networking_plugin = config_utils.get_networking_plugin(self.config)
expected = self.network_list[0][consts.DFLT_NET_KEY][consts.NET_PLUGIN_KEY]
self.assertEqual(expected, networking_plugin)
def test_get_pod_subnet(self):
"""
Ensures pod subnet value of the default network is properly parsed
"""
pod_subnet = config_utils.get_pod_subnet(self.config)
expected = self.network_list[0][consts.DFLT_NET_KEY][consts.POD_SUB_KEY]
self.assertEqual(expected, pod_subnet)
def test_get_version(self):
"""
Ensures Kubernetes version is properly parsed
"""
version_data = config_utils.get_version(self.config)
expected = self.config[consts.K8S_KEY][consts.K8_VER_KEY]
self.assertEqual(expected, version_data)
def test_get_ha_config(self):
"""
Ensures HA configuration values are properly parsed
"""
ha_config = config_utils.get_ha_config(self.config)
expected = self.config[consts.K8S_KEY][consts.HA_CONFIG_KEY]
self.assertEqual(expected, ha_config)
def test_get_ha_lb_ips(self):
"""
Ensures HA loadbalancer IP values are properly parsed
"""
ha_lb_ips = config_utils.get_ha_lb_ips(self.config)
expected_lb_ips_list = list()
for config_element in self.config[consts.K8S_KEY][consts.HA_CONFIG_KEY]:
expected_lb_ips_list.append(config_element[consts.HA_API_EXT_LB_KEY][consts.IP_KEY])
self.assertEqual(expected_lb_ips_list, ha_lb_ips)
def test_get_node_configs(self):
"""
Ensures node configuration settings are properly parsed
"""
node_configs = config_utils.get_node_configs(self.config)
expected = self.config[consts.K8S_KEY][consts.NODE_CONF_KEY]
self.assertEqual(expected, node_configs)
def test_get_hostname_ips_dict(self):
"""
Ensures hostnames and IPs of the nodes are properly parsed
"""
hostname_ips_dict = config_utils.get_hostname_ips_dict(self.config)
hostname_ips = dict()
for node in self.node_list:
hostname_ips[node[consts.HOST_KEY][consts.HOSTNAME_KEY]] = node[consts.HOST_KEY][consts.IP_KEY]
self.assertEqual(hostname_ips, hostname_ips_dict)
def test_get_host_reg_port_dict(self):
"""
Ensures hostnames and registry port value of the nodes are properly parsed
"""
host_reg_port_dict = config_utils.get_host_reg_port_dict(self.config)
host_reg_port = dict()
for node in self.node_list:
host_reg_port[node[consts.HOST_KEY][consts.HOSTNAME_KEY]] = node[consts.HOST_KEY][consts.REG_PORT_KEY]
self.assertEqual(host_reg_port, host_reg_port_dict)
def test_get_host_ips(self):
"""
Ensures the list of host IPs are properly parsed
"""
host_ips = config_utils.get_host_ips(self.config)
host_ips_cfg = list()
for node in self.node_list:
host_ips_cfg.append(node[consts.HOST_KEY][consts.IP_KEY])
self.assertEqual(host_ips_cfg, host_ips)
def test_get_hosts(self):
"""
Ensures the list of hostnames of the configured nodes are properly parsed
"""
hosts = config_utils.get_hosts(self.config)
host_cfg = list()
for node in self.node_list:
host_cfg.append(node[consts.HOST_KEY][consts.HOSTNAME_KEY])
self.assertEqual(host_cfg, hosts)
def test_get_basic_auth(self):
"""
Ensures the basic authentication settings are properly parsed
"""
basic_auth = config_utils.get_basic_auth(self.config)
expected = self.config[consts.K8S_KEY][consts.BASIC_AUTH_KEY]
self.assertEqual(expected, basic_auth)
def test_get_project_name(self):
"""
Ensures the project name value is properly parsed
"""
project_name = config_utils.get_project_name(self.config)
expected = self.config[consts.K8S_KEY][consts.PROJECT_NAME_KEY]
self.assertEqual(expected, project_name)
def test_get_artifact_dir(self):
"""
Ensures the artifact directory location is properly parsed
"""
artifact_dir = config_utils.get_artifact_dir(self.config)
expected = os.path.expanduser('~/tmp')
self.assertEqual(expected, artifact_dir)
def test_get_project_dir(self):
"""
Ensures the project location is properly parsed
"""
expected_artifact_dir = os.path.expanduser('~/tmp')
project_name = config_utils.get_project_name(self.config)
expected = "{}/{}/{}".format(
expected_artifact_dir, consts.PROJ_DIR_NAME, project_name)
proj_dir = config_utils.get_project_artifact_dir(self.config)
self.assertEqual(expected, proj_dir)
def test_get_kubespray_dir(self):
"""
Ensures the kubespray location is properly parsed
"""
expected_artifact_dir = os.path.expanduser('~/tmp')
expected = "{}/{}".format(expected_artifact_dir,
consts.KUBESPRAY_FOLDER_NAME)
proj_dir = config_utils.get_kubespray_dir(self.config)
self.assertEqual(expected, proj_dir)
def test_get_docker_repo(self):
"""
Ensures the Docker Repo settings are properly parsed
"""
docker_repo = config_utils.get_docker_repo(self.config)
expected = self.config[consts.K8S_KEY][consts.DOCKER_REPO_KEY]
self.assertEqual(expected, docker_repo)
def test_get_persis_vol(self):
"""
Ensures the Persistent Volume settings are properly parsed
"""
persis_vol = config_utils.get_persist_vol(self.config)
expected = self.persis_vol
self.assertEqual(expected, persis_vol)
def test_get_ceph_vol(self):
"""
Ensures the Ceph Volume settings are properly parsed
"""
ceph_vol = config_utils.get_ceph_vol(self.config)
expected = self.persis_vol[consts.CEPH_VOLUME_KEY]
self.assertEqual(expected, ceph_vol)
def test_get_ceph_hosts(self):
"""
Ensures the Ceph host settings are properly parsed
"""
ceph_hosts = config_utils.get_ceph_hosts(self.config)
ceph_hosts_cfg = list()
if self.config[consts.K8S_KEY][consts.PERSIST_VOL_KEY][consts.CEPH_VOLUME_KEY]:
for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
if consts.HOST_KEY in ceph_host:
ceph_hosts_cfg.append(ceph_host[consts.HOST_KEY])
self.assertEqual(ceph_hosts_cfg, ceph_hosts)
def test_get_ceph_hosts_info(self):
"""
Ensures the hostname, IP and type value of the Ceph hosts are properly parsed
"""
ceph_hosts_info = config_utils.get_ceph_hosts_info(self.config)
ceph_hosts_info_cfg = list()
for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
ceph_hosts_info_cfg.append((ceph_host[consts.HOST_KEY][consts.HOSTNAME_KEY],
ceph_host[consts.HOST_KEY][consts.IP_KEY],
ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(ceph_hosts_info_cfg, ceph_hosts_info)
def test_get_ceph_ctrls(self):
"""
Ensures the Ceph control host configuration is properly parsed
"""
ceph_ctrls = config_utils.get_ceph_ctrls(self.config)
ceph_ctrls_cfg = list()
for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_CTRL_TYPE:
ceph_ctrls_cfg.append(ceph_host[consts.HOST_KEY])
self.assertEqual(ceph_ctrls_cfg, ceph_ctrls)
def test_get_ceph_ctrls_info(self):
"""
Ensures the hostname, IP and type value of the Ceph control hosts are properly parsed
"""
ceph_ctrls_info = config_utils.get_ceph_ctrls_info(self.config)
ceph_ctrls_info_cfg = list()
for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_CTRL_TYPE:
ceph_ctrls_info_cfg.append((ceph_host[consts.HOST_KEY][consts.HOSTNAME_KEY],
ceph_host[consts.HOST_KEY][consts.IP_KEY],
ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(ceph_ctrls_info_cfg, ceph_ctrls_info)
def test_get_ceph_osds(self):
"""
Ensures the Ceph OSD host settings are properly parsed
"""
ceph_osds = config_utils.get_ceph_osds(self.config)
ceph_osds_cfg = list()
for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_OSD_TYPE:
ceph_osds_cfg.append(ceph_host[consts.HOST_KEY])
self.assertEqual(ceph_osds_cfg, ceph_osds)
def test_get_ceph_osds_info(self):
"""
Ensures the hostname, IP and type value of the Ceph OSD hosts are properly parsed
"""
ceph_osds_info = config_utils.get_ceph_osds_info(self.config)
ceph_osds_info_cfg = list()
for ceph_host in self.persis_vol[consts.CEPH_VOLUME_KEY]:
if ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.CEPH_OSD_TYPE:
ceph_osds_info_cfg.append((ceph_host[consts.HOST_KEY][consts.HOSTNAME_KEY],
ceph_host[consts.HOST_KEY][consts.IP_KEY],
ceph_host[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(ceph_osds_info_cfg, ceph_osds_info)
def test_get_host_vol(self):
"""
Ensures the Host Volume settings are properly parsed
"""
host_vol = config_utils.get_host_vol(self.config)
expected = self.persis_vol[consts.HOST_VOL_KEY]
self.assertEqual(expected, host_vol)
def test_get_persist_vol_claims(self):
"""
Ensures the Claim parameters of the Host Volume are properly parsed
"""
persist_vol_claims = config_utils.get_persist_vol_claims(self.config)
persist_vol_claims_cfg = list()
for persist_vol in self.persis_vol[consts.HOST_VOL_KEY]:
if consts.CLAIM_PARAMS_KEY in persist_vol:
persist_vol_claims_cfg.append(persist_vol[consts.CLAIM_PARAMS_KEY])
self.assertEqual(persist_vol_claims_cfg, persist_vol_claims)
def test_get_first_master_host(self):
    """
    Ensures the hostname and IP of the first master host found in the
    config are properly parsed.
    """
    hostname, ip = config_utils.get_first_master_host(self.config)
    expected = None
    for node in self.node_list:
        host = node[consts.HOST_KEY]
        if host[consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER:
            expected = (host[consts.HOSTNAME_KEY], host[consts.IP_KEY])
            break
    # Fail with a clear assertion when the fixture has no master node (the
    # original raised NameError on unbound locals), and compare as an
    # ordered pair -- assertItemsEqual is order-insensitive, so a swapped
    # (ip, hostname) result would previously have passed.
    self.assertIsNotNone(expected)
    self.assertEqual(expected, (hostname, ip))
def test_get_nodes_ip_name_type(self):
"""
Ensures the hostname, IP and type value of all configured hosts are properly parsed
"""
nodes_ip_name_type = config_utils.get_nodes_ip_name_type(self.config)
nodes_ip_name_type_cfg = list()
for node in self.node_list:
nodes_ip_name_type_cfg.append((node[consts.HOST_KEY][consts.HOSTNAME_KEY],
node[consts.HOST_KEY][consts.IP_KEY],
node[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(nodes_ip_name_type_cfg, nodes_ip_name_type)
def test_get_master_nodes_ip_name_type(self):
"""
Ensures the hostname, IP and type value of all configured master hosts are properly parsed
"""
master_ip_name_type = config_utils.get_master_nodes_ip_name_type(self.config)
master_ip_name_type_cfg = list()
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER:
master_ip_name_type_cfg.append((node[consts.HOST_KEY][consts.HOSTNAME_KEY],
node[consts.HOST_KEY][consts.IP_KEY],
node[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(master_ip_name_type_cfg, master_ip_name_type)
def test_get_master_node_ips(self):
"""
Ensures the IP address of all configured master hosts are properly parsed
"""
master_node_ips = config_utils.get_master_node_ips(self.config)
master_node_ips_cfg = list()
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MASTER:
master_node_ips_cfg.append(node[consts.HOST_KEY][consts.IP_KEY])
self.assertEqual(master_node_ips_cfg, master_node_ips)
def test_get_minion_nodes_ip_name_type(self):
"""
Ensures the hostname, IP and type value of all configured minion hosts are properly parsed
"""
minion_ip_name_type = config_utils.get_minion_nodes_ip_name_type(self.config)
minion_ip_name_type_cfg = list()
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MINION:
minion_ip_name_type_cfg.append((node[consts.HOST_KEY][consts.HOSTNAME_KEY],
node[consts.HOST_KEY][consts.IP_KEY],
node[consts.HOST_KEY][consts.NODE_TYPE_KEY]))
self.assertEqual(minion_ip_name_type_cfg, minion_ip_name_type)
def test_get_minion_node_ips(self):
"""
Ensures the IP address of all configured minion hosts are properly parsed
"""
minion_node_ips = config_utils.get_minion_node_ips(self.config)
minion_node_ips_cfg = list()
for node in self.node_list:
if node[consts.HOST_KEY][consts.NODE_TYPE_KEY] == consts.NODE_TYPE_MINION:
minion_node_ips_cfg.append(node[consts.HOST_KEY][consts.IP_KEY])
self.assertItemsEqual(minion_node_ips_cfg, minion_node_ips)
def test_is_logging_enabled(self):
    """
    Tests to ensure that different string and boolean values return their
    expected values.

    Note: the original built ``this_cfg`` via ``{}.update(self.config)``,
    a *shallow* copy, so mutating the nested k8s dict mutated
    ``self.config`` anyway; this version drops the misleading copy and
    table-drives the ten duplicated assert blocks.
    """
    cases = [
        (True, True), ('True', True), ('true', True), ('yes', True),
        ('foo', False), (False, False), ('False', False),
        ('false', False), ('no', False), (None, False),
    ]
    for value, expected in cases:
        self.config[consts.K8S_KEY][consts.ENABLE_LOG_KEY] = value
        self.assertEqual(
            expected, config_utils.is_logging_enabled(self.config),
            'unexpected result for %r' % (value,))
def test_get_log_level(self):
"""
Ensures that the logging level is getting properly parsed
"""
expected_log_level = self.config[consts.K8S_KEY][consts.LOG_LEVEL_KEY]
log_level = config_utils.get_log_level(self.config)
self.assertEqual(expected_log_level, log_level)
def test_get_logging_port(self):
"""
Ensures that the port returned is what is expected and is always a
string
"""
expected_port = self.config[consts.K8S_KEY][consts.LOG_PORT_KEY]
port = config_utils.get_logging_port(self.config)
self.assertEqual(expected_port, port)
# tests that a numeric value is returned as a string
this_cfg = {}
this_cfg.update(self.config)
this_cfg[consts.K8S_KEY][consts.LOG_PORT_KEY] = 1000
port = config_utils.get_logging_port(this_cfg)
self.assertEqual('1000', port)
def test_is_cpu_alloc(self):
"""
Tests to ensure that different string and boolean values return their
expected values
"""
this_cfg = {}
this_cfg.update(self.config)
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = True
self.assertTrue(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'True'
self.assertTrue(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'true'
self.assertTrue(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'yes'
self.assertTrue(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'foo'
self.assertFalse(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = False
self.assertFalse(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'False'
self.assertFalse(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'false'
self.assertFalse(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = 'no'
self.assertFalse(config_utils.is_cpu_alloc(self.config))
this_cfg[consts.K8S_KEY][consts.CPU_ALLOC_KEY] = None
self.assertFalse(config_utils.is_cpu_alloc(self.config))
def test_is_metrics_server(self):
"""
Tests to ensure that different string and boolean values return their
expected values
"""
this_cfg = {}
this_cfg.update(self.config)
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = True
self.assertTrue(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'True'
self.assertTrue(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'true'
self.assertTrue(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'yes'
self.assertTrue(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'foo'
self.assertFalse(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = False
self.assertFalse(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'False'
self.assertFalse(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'false'
self.assertFalse(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = 'no'
self.assertFalse(config_utils.is_metrics_server_enabled(self.config))
this_cfg[consts.K8S_KEY][consts.METRICS_SERVER_KEY] = None
self.assertFalse(config_utils.is_metrics_server_enabled(self.config))
def test_get_password(self):
node_confs = config_utils.get_node_configs(self.config)
for node_conf in node_confs:
password = config_utils.get_node_password(
self.config, node_conf[consts.HOST_KEY][consts.HOSTNAME_KEY])
self.assertEqual('password', password)
| 43.50165 | 119 | 0.681739 | 25,543 | 0.968933 | 0 | 0 | 0 | 0 | 0 | 0 | 5,188 | 0.196798 |
7d44b7e8f5a379cc7b50059795fc7b51e4005b04 | 361 | py | Python | hy-data-analysis-with-python-spring-2020/part03-e07_meeting_planes/src/meeting_planes.py | Melimet/DAP2020 | 0854fe4ce8ace6abf6dc0bbcf71984595ff6d42a | [
"MIT"
] | null | null | null | hy-data-analysis-with-python-spring-2020/part03-e07_meeting_planes/src/meeting_planes.py | Melimet/DAP2020 | 0854fe4ce8ace6abf6dc0bbcf71984595ff6d42a | [
"MIT"
] | null | null | null | hy-data-analysis-with-python-spring-2020/part03-e07_meeting_planes/src/meeting_planes.py | Melimet/DAP2020 | 0854fe4ce8ace6abf6dc0bbcf71984595ff6d42a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import numpy as np
def meeting_planes(a1, b1, c1, a2, b2, c2, a3, b3, c3):
    """Return the point (x, y, z) where the three given planes meet.

    Each plane is given by its coefficients in the form
    z = a*y + b*x + c (per the exercise statement — confirm the
    coefficient order against the course material).  Rewriting each
    plane as b*x + a*y - z = -c yields a 3x3 linear system, which is
    solved with numpy.

    The original stub returned [], which made the three-way unpack in
    main() raise ValueError; this implementation returns the actual
    intersection point.

    Raises:
        numpy.linalg.LinAlgError: if the planes do not intersect in a
            single point (singular coefficient matrix).
    """
    coefficients = np.array([[b1, a1, -1.0],
                             [b2, a2, -1.0],
                             [b3, a3, -1.0]])
    constants = np.array([-c1, -c2, -c3])
    x, y, z = np.linalg.solve(coefficients, constants)
    return x, y, z
def main():
    """Demo: compute and print where three example planes meet."""
    # Coefficients of the three example planes.
    plane1 = (1, 4, 5)
    plane2 = (3, 2, 1)
    plane3 = (2, 4, 1)
    x, y, z = meeting_planes(*plane1, *plane2, *plane3)
    print(f"Planes meet at x={x}, y={y} and z={z}")


if __name__ == "__main__":
    main()
| 15.041667 | 64 | 0.518006 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 68 | 0.188366 |
7d473618101c1bf818cfd31f50f7230e32057c47 | 566 | py | Python | setup.py | iatlab/datas-utils | b8eef303de5a5d5a57182c0627b721dde0b6b300 | [
"MIT"
] | null | null | null | setup.py | iatlab/datas-utils | b8eef303de5a5d5a57182c0627b721dde0b6b300 | [
"MIT"
] | null | null | null | setup.py | iatlab/datas-utils | b8eef303de5a5d5a57182c0627b721dde0b6b300 | [
"MIT"
] | null | null | null | # -*- coding:utf-8 -*-
from setuptools import setup
# Package metadata and distribution configuration for datas_utils.
setup(
    name="datas_utils",
    version="0.0.1",
    description="Tools for Datas Project",
    author="Makoto P. Kato",
    author_email="mpkato@acm.org",
    license="MIT License",
    url="https://github.com/iatlab/datas_utils",
    packages=[
        "datas_utils",
        "datas_utils.env",
        "datas_utils.log",
        "datas_utils.aws",
    ],
    install_requires=["boto3>=1.9.3", "mysql-connector-python>=8.0.12"],
    tests_require=["nose"],
)
| 28.3 | 74 | 0.55477 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 267 | 0.471731 |
7d47e3a2c72557ea55cdd53dea81dcefd1f28f34 | 181 | py | Python | Lib/fontTools/ttLib/tables/T_S_I_B_.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 240 | 2021-01-11T14:49:24.000Z | 2022-03-29T22:33:49.000Z | Lib/fontTools/ttLib/tables/T_S_I_B_.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 77 | 2021-01-12T20:23:30.000Z | 2022-03-28T12:14:34.000Z | Lib/fontTools/ttLib/tables/T_S_I_B_.py | twardoch/fonttools-py27 | 75b852d3f59fc0d03c6e78581530597d4c6368a1 | [
"MIT",
"BSD-3-Clause"
] | 28 | 2021-01-17T05:44:11.000Z | 2022-01-11T19:58:46.000Z | from __future__ import print_function, division, absolute_import
from fontTools.misc.py23 import *
from .T_S_I_V_ import table_T_S_I_V_
class table_T_S_I_B_(table_T_S_I_V_):
    """Font table ``TSIB``.

    The body is empty: all parsing/compiling behavior is inherited
    unchanged from ``table_T_S_I_V_``; this subclass exists only so the
    table is registered under its own tag.
    """
    pass
| 25.857143 | 64 | 0.845304 | 43 | 0.237569 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 |
7d4905910c88a739b35dc4edd1e33b3e65ae835a | 1,012 | py | Python | src/paths_to_inodes_paths.py | poponealex/suprenam | d57c99a2e43ad659b9ed70830402f46e7d31e02e | [
"MIT"
] | 8 | 2022-03-05T19:41:37.000Z | 2022-03-06T08:04:43.000Z | src/paths_to_inodes_paths.py | poponealex/suprenam | d57c99a2e43ad659b9ed70830402f46e7d31e02e | [
"MIT"
] | 2 | 2022-01-25T18:57:17.000Z | 2022-03-14T13:24:59.000Z | src/paths_to_inodes_paths.py | poponealex/suprenam | d57c99a2e43ad659b9ed70830402f46e7d31e02e | [
"MIT"
] | null | null | null | from pathlib import Path
from typing import List
from src.user_errors import NoItemToRenameError
from src.user_types import Inode, InodesPaths
def paths_to_inodes_paths(paths: List[Path]) -> InodesPaths:
    """Map each existing path onto the inode that currently backs it.

    Args:
        paths: list of Path objects.

    Raises:
        FileNotFoundError: if any of the paths does not exist.
        NoItemToRenameError: if no path at all was provided.

    Returns:
        A mapping from inodes to paths.
    """
    inodes_paths = {}
    missing_paths = []
    for path in paths:
        if path.exists():
            inodes_paths[Inode(path.stat().st_ino)] = path
        else:
            missing_paths.append(path)
    if missing_paths:
        n = len(missing_paths)
        # n ^ 1 maps 1 -> 0 (no "s") and anything else -> nonzero ("s").
        raise FileNotFoundError(f"{n} missing item{'s'[:n^1]}: {list(map(str,missing_paths))}.")
    if not inodes_paths:
        raise NoItemToRenameError("No item to rename was provided.")
    return inodes_paths
| 28.111111 | 96 | 0.651186 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 430 | 0.424901 |
7d4b8c1cbb3320cfef8a2500aa0c61c3209c9888 | 9,655 | py | Python | faro/utils.py | cgiraldo/FARO | aa599fe8eebb489fe032549ec52771574a6d04bd | [
"MIT"
] | null | null | null | faro/utils.py | cgiraldo/FARO | aa599fe8eebb489fe032549ec52771574a6d04bd | [
"MIT"
] | null | null | null | faro/utils.py | cgiraldo/FARO | aa599fe8eebb489fe032549ec52771574a6d04bd | [
"MIT"
] | null | null | null | import re
import gensim.utils as gensim_utils
def normalize_text_proximity(message):
    """Lowercase the text, strip Spanish accents and drop dots that
    appear between two letters (e.g. "a.b" -> "ab").

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    text = message.lower()
    for accented, plain in (("á", "a"), ("é", "e"), ("í", "i"),
                            ("ó", "o"), ("ú", "u")):
        text = text.replace(accented, plain)
    # Remove a '.' only when it sits directly between two letters.
    return re.sub(r'(?i)(?<=[a-z])\.(?=[a-z])', "", text)
def clean_text(message):
    """Strip punctuation runs, spaces, and a leading 'nº' marker before
    validation.

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    # Applied in order: punctuation runs, then spaces, then 'nº'.
    cleanup_patterns = (r'[\-_*+,\(\).:]{1,}', r'[ ]{1,}', r'(?i)\bnº')
    result = message
    for pattern in cleanup_patterns:
        result = re.sub(pattern, "", result)
    return result
def preprocess_text(message):
    """Convert the text to unicode and flatten tabs/newlines to spaces.

    Keyword arguments:
    message -- a plain sentence or paragraph
    """
    text = gensim_utils.to_unicode(message)
    # "\r\n" is handled before "\r" and "\n" so a Windows line ending
    # collapses to a single space instead of two.
    for separator in ("\t", "\r\n", "\r", "\n"):
        text = text.replace(separator, " ")
    return text
def word2features(sent, i):
    """ Extract features of a node in the "sent" list for a CRF

    Keyword arguments:
    sent -- a list of triples <word, PoS tag, label>
    i -- index of the node to extract the features

    Returns a dict mapping feature-name strings to values.  The exact
    key spellings and window guards below are what the trained CRF
    model expects; do not "normalize" them without retraining.
    """
    word = sent[i][0]
    postag = sent[i][1]

    # Features of the current token itself.
    features = {
        'bias': 1.0,
        'word': word,
        'word.lower()': word.lower(),
        'word.istitle()': word.istitle(),
        'word[-3:]': word[-3:],
        'word[:3]': word[:3],
        'word.isdigit()': word.isdigit(),
        'postag': postag,
    }

    # Previous token (offset -1), or a beginning-of-sentence marker.
    # NOTE(review): key is '-1:word.istitle' (no parentheses) while the
    # current-token key is 'word.istitle()' — inconsistent but presumably
    # what the model was trained with; confirm before changing.
    if i > 0:
        word1 = sent[i-1][0]
        postag1 = sent[i-1][1]
        features.update({
            '-1:word': word1,
            '-1:word.lower()': word1.lower(),
            '-1:word.istitle': word1.istitle(),
            '-1:postag': postag1,
        })
    else:
        features['BOS'] = True

    # EXTRA
    # NOTE(review): the guard is 'i > 2' although sent[i-2] is already
    # valid for i == 2; looks like an off-by-one, but matching the
    # training-time features matters more — confirm before changing.
    if i > 2:
        word1 = sent[i-2][0]
        postag1 = sent[i-2][1]
        features.update({
            '-2:word': word1,
            '-2:word.lower()': word1.lower(),
            '-2:word.istitle': word1.istitle(),
            '-2:word.postag': postag1,
        })

    # Token at offset -3 (same off-by-one-looking guard: 'i > 3').
    if i > 3:
        word1 = sent[i-3][0]
        postag1 = sent[i-3][1]
        features.update({
            '-3:word': word1,
            '-3:word.lower()': word1.lower(),
            '-3:word.istitle': word1.istitle(),
            '-3:word.postag': postag1,
        })

    # Bigram features combining the previous token (i-1) with the
    # current one: concatenated surfaces and cross word/PoS pairs.
    if i > 2:
        word0 = sent[i][0]
        postag0 = sent[i][1]
        word1 = sent[i-1][0]
        postag1 = sent[i-1][1]
        features.update({
            '-01:word': word1 + word0,
            '-01:word.lower()': (word1 + " " + word0).lower(),
            '-01:word0_postag1': postag1 + word0,
            '-01:word1_postag0': postag0 + word1,
        })

    # Skip-bigram features combining token i-2 with the current one.
    if i > 3:
        word0 = sent[i][0]
        word1 = sent[i-2][0]
        postag0 = sent[i][1]
        postag1 = sent[i-2][1]
        features.update({
            '-02:word': word1 + word0,
            '-02:word.lower()': (word1 + " " + word0).lower(),
            '-02:word0_postag1': postag1 + word0,
            '-02:word1_postag0': postag0 + word1,
        })

    # Token at offset +2.
    if i < len(sent) - 2:
        word1 = sent[i+2][0]
        postag1 = sent[i+2][1]
        features.update({
            '+2:word': word1,
            '+2:word.lower()': word1.lower(),
            '+2:word.istitle': word1.istitle(),
            '+2:word.postag': postag1,
        })

    # Next token (offset +1), or an end-of-sentence marker.
    # NOTE(review): here the key '+1:word.istitle()' DOES have
    # parentheses, unlike '-1:word.istitle' above.
    if i < len(sent)-1:
        word1 = sent[i+1][0]
        postag1 = sent[i+1][1]
        features.update({
            '+1:word': word1,
            '+1:word.lower()': word1.lower(),
            '+1:word.istitle()': word1.istitle(),
            '+1:postag': postag1,
        })
    else:
        features['EOS'] = True

    return features
def char2features_mail(sent, i):
    """Build the per-character feature dict used by the mail CRF.

    Keyword arguments:
    sent -- a list of pairs <character, label>
    i -- index of the node to extract the features
    """
    features = {
        'bias': 1.0,
        'char.lower()': sent[i][0].lower(),
    }

    # Immediate neighbours, with sentence-boundary markers.
    if i > 0:
        features['-1:char.lower()'] = sent[i - 1][0].lower()
    else:
        features['BOS'] = True
    if i < len(sent) - 1:
        features['+1:char.lower()'] = sent[i + 1][0].lower()
    else:
        features['EOS'] = True

    # Wider context window: characters at offsets 2..8 on each side.
    # Backward offsets keep the original strict 'i > offset' guard,
    # which skips the feature when i == offset.
    for offset in range(2, 9):
        if i > offset:
            features['-%d:char.lower()' % offset] = sent[i - offset][0].lower()
        if i < len(sent) - offset:
            features['+%d:char.lower()' % offset] = sent[i + offset][0].lower()

    return features
def char2features_space(sent, i):
    """Build the per-character feature dict for the whitespace CRF.

    Keyword arguments:
    sent -- a list of pairs <character, label>
    i -- index of the node to extract the features
    """
    n = len(sent)
    current = sent[i][0]
    features = {
        'bias': 1.0,
        'char': current,
        'char.lower()': current.lower(),
    }

    def add_char(tag, ch):
        # Single-character context feature group.
        features[tag + ':char'] = ch
        features[tag + ':char.lower()'] = ch.lower()
        features[tag + ':char.isdigit()'] = ch.isdigit()

    def add_pair(tag, first, second):
        # Character-bigram context feature group.
        features[tag + ':char.lower()'] = first.lower() + second.lower()
        features[tag + ':char.isdigit()'] = first.isdigit() and second.isdigit()

    # Immediate neighbours, with sentence-boundary markers.
    if i > 0:
        add_char('-1', sent[i - 1][0])
    else:
        features['BOS'] = True
    if i < n - 1:
        add_char('+1', sent[i + 1][0])
    else:
        features['EOS'] = True

    # Wider backward context.  The strict 'i > 2' / 'i > 3' guards skip
    # the feature when i equals the offset, matching the original code.
    if i > 2:
        add_char('-2', sent[i - 2][0])
        add_pair('-21', sent[i - 2][0], sent[i - 1][0])
    if i > 3:
        add_char('-3', sent[i - 3][0])
        add_pair('-32', sent[i - 3][0], sent[i - 2][0])

    # Wider forward context.
    if i < n - 2:
        add_char('+2', sent[i + 2][0])
        add_pair('+21', sent[i + 1][0], sent[i + 2][0])
    if i < n - 3:
        add_char('+3', sent[i + 3][0])
        add_pair('+32', sent[i + 2][0], sent[i + 3][0])
        # Trigram over the current and next two characters; note the
        # key spelling '+02:lower()' (no 'char.') from the original.
        trigram = sent[i][0] + sent[i + 1][0] + sent[i + 2][0]
        features['+02:lower()'] = trigram.lower()
        features['+02:isdigit()'] = trigram.isdigit()

    return features
| 24.017413 | 70 | 0.46028 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 2,401 | 0.248525 |
7d4be85eed72cdda5ea7001420bb48d96b241f0b | 21,722 | py | Python | pysnmp/CHECKPOINT-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 11 | 2021-02-02T16:27:16.000Z | 2021-08-31T06:22:49.000Z | pysnmp/CHECKPOINT-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 75 | 2021-02-24T17:30:31.000Z | 2021-12-08T00:01:18.000Z | pysnmp/CHECKPOINT-TRAP-MIB.py | agustinhenze/mibs.snmplabs.com | 1fc5c07860542b89212f4c8ab807057d9a9206c7 | [
"Apache-2.0"
] | 10 | 2019-04-30T05:51:36.000Z | 2022-02-16T03:33:41.000Z | #
# PySNMP MIB module CHECKPOINT-TRAP-MIB (http://snmplabs.com/pysmi)
# ASN.1 source file:///Users/davwang4/Dev/mibs.snmplabs.com/asn1/CHECKPOINT-TRAP-MIB
# Produced by pysmi-0.3.4 at Mon Apr 29 17:31:16 2019
# On host DAVWANG4-M-1475 platform Darwin version 18.5.0 by user davwang4
# Using Python version 3.7.3 (default, Mar 27 2019, 09:23:15)
#
Integer, OctetString, ObjectIdentifier = mibBuilder.importSymbols("ASN1", "Integer", "OctetString", "ObjectIdentifier")
NamedValues, = mibBuilder.importSymbols("ASN1-ENUMERATION", "NamedValues")
ConstraintsIntersection, ValueRangeConstraint, ConstraintsUnion, ValueSizeConstraint, SingleValueConstraint = mibBuilder.importSymbols("ASN1-REFINEMENT", "ConstraintsIntersection", "ValueRangeConstraint", "ConstraintsUnion", "ValueSizeConstraint", "SingleValueConstraint")
tempertureSensorStatus, haProblemVerified, fanSpeedSensorType, multiDiskFreeAvailablePercent, raidDiskID, memActiveReal64, haBlockState, haProblemPriority, voltageSensorName, svnNetIfState, fwLSConnState, fanSpeedSensorValue, haIfName, raidVolumeID, voltageSensorType, voltageSensorValue, raidDiskFlags, multiDiskName, fwLocalLoggingStat, fanSpeedSensorStatus, haIP, fanSpeedSensorUnit, tempertureSensorName, haTrusted, haStatShort, haStatus, multiProcIndex, svnNetIfName, haState, multiProcRunQueue, voltageSensorUnit, multiProcUsage, memTotalReal64, multiProcInterrupts, multiProcSystemTime, voltageSensorStatus, tempertureSensorUnit, haProblemStatus, tempertureSensorValue, fwLSConnOverall, fwLSConnStateDesc, fanSpeedSensorName, raidVolumeState, raidDiskVolumeID, fwLSConnOverallDesc, haIdentifier, memTotalVirtual64, memActiveVirtual64, raidDiskState, haStatCode, haStatLong, haProblemName, multiProcIdleTime, haProblemDescr, fwLSConnName, multiProcUserTime, fwLocalLoggingDesc, tempertureSensorType, haShared, svnNetIfAddress, svnNetIfOperState = mibBuilder.importSymbols("CHECKPOINT-MIB", "tempertureSensorStatus", "haProblemVerified", "fanSpeedSensorType", "multiDiskFreeAvailablePercent", "raidDiskID", "memActiveReal64", "haBlockState", "haProblemPriority", "voltageSensorName", "svnNetIfState", "fwLSConnState", "fanSpeedSensorValue", "haIfName", "raidVolumeID", "voltageSensorType", "voltageSensorValue", "raidDiskFlags", "multiDiskName", "fwLocalLoggingStat", "fanSpeedSensorStatus", "haIP", "fanSpeedSensorUnit", "tempertureSensorName", "haTrusted", "haStatShort", "haStatus", "multiProcIndex", "svnNetIfName", "haState", "multiProcRunQueue", "voltageSensorUnit", "multiProcUsage", "memTotalReal64", "multiProcInterrupts", "multiProcSystemTime", "voltageSensorStatus", "tempertureSensorUnit", "haProblemStatus", "tempertureSensorValue", "fwLSConnOverall", "fwLSConnStateDesc", "fanSpeedSensorName", "raidVolumeState", "raidDiskVolumeID", "fwLSConnOverallDesc", "haIdentifier", 
"memTotalVirtual64", "memActiveVirtual64", "raidDiskState", "haStatCode", "haStatLong", "haProblemName", "multiProcIdleTime", "haProblemDescr", "fwLSConnName", "multiProcUserTime", "fwLocalLoggingDesc", "tempertureSensorType", "haShared", "svnNetIfAddress", "svnNetIfOperState")
NotificationGroup, ModuleCompliance = mibBuilder.importSymbols("SNMPv2-CONF", "NotificationGroup", "ModuleCompliance")
Counter32, NotificationType, iso, Integer32, IpAddress, TimeTicks, ObjectIdentity, Bits, Unsigned32, MibIdentifier, ModuleIdentity, Gauge32, MibScalar, MibTable, MibTableRow, MibTableColumn, Counter64, enterprises = mibBuilder.importSymbols("SNMPv2-SMI", "Counter32", "NotificationType", "iso", "Integer32", "IpAddress", "TimeTicks", "ObjectIdentity", "Bits", "Unsigned32", "MibIdentifier", "ModuleIdentity", "Gauge32", "MibScalar", "MibTable", "MibTableRow", "MibTableColumn", "Counter64", "enterprises")
TextualConvention, DisplayString = mibBuilder.importSymbols("SNMPv2-TC", "TextualConvention", "DisplayString")
# NOTE: this module is machine-generated by pysmi (see header above);
# prefer regenerating from the MIB source over hand-editing.
# --- Module identity: the MIB lives under OID 1.3.6.1.4.1.2620.1.2000
# (Check Point's enterprise arc, trap subtree).
chkpntTrapMibModule = ModuleIdentity((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0, 0))
chkpntTrapMibModule.setRevisions(('2013-12-26 13:09',))
if mibBuilder.loadTexts: chkpntTrapMibModule.setLastUpdated('201312261309Z')
if mibBuilder.loadTexts: chkpntTrapMibModule.setOrganization('Check Point')
# --- OID subtree anchors grouping the trap definitions by category
# (net, disk, CPU, memory, HW sensors, HA, log-server connectivity).
checkpoint = MibIdentifier((1, 3, 6, 1, 4, 1, 2620))
products = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1))
chkpntTrap = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000))
chkpntTrapInfo = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0))
chkpntTrapNet = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1))
chkpntTrapDisk = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 2))
chkpntTrapCPU = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 3))
chkpntTrapMemory = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 4))
chkpntTrapHWSensor = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5))
chkpntTrapHA = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 6))
chkpntTrapLSConn = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 7))
# --- Read-only scalar objects carried as varbinds by the trap
# definitions below: source OID, its value, a message string, a
# numeric severity (1..65535) and a category string.
chkpntTrapOID = MibScalar((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0, 10), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chkpntTrapOID.setStatus('current')
chkpntTrapOIDValue = MibScalar((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0, 11), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chkpntTrapOIDValue.setStatus('current')
chkpntTrapMsgText = MibScalar((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0, 12), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chkpntTrapMsgText.setStatus('current')
chkpntTrapSeverity = MibScalar((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0, 13), Integer32().subtype(subtypeSpec=ValueRangeConstraint(1, 65535))).setMaxAccess("readonly")
if mibBuilder.loadTexts: chkpntTrapSeverity.setStatus('current')
chkpntTrapCategory = MibScalar((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 0, 14), DisplayString()).setMaxAccess("readonly")
if mibBuilder.loadTexts: chkpntTrapCategory.setStatus('current')
chkpntDiskSpaceTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 2, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "multiDiskName"), ("CHECKPOINT-MIB", "multiDiskFreeAvailablePercent"))
if mibBuilder.loadTexts: chkpntDiskSpaceTrap.setStatus('current')
chkpntRAIDVolumeTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 2, 2)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "raidVolumeID"), ("CHECKPOINT-MIB", "raidVolumeState"))
if mibBuilder.loadTexts: chkpntRAIDVolumeTrap.setStatus('current')
chkpntRAIDDiskTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 2, 3)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "raidDiskVolumeID"), ("CHECKPOINT-MIB", "raidDiskID"), ("CHECKPOINT-MIB", "raidDiskState"))
if mibBuilder.loadTexts: chkpntRAIDDiskTrap.setStatus('current')
chkpntRAIDDiskFlagsTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 2, 4)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "raidDiskVolumeID"), ("CHECKPOINT-MIB", "raidDiskID"), ("CHECKPOINT-MIB", "raidDiskState"), ("CHECKPOINT-MIB", "raidDiskFlags"))
if mibBuilder.loadTexts: chkpntRAIDDiskFlagsTrap.setStatus('current')
chkpntTrapNetIfState = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "svnNetIfName"), ("CHECKPOINT-MIB", "svnNetIfAddress"), ("CHECKPOINT-MIB", "svnNetIfState"))
if mibBuilder.loadTexts: chkpntTrapNetIfState.setStatus('current')
chkpntTrapNetIfUnplugged = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 2)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "svnNetIfName"), ("CHECKPOINT-MIB", "svnNetIfAddress"))
if mibBuilder.loadTexts: chkpntTrapNetIfUnplugged.setStatus('current')
chkpntTrapNewConnRate = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 3)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"))
if mibBuilder.loadTexts: chkpntTrapNewConnRate.setStatus('current')
chkpntTrapConcurrentConnRate = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 4)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"))
if mibBuilder.loadTexts: chkpntTrapConcurrentConnRate.setStatus('current')
chkpntTrapBytesThroughput = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 5)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"))
if mibBuilder.loadTexts: chkpntTrapBytesThroughput.setStatus('current')
chkpntTrapAcceptedPacketRate = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 6)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"))
if mibBuilder.loadTexts: chkpntTrapAcceptedPacketRate.setStatus('current')
chkpntTrapNetIfOperState = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 1, 7)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "svnNetIfName"), ("CHECKPOINT-MIB", "svnNetIfAddress"), ("CHECKPOINT-MIB", "svnNetIfOperState"))
if mibBuilder.loadTexts: chkpntTrapNetIfOperState.setStatus('current')
chkpntCPUCoreUtilTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 3, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "multiProcIndex"), ("CHECKPOINT-MIB", "multiProcUserTime"), ("CHECKPOINT-MIB", "multiProcSystemTime"), ("CHECKPOINT-MIB", "multiProcIdleTime"), ("CHECKPOINT-MIB", "multiProcUsage"), ("CHECKPOINT-MIB", "multiProcRunQueue"), ("CHECKPOINT-MIB", "multiProcInterrupts"))
if mibBuilder.loadTexts: chkpntCPUCoreUtilTrap.setStatus('current')
chkpntCPUCoreInterruptsTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 3, 2)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "multiProcIndex"), ("CHECKPOINT-MIB", "multiProcUserTime"), ("CHECKPOINT-MIB", "multiProcSystemTime"), ("CHECKPOINT-MIB", "multiProcIdleTime"), ("CHECKPOINT-MIB", "multiProcUsage"), ("CHECKPOINT-MIB", "multiProcRunQueue"), ("CHECKPOINT-MIB", "multiProcInterrupts"))
if mibBuilder.loadTexts: chkpntCPUCoreInterruptsTrap.setStatus('current')
chkpntSwapMemoryTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 4, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "memTotalVirtual64"), ("CHECKPOINT-MIB", "memActiveVirtual64"))
if mibBuilder.loadTexts: chkpntSwapMemoryTrap.setStatus('current')
chkpntRealMemoryTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 4, 2)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "memTotalReal64"), ("CHECKPOINT-MIB", "memActiveReal64"))
if mibBuilder.loadTexts: chkpntRealMemoryTrap.setStatus('current')
# Sub-identifiers grouping the hardware-sensor traps by sensor kind
# (temperature, fan speed, voltage).  "Temperture" spelling comes from
# the generated MIB and must stay as-is.
chkpntTrapTempertureSensor = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5, 1))
chkpntTrapFanSpeedSensor = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5, 2))
chkpntTrapVoltageSensor = MibIdentifier((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5, 3))
chkpntTempertureTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5, 1, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "tempertureSensorName"), ("CHECKPOINT-MIB", "tempertureSensorValue"), ("CHECKPOINT-MIB", "tempertureSensorUnit"), ("CHECKPOINT-MIB", "tempertureSensorType"), ("CHECKPOINT-MIB", "tempertureSensorStatus"))
if mibBuilder.loadTexts: chkpntTempertureTrap.setStatus('current')
chkpntFanSpeedTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5, 2, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "fanSpeedSensorName"), ("CHECKPOINT-MIB", "fanSpeedSensorValue"), ("CHECKPOINT-MIB", "fanSpeedSensorUnit"), ("CHECKPOINT-MIB", "fanSpeedSensorType"), ("CHECKPOINT-MIB", "fanSpeedSensorStatus"))
if mibBuilder.loadTexts: chkpntFanSpeedTrap.setStatus('current')
chkpntVoltageTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 5, 3, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "voltageSensorName"), ("CHECKPOINT-MIB", "voltageSensorValue"), ("CHECKPOINT-MIB", "voltageSensorUnit"), ("CHECKPOINT-MIB", "voltageSensorType"), ("CHECKPOINT-MIB", "voltageSensorStatus"))
if mibBuilder.loadTexts: chkpntVoltageTrap.setStatus('current')
chkpntClusterMemberStateTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 6, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "haIdentifier"), ("CHECKPOINT-MIB", "haState"))
if mibBuilder.loadTexts: chkpntClusterMemberStateTrap.setStatus('current')
chkpntClusterBlockStateTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 6, 2)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "haIdentifier"), ("CHECKPOINT-MIB", "haBlockState"), ("CHECKPOINT-MIB", "haState"))
if mibBuilder.loadTexts: chkpntClusterBlockStateTrap.setStatus('current')
chkpntClusterStateTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 6, 3)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "haIdentifier"), ("CHECKPOINT-MIB", "haBlockState"), ("CHECKPOINT-MIB", "haState"), ("CHECKPOINT-MIB", "haStatCode"), ("CHECKPOINT-MIB", "haStatShort"), ("CHECKPOINT-MIB", "haStatLong"))
if mibBuilder.loadTexts: chkpntClusterStateTrap.setStatus('current')
chkpntClusterProblemStateTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 6, 4)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "haProblemName"), ("CHECKPOINT-MIB", "haProblemStatus"), ("CHECKPOINT-MIB", "haProblemPriority"), ("CHECKPOINT-MIB", "haProblemVerified"), ("CHECKPOINT-MIB", "haProblemDescr"))
if mibBuilder.loadTexts: chkpntClusterProblemStateTrap.setStatus('current')
chkpntClusterInterfaceStateTrap = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 6, 5)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "haIfName"), ("CHECKPOINT-MIB", "haIP"), ("CHECKPOINT-MIB", "haStatus"), ("CHECKPOINT-MIB", "haTrusted"), ("CHECKPOINT-MIB", "haShared"))
if mibBuilder.loadTexts: chkpntClusterInterfaceStateTrap.setStatus('current')
chkpntTrapLSConnState = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 7, 1)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "fwLSConnName"), ("CHECKPOINT-MIB", "fwLSConnState"), ("CHECKPOINT-MIB", "fwLSConnStateDesc"), ("CHECKPOINT-MIB", "fwLocalLoggingDesc"), ("CHECKPOINT-MIB", "fwLocalLoggingStat"))
if mibBuilder.loadTexts: chkpntTrapLSConnState.setStatus('current')
chkpntTrapOverallLSConnState = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 7, 2)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "fwLSConnOverall"), ("CHECKPOINT-MIB", "fwLSConnOverallDesc"), ("CHECKPOINT-MIB", "fwLocalLoggingDesc"), ("CHECKPOINT-MIB", "fwLocalLoggingStat"))
if mibBuilder.loadTexts: chkpntTrapOverallLSConnState.setStatus('current')
chkpntTrapLocalLoggingState = NotificationType((1, 3, 6, 1, 4, 1, 2620, 1, 2000, 7, 3)).setObjects(("CHECKPOINT-TRAP-MIB", "chkpntTrapOID"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapOIDValue"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapMsgText"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapSeverity"), ("CHECKPOINT-TRAP-MIB", "chkpntTrapCategory"), ("CHECKPOINT-MIB", "fwLSConnOverall"), ("CHECKPOINT-MIB", "fwLSConnOverallDesc"), ("CHECKPOINT-MIB", "fwLocalLoggingDesc"), ("CHECKPOINT-MIB", "fwLocalLoggingStat"))
if mibBuilder.loadTexts: chkpntTrapLocalLoggingState.setStatus('current')
mibBuilder.exportSymbols("CHECKPOINT-TRAP-MIB", chkpntTrapBytesThroughput=chkpntTrapBytesThroughput, chkpntClusterBlockStateTrap=chkpntClusterBlockStateTrap, chkpntTrap=chkpntTrap, chkpntRAIDDiskTrap=chkpntRAIDDiskTrap, chkpntCPUCoreInterruptsTrap=chkpntCPUCoreInterruptsTrap, chkpntTempertureTrap=chkpntTempertureTrap, chkpntTrapConcurrentConnRate=chkpntTrapConcurrentConnRate, chkpntTrapNewConnRate=chkpntTrapNewConnRate, chkpntFanSpeedTrap=chkpntFanSpeedTrap, chkpntSwapMemoryTrap=chkpntSwapMemoryTrap, chkpntVoltageTrap=chkpntVoltageTrap, chkpntTrapFanSpeedSensor=chkpntTrapFanSpeedSensor, chkpntCPUCoreUtilTrap=chkpntCPUCoreUtilTrap, chkpntTrapMsgText=chkpntTrapMsgText, checkpoint=checkpoint, chkpntRealMemoryTrap=chkpntRealMemoryTrap, chkpntTrapOID=chkpntTrapOID, chkpntTrapSeverity=chkpntTrapSeverity, chkpntClusterStateTrap=chkpntClusterStateTrap, chkpntTrapOverallLSConnState=chkpntTrapOverallLSConnState, chkpntTrapTempertureSensor=chkpntTrapTempertureSensor, chkpntClusterProblemStateTrap=chkpntClusterProblemStateTrap, chkpntClusterInterfaceStateTrap=chkpntClusterInterfaceStateTrap, chkpntTrapHWSensor=chkpntTrapHWSensor, chkpntTrapCategory=chkpntTrapCategory, chkpntTrapLocalLoggingState=chkpntTrapLocalLoggingState, chkpntTrapLSConnState=chkpntTrapLSConnState, chkpntTrapLSConn=chkpntTrapLSConn, chkpntTrapMibModule=chkpntTrapMibModule, chkpntTrapMemory=chkpntTrapMemory, chkpntTrapNetIfUnplugged=chkpntTrapNetIfUnplugged, chkpntTrapCPU=chkpntTrapCPU, chkpntDiskSpaceTrap=chkpntDiskSpaceTrap, products=products, chkpntTrapNet=chkpntTrapNet, chkpntTrapAcceptedPacketRate=chkpntTrapAcceptedPacketRate, chkpntTrapNetIfOperState=chkpntTrapNetIfOperState, chkpntTrapNetIfState=chkpntTrapNetIfState, chkpntTrapOIDValue=chkpntTrapOIDValue, chkpntRAIDVolumeTrap=chkpntRAIDVolumeTrap, chkpntClusterMemberStateTrap=chkpntClusterMemberStateTrap, chkpntTrapInfo=chkpntTrapInfo, chkpntRAIDDiskFlagsTrap=chkpntRAIDDiskFlagsTrap, chkpntTrapHA=chkpntTrapHA, 
chkpntTrapVoltageSensor=chkpntTrapVoltageSensor, chkpntTrapDisk=chkpntTrapDisk, PYSNMP_MODULE_ID=chkpntTrapMibModule)
| 226.270833 | 2,269 | 0.768115 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 10,348 | 0.476383 |
7d4c5d1e663bf350e478bc71a505fc49721c08e6 | 452 | py | Python | assistant/tests/internetcheck.py | SPARC-Auburn/Lab-Assistant | f86577f4ea53297f3c9febb84d967650d7196e61 | [
"Apache-2.0"
] | 9 | 2017-09-06T13:23:32.000Z | 2020-07-19T17:05:23.000Z | assistant/tests/internetcheck.py | SPARC-Auburn/Lab-Assistant | f86577f4ea53297f3c9febb84d967650d7196e61 | [
"Apache-2.0"
] | 29 | 2017-09-06T21:50:08.000Z | 2017-12-07T00:37:57.000Z | assistant/tests/internetcheck.py | SPARC-Auburn/Lab-Assistant | f86577f4ea53297f3c9febb84d967650d7196e61 | [
"Apache-2.0"
] | 6 | 2016-11-20T01:01:55.000Z | 2019-10-16T16:29:33.000Z | import socket
def is_connected(hostname="www.google.com", port=80, timeout=2):
    """Return True if the machine has working internet access.

    Works in two stages: first resolves *hostname* (verifies that a DNS
    server is reachable and answering), then opens a TCP connection to
    it on *port* (verifies the host is actually reachable), giving up
    after *timeout* seconds.

    Parameters are new but defaulted to the previous hard-coded values,
    so existing callers (``is_connected()``) behave exactly as before.

    :param hostname: host to probe (default ``www.google.com``)
    :param port: TCP port to connect to (default 80)
    :param timeout: connection timeout in seconds (default 2)
    :return: True when both DNS resolution and the TCP connect succeed,
        False on any network-related failure.
    """
    try:
        # see if we can resolve the host name -- tells us if there is
        # a DNS listening
        host = socket.gethostbyname(hostname)
        # connect to the host -- tells us if the host is actually
        # reachable
        s = socket.create_connection((host, port), timeout)
        # close the socket instead of leaking the file descriptor
        # (the original left it open until garbage collection)
        s.close()
        return True
    except socket.error:
        # Narrowed from a bare ``except:``, which also swallowed
        # KeyboardInterrupt/SystemExit; only network errors mean
        # "offline" here.
        pass
    return False
print is_connected()
| 22.6 | 69 | 0.615044 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 162 | 0.358407 |
7d4d805a56faeaf0887f5c53b13a814262347351 | 185,256 | py | Python | hrl_dynamic_mpc/src/dMdq_func.py | gt-ros-pkg/hrl-haptic-manip | 6458187075033ecd3a22fbcdc1a632df39b0cba1 | [
"Apache-2.0"
] | 1 | 2017-07-13T14:58:35.000Z | 2017-07-13T14:58:35.000Z | hrl_dynamic_mpc/src/dMdq_func.py | gt-ros-pkg/hrl-haptic-manip | 6458187075033ecd3a22fbcdc1a632df39b0cba1 | [
"Apache-2.0"
] | null | null | null | hrl_dynamic_mpc/src/dMdq_func.py | gt-ros-pkg/hrl-haptic-manip | 6458187075033ecd3a22fbcdc1a632df39b0cba1 | [
"Apache-2.0"
] | 2 | 2017-03-08T14:44:22.000Z | 2019-07-15T23:46:35.000Z | #
#
# Copyright (c) 2013, Georgia Tech Research Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Georgia Tech Research Corporation nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY GEORGIA TECH RESEARCH CORPORATION ''AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL GEORGIA TECH BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# \authors: Marc Killpack (Healthcare Robotics Lab, Georgia Tech.)
# \adviser: Charles Kemp (Healthcare Robotics Lab, Georgia Tech.)
# Generated from sympybotics library (https://github.com/cdsousa/sympybotics)
from math import sin, cos
def dMdq(parms, q, jt_num):
if jt_num == 1:
#
dMdq1_out = [0]*49
#
x0 = cos(q[1])
dx0 = -sin(q[1])
x1 = -x0
dx1 = -dx0
x2 = cos(q[2])
dx2 = 0
x3 = x1*x2
dx3 = dx1*x2 + dx2*x1
x4 = -sin(q[1])
dx4 = -cos(q[1])
x5 = -x4
dx5 = -dx4
x6 = 0.27857*x0 - 0.03175*x5
dx6 = 0.27857*dx0 - 0.03175*dx5
x7 = -x2
dx7 = -dx2
x8 = x6*x7
dx8 = dx6*x7 + dx7*x6
x9 = cos(q[3])
dx9 = 0
x10 = sin(q[2])
dx10 = 0
x11 = x1*x10
dx11 = dx1*x10 + dx10*x1
x12 = -x11
dx12 = -dx11
x13 = sin(q[3])
dx13 = 0
x14 = x12*x13 + x5*x9
dx14 = dx12*x13 + dx13*x12 + dx5*x9 + dx9*x5
x15 = -x3
dx15 = -dx3
x16 = -x15
dx16 = -dx15
x17 = -0.00502*x13*x15 + x8*x9
dx17 = -0.00502*dx13*x15 - 0.00502*dx15*x13 + dx8*x9 + dx9*x8
x18 = sin(q[4])
dx18 = 0
x19 = 0.27747*x16 + x17
dx19 = 0.27747*dx16 + dx17
x20 = cos(q[4])
dx20 = 0
x21 = x10*x6
dx21 = dx10*x6 + dx6*x10
x22 = -x21
dx22 = -dx21
x23 = x22 + 0.00502*x5
dx23 = dx22 + 0.00502*dx5
x24 = x11*x9 + x13*x5
dx24 = dx11*x9 + dx13*x5 + dx5*x13 + dx9*x11
x25 = x23 + 0.27747*x24
dx25 = dx23 + 0.27747*dx24
x26 = -x18*x19 - x20*x25
dx26 = -dx18*x19 - dx19*x18 - dx20*x25 - dx25*x20
x27 = x16*x18 + x20*x24
dx27 = dx16*x18 + dx18*x16 + dx20*x24 + dx24*x20
x28 = -x27
dx28 = -dx27
x29 = sin(q[5])
dx29 = 0
x30 = cos(q[5])
dx30 = 0
x31 = x14*x30 + x28*x29
dx31 = dx14*x30 + dx28*x29 + dx29*x28 + dx30*x14
x32 = -x26
dx32 = -dx26
x33 = -x14*x29 - x27*x30
dx33 = -dx14*x29 - dx27*x30 - dx29*x14 - dx30*x27
x34 = -x33
dx34 = -dx33
x35 = -x15*x20 - x18*x24
dx35 = -dx15*x20 - dx18*x24 - dx20*x15 - dx24*x18
x36 = -x35
dx36 = -dx35
x37 = sin(q[6])
dx37 = 0
x38 = cos(q[6])
dx38 = 0
x39 = -x31*x38 - x36*x37
dx39 = -dx31*x38 - dx36*x37 - dx37*x36 - dx38*x31
x40 = -x18
dx40 = -dx18
x41 = x19*x20 + x25*x40
dx41 = dx19*x20 + dx20*x19 + dx25*x40 + dx40*x25
x42 = -x41
dx42 = -dx41
x43 = -x13*x8 - 0.00502*x15*x9
dx43 = -dx13*x8 - 0.00502*dx15*x9 - dx8*x13 - 0.00502*dx9*x15
x44 = x29*x42 + x30*x43
dx44 = dx29*x42 + dx30*x43 + dx42*x29 + dx43*x30
x45 = -x44
dx45 = -dx44
x46 = x32*x38 + x37*x45
dx46 = dx32*x38 + dx37*x45 + dx38*x32 + dx45*x37
x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
dx47 = -dx34*parms[79] + dx39*parms[80] + dx46*parms[81]
x48 = -x32*x37 - x38*x44
dx48 = -dx32*x37 - dx37*x32 - dx38*x44 - dx44*x38
x49 = -x31
dx49 = -dx31
x50 = x36*x38 + x37*x49
dx50 = dx36*x38 + dx37*x49 + dx38*x36 + dx49*x37
x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
dx51 = -dx34*parms[78] - dx48*parms[81] + dx50*parms[80]
x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
dx52 = dx14*parms[54] + dx26*parms[57] + dx28*parms[56] - dx31*parms[67] - dx32*parms[69] - dx34*parms[66] - dx37*x51 - dx38*x47 - dx47*x38 - dx51*x37
x53 = -x14
dx53 = -dx14
x54 = -x29*x43 - x30*x41
dx54 = -dx29*x43 - dx30*x41 - dx41*x30 - dx43*x29
x55 = -x54
dx55 = -dx54
x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
dx56 = -dx36*parms[66] - dx39*parms[78] - dx49*parms[68] + dx50*parms[79] - dx54*parms[69] + dx55*parms[81]
x57 = -x37
dx57 = -dx37
x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
dx58 = dx33*parms[68] - dx36*parms[67] + dx38*x51 + dx44*parms[69] + dx47*x57 + dx51*x38 + dx57*x47
x59 = -x29
dx59 = -dx29
x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
dx60 = dx30*x56 + dx35*parms[56] + dx41*parms[57] + dx53*parms[55] + dx56*x30 + dx58*x59 + dx59*x58
x61 = x20*x60
dx61 = dx20*x60 + dx60*x20
x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
dx62 = dx14*parms[44] + dx16*parms[43] + dx17*parms[45] + dx40*x52 + dx52*x40 + dx61
x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
dx63 = dx15*parms[42] - dx24*parms[44] + dx27*parms[55] + dx29*x56 + dx30*x58 + dx36*parms[54] + dx43*(parms[45] + parms[57]) + dx56*x29 + dx58*x30
x64 = -x13
dx64 = -dx13
x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
dx65 = dx3*parms[32] - dx5*parms[31] + dx62*x9 + dx63*x64 + dx64*x63 + dx8*parms[33] + dx9*x62
x66 = x2*x65
dx66 = dx2*x65 + dx65*x2
x67 = -x43
dx67 = -dx43
x68 = -parms[78]
dx68 = 0
x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
dx69 = dx34*parms[76] + dx39*parms[75] + dx46*parms[80] + dx50*parms[73] + dx55*x68 + dx68*x55
x70 = -parms[80]
dx70 = 0
x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
dx71 = dx34*parms[74] + dx39*parms[73] + dx48*x70 + dx50*parms[72] + dx55*parms[79] + dx70*x48
x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
dx72 = dx31*parms[62] + dx33*parms[64] + dx36*parms[65] + dx38*x71 + dx45*parms[67] + dx54*parms[66] + dx57*x69 + dx69*x57 + dx71*x38
x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
dx73 = dx14*parms[52] + dx27*parms[49] + dx35*parms[51] + dx41*parms[56] + dx67*parms[54] - dx72
x74 = x20*x52
dx74 = dx20*x52 + dx52*x20
x75 = -0.27747*x18
dx75 = -0.27747*dx18
x76 = -x38
dx76 = -dx38
x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
dx77 = dx31*parms[60] + dx32*parms[67] + dx33*parms[61] + dx36*parms[62] + dx55*parms[68] + dx57*x71 + dx69*x76 + dx71*x57 + dx76*x69
x78 = -parms[66]
dx78 = 0
x79 = -parms[79]
dx79 = 0
x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
dx80 = dx34*parms[77] + dx39*parms[76] + dx46*x79 + dx48*parms[78] + dx50*parms[74] + dx79*x46
x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
dx81 = dx31*parms[61] + dx32*x78 + dx33*parms[63] + dx36*parms[64] + dx44*parms[68] + dx78*x32 - dx80
x82 = -x30
dx82 = -dx30
x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
dx83 = dx14*parms[50] + dx27*parms[48] + dx32*parms[56] + dx35*parms[49] + dx43*parms[55] + dx59*x77 + dx77*x59 + dx81*x82 + dx82*x81
x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
dx84 = dx14*parms[37] + dx15*parms[38] + dx20*x83 + dx23*parms[43] + dx24*parms[36] + dx40*x73 + dx60*x75 + dx67*parms[44] + dx73*x40 - 0.27747*dx74 + dx75*x60 + dx83*x20
x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
dx85 = dx14*parms[53] + dx26*parms[54] + dx27*parms[50] + dx30*x77 + dx35*parms[52] + dx42*parms[55] + dx59*x81 + dx77*x30 + dx81*x59
x86 = -parms[42]
dx86 = 0
x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
dx87 = dx14*parms[39] + dx15*parms[40] + dx17*parms[44] + dx23*x86 + dx24*parms[37] + dx85 + dx86*x23
x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
dx88 = dx11*parms[24] + dx22*parms[32] + dx3*parms[25] + dx5*parms[26] + dx64*x87 + dx84*x9 + dx87*x64 + dx9*x84
x89 = -x10
dx89 = -dx10
x90 = -x20
dx90 = -dx20
x91 = 0.27747*x18
dx91 = 0.27747*dx18
x92 = -parms[43]
dx92 = 0
x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
dx93 = dx14*parms[40] + dx15*parms[41] + dx17*x92 + dx24*parms[38] + dx40*x83 + dx43*parms[42] + dx52*x91 - 0.27747*dx61 + dx73*x90 + dx83*x40 + dx90*x73 + dx91*x52 + dx92*x17
x94 = x13*x62
dx94 = dx13*x62 + dx62*x13
x95 = x63*x9
dx95 = dx63*x9 + dx9*x63
x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
dx96 = dx11*parms[25] + dx3*parms[27] + dx5*parms[28] + dx8*parms[32] - dx93 + 0.00502*dx94 + 0.00502*dx95
x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
dx97 = dx23*parms[45] + dx24*parms[43] + dx40*x60 + dx53*parms[42] + dx60*x40 - dx74
x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
dx98 = dx12*parms[32] + dx21*parms[33] + dx5*parms[30] - dx97
x99 = x10*x98
dx99 = dx10*x98 + dx98*x10
x100 = -parms[31]
dx100 = 0
x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
dx101 = dx100*x8 + dx11*parms[26] + dx13*x84 + dx21*parms[30] + dx3*parms[28] + dx5*parms[29] + dx8*x100 + dx84*x13 + dx87*x9 + dx9*x87 + 0.00502*dx97
x102 = -0.27857*x2
dx102 = -0.27857*dx2
x103 = -0.27857*x10
dx103 = -0.27857*dx10
x104 = parms[14]*x0 + parms[16]*x4 - 0.03175*parms[30]*x15 - 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 - 0.03175*x94 - 0.03175*x95
dx104 = dx0*parms[14] + dx102*x98 + dx103*x65 - 0.03175*dx11*parms[31] - 0.03175*dx15*parms[30] + dx2*x88 + dx4*parms[16] + dx65*x103 + dx88*x2 + dx89*x96 - 0.03175*dx94 - 0.03175*dx95 + dx96*x89 + dx98*x102
x105 = -x89
dx105 = -dx89
x106 = 0.00502*x105 + 0.03175
dx106 = 0.00502*dx105
x107 = -x103*x13 - x106*x9
dx107 = -dx103*x13 - dx106*x9 - dx13*x103 - dx9*x106
x108 = x2*x9
dx108 = dx2*x9 + dx9*x2
x109 = -x105*x20 - x108*x18
dx109 = -dx105*x20 - dx108*x18 - dx18*x108 - dx20*x105
x110 = -x109
dx110 = -dx109
x111 = x105*x40 + x108*x20
dx111 = dx105*x40 + dx108*x20 + dx20*x108 + dx40*x105
x112 = -x105
dx112 = -dx105
x113 = x103*x9 + x106*x64
dx113 = dx103*x9 + dx106*x64 + dx64*x106 + dx9*x103
x114 = 0.27747*x112 + x113
dx114 = 0.27747*dx112 + dx113
x115 = -x102
dx115 = -dx102
x116 = 0.27747*x108 + x115
dx116 = 0.27747*dx108 + dx115
x117 = x114*x20 + x116*x40
dx117 = dx114*x20 + dx116*x40 + dx20*x114 + dx40*x116
x118 = x107*x30 + x117*x59
dx118 = dx107*x30 + dx117*x59 + dx30*x107 + dx59*x117
x119 = x2*x64
dx119 = dx2*x64 + dx64*x2
x120 = -x111*x30 - x119*x29
dx120 = -dx111*x30 - dx119*x29 - dx29*x119 - dx30*x111
x121 = -x120
dx121 = -dx120
x122 = -x114*x18 - x116*x20
dx122 = -dx114*x18 - dx116*x20 - dx18*x114 - dx20*x116
x123 = -x122
dx123 = -dx122
x124 = x118*x57 + x123*x38
dx124 = dx118*x57 + dx123*x38 + dx38*x123 + dx57*x118
x125 = x111*x59 + x119*x30
dx125 = dx111*x59 + dx119*x30 + dx30*x119 + dx59*x111
x126 = -x110*x37 - x125*x38
dx126 = -dx110*x37 - dx125*x38 - dx37*x110 - dx38*x125
x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
dx127 = -dx121*parms[79] + dx124*parms[81] + dx126*parms[80]
x128 = x110*x38 + x125*x57
dx128 = dx110*x38 + dx125*x57 + dx38*x110 + dx57*x125
x129 = -x118*x38 - x123*x37
dx129 = -dx118*x38 - dx123*x37 - dx37*x123 - dx38*x118
x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
dx130 = dx121*parms[78] - dx128*parms[80] + dx129*parms[81]
x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
dx131 = -dx110*parms[67] + dx118*parms[69] + dx120*parms[68] + dx127*x57 + dx130*x76 + dx57*x127 + dx76*x130
x132 = -x107*x29 - x117*x30
dx132 = -dx107*x29 - dx117*x30 - dx29*x107 - dx30*x117
x133 = -x132
dx133 = -dx132
x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
dx134 = dx110*parms[66] - dx125*parms[68] + dx126*parms[78] - dx128*parms[79] + dx132*parms[69] - dx133*parms[81]
x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
dx135 = dx105*parms[42] + dx107*(parms[45] + parms[57]) - dx108*parms[44] + dx110*parms[54] + dx111*parms[55] + dx131*x30 + dx134*x59 + dx30*x131 + dx59*x134
x136 = x135*x9
dx136 = dx135*x9 + dx9*x135
x137 = -x119
dx137 = -dx119
x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
dx138 = dx109*parms[56] + dx117*parms[57] + dx131*x59 + dx134*x82 + dx137*parms[55] + dx59*x131 + dx82*x134
x139 = x138*x20
dx139 = dx138*x20 + dx20*x138
x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
dx140 = -dx111*parms[56] + dx119*parms[54] - dx121*parms[66] + dx122*parms[57] - dx123*parms[69] - dx125*parms[67] - dx127*x38 - dx130*x57 - dx38*x127 - dx57*x130
x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
dx141 = dx121*parms[77] + dx124*x79 + dx126*parms[76] + dx128*parms[74] + dx129*parms[78] + dx79*x124
x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
dx142 = dx110*parms[64] + dx118*parms[68] + dx120*parms[63] + dx123*x78 + dx125*parms[61] - dx141 + dx78*x123
x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
dx143 = dx121*parms[74] + dx126*parms[73] + dx128*parms[72] + dx129*x70 + dx133*parms[79] + dx70*x129
x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
dx144 = dx121*parms[76] + dx124*parms[80] + dx126*parms[75] + dx128*parms[73] + dx133*x68 + dx68*x133
x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
dx145 = dx110*parms[62] + dx120*parms[61] + dx123*parms[67] + dx125*parms[60] + dx133*parms[68] + dx143*x57 + dx144*x76 + dx57*x143 + dx76*x144
x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
dx146 = dx107*parms[55] + dx109*parms[49] + dx111*parms[48] + dx119*parms[50] + dx123*parms[56] + dx142*x82 + dx145*x59 + dx59*x145 + dx82*x142
x147 = -x107
dx147 = -dx107
x148 = -parms[67]
dx148 = 0
x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
dx149 = dx110*parms[65] + dx118*x148 + dx120*parms[64] + dx125*parms[62] + dx132*parms[66] + dx143*x38 + dx144*x57 + dx148*x118 + dx38*x143 + dx57*x144
x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
dx150 = dx109*parms[51] + dx111*parms[49] + dx117*parms[56] + dx119*parms[52] + dx147*parms[54] - dx149
x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
dx151 = dx105*parms[41] + dx107*parms[42] + dx108*parms[38] + dx113*x92 + dx119*parms[40] - 0.27747*dx139 + dx140*x91 + dx146*x40 + dx150*x90 + dx40*x146 + dx90*x150 + dx91*x140 + dx92*x113
x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
dx152 = dx112*parms[43] + dx113*parms[45] + dx119*parms[44] + dx139 + dx140*x40 + dx40*x140
x153 = x13*x152
dx153 = dx13*x152 + dx152*x13
x154 = -0.27747*x20
dx154 = -0.27747*dx20
x155 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x154 + x146*x20 + x150*x40
dx155 = dx105*parms[38] + dx108*parms[36] + dx115*parms[43] + dx119*parms[37] + dx138*x75 + dx140*x154 + dx146*x20 + dx147*parms[44] + dx150*x40 + dx154*x140 + dx20*x146 + dx40*x150 + dx75*x138
x156 = -parms[55]
dx156 = 0
x157 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x156 + x142*x59 + x145*x30
dx157 = dx109*parms[52] + dx111*parms[50] + dx117*x156 + dx119*parms[53] + dx122*parms[54] + dx142*x59 + dx145*x30 + dx156*x117 + dx30*x145 + dx59*x142
x158 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x157
dx158 = dx105*parms[40] + dx108*parms[37] + dx113*parms[44] + dx115*x86 + dx119*parms[39] + dx157 + dx86*x115
x159 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
dx159 = dx108*parms[43] + dx115*parms[45] + dx137*parms[42] + dx138*x40 + dx140*x90 + dx40*x138 + dx90*x140
x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x155 + x158*x9 + 0.00502*x159
dx160 = dx100*x103 + dx102*parms[30] + dx103*x100 + dx13*x155 + dx155*x13 + dx158*x9 + 0.00502*dx159 + dx2*parms[26] + dx89*parms[28] + dx9*x158
x161 = -x9
dx161 = -dx9
x162 = x13*x20
dx162 = dx13*x20 + dx20*x13
x163 = x162*x59 + x30*x9
dx163 = dx162*x59 + dx30*x9 + dx59*x162 + dx9*x30
x164 = x13*x40
dx164 = dx13*x40 + dx40*x13
x165 = -x164
dx165 = -dx164
x166 = -x163*x38 - x165*x37
dx166 = -dx163*x38 - dx165*x37 - dx37*x165 - dx38*x163
x167 = x163*x57 + x165*x38
dx167 = dx163*x57 + dx165*x38 + dx38*x165 + dx57*x163
x168 = 0.27747*x13 + 0.00502
dx168 = 0.27747*dx13
x169 = x168*x40
dx169 = dx168*x40 + dx40*x168
x170 = x169*x82
dx170 = dx169*x82 + dx82*x169
x171 = -x170
dx171 = -dx170
x172 = x169*x59
dx172 = dx169*x59 + dx59*x169
x173 = -x162*x30 - x29*x9
dx173 = -dx162*x30 - dx29*x9 - dx30*x162 - dx9*x29
x174 = -x173
dx174 = -dx173
x175 = x168*x90
dx175 = dx168*x90 + dx90*x168
x176 = -x175
dx176 = -dx175
x177 = x172*x57 + x176*x38
dx177 = dx172*x57 + dx176*x38 + dx38*x176 + dx57*x172
x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
dx178 = dx166*parms[80] - dx174*parms[79] + dx177*parms[81]
x179 = -x172*x38 - x176*x37
dx179 = -dx172*x38 - dx176*x37 - dx37*x176 - dx38*x172
x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
dx180 = -dx167*parms[80] + dx174*parms[78] + dx179*parms[81]
x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
dx181 = dx161*parms[55] - dx163*parms[68]*x82 + dx164*parms[56] + dx165*(parms[66]*x82 - parms[67]*x59) + dx166*parms[78]*x82 - dx167*parms[79]*x82 + dx169*parms[57] + dx170*parms[69]*x82 - dx171*parms[81]*x82 + dx172*parms[69]*x59 + dx173*parms[68]*x59 + dx178*x57*x59 + dx180*x59*x76 + dx57*x178*x59 + dx59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + dx76*x180*x59 + dx82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
dx182 = -dx162*parms[56] - dx163*parms[67] - dx174*parms[66] + dx175*parms[57] - dx176*parms[69] - dx178*x38 - dx180*x57 - dx38*x178 - dx57*x180 + dx9*parms[54]
x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
dx183 = dx166*parms[76] + dx167*parms[74] + dx174*parms[77] + dx177*x79 + dx179*parms[78] + dx79*x177
x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
dx184 = dx163*parms[61] + dx165*parms[64] + dx172*parms[68] + dx173*parms[63] + dx176*x78 - dx183 + dx78*x176
x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
dx185 = dx166*parms[75] + dx167*parms[73] + dx171*x68 + dx174*parms[76] + dx177*parms[80] + dx68*x171
x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
dx186 = dx166*parms[73] + dx167*parms[72] + dx171*parms[79] + dx174*parms[74] + dx179*x70 + dx70*x179
x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
dx187 = dx163*parms[60] + dx165*parms[62] + dx171*parms[68] + dx173*parms[61] + dx176*parms[67] + dx185*x76 + dx186*x57 + dx57*x186 + dx76*x185
x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x156*x169 + x184*x59 + x187*x30
dx188 = dx156*x169 + dx162*parms[50] + dx164*parms[52] + dx169*x156 + dx175*parms[54] + dx184*x59 + dx187*x30 + dx30*x187 + dx59*x184 + dx9*parms[53]
x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
dx189 = dx162*parms[48] + dx164*parms[49] + dx176*parms[56] + dx184*x82 + dx187*x59 + dx59*x187 + dx82*x184 + dx9*parms[50]
x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
dx190 = dx148*x172 + dx163*parms[62] + dx165*parms[65] + dx170*parms[66] + dx172*x148 + dx173*parms[64] + dx185*x57 + dx186*x38 + dx38*x186 + dx57*x185
x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
dx191 = dx162*parms[49] + dx164*parms[51] + dx169*parms[56] - dx190 + dx9*parms[52]
x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
dx192 = dx13*parms[38] - 0.27747*dx181*x20 + dx182*x91 + dx189*x40 + dx191*x90 - 0.27747*dx20*x181 + dx40*x189 + dx9*parms[40] + dx90*x191 + dx91*x182
x193 = x154*x82
dx193 = dx154*x82 + dx82*x154
x194 = -x193
dx194 = -dx193
x195 = x40*x82
dx195 = dx40*x82 + dx82*x40
x196 = -x195
dx196 = -dx195
x197 = x40*x59
dx197 = dx40*x59 + dx59*x40
x198 = -x90
dx198 = -dx90
x199 = x197*x57 + x198*x38
dx199 = dx197*x57 + dx198*x38 + dx38*x198 + dx57*x197
x200 = x154*x59
dx200 = dx154*x59 + dx59*x154
x201 = -x91
dx201 = -dx91
x202 = -x200*x38 - x201*x37
dx202 = -dx200*x38 - dx201*x37 - dx37*x201 - dx38*x200
x203 = -x197*x38 - x198*x37
dx203 = -dx197*x38 - dx198*x37 - dx37*x198 - dx38*x197
x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
dx204 = dx194*parms[79] + dx196*parms[74] + dx199*parms[72] + dx202*x70 + dx203*parms[73] + dx70*x202
x205 = x200*x57 + x201*x38
dx205 = dx200*x57 + dx201*x38 + dx38*x201 + dx57*x200
x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
dx206 = dx194*x68 + dx196*parms[76] + dx199*parms[73] + dx203*parms[75] + dx205*parms[80] + dx68*x194
x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
dx207 = dx148*x200 + dx193*parms[66] + dx195*parms[64] + dx197*parms[62] + dx198*parms[65] + dx200*x148 + dx204*x38 + dx206*x57 + dx38*x204 + dx57*x206
x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
dx208 = dx196*parms[78] - dx199*parms[80] + dx202*parms[81]
x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
dx209 = -dx196*parms[79] + dx203*parms[80] + dx205*parms[81]
x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
dx210 = dx194*parms[68] + dx195*parms[61] + dx197*parms[60] + dx198*parms[62] + dx201*parms[67] + dx204*x57 + dx206*x76 + dx57*x204 + dx76*x206
x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
dx211 = dx196*parms[77] + dx199*parms[74] + dx202*parms[78] + dx203*parms[76] + dx205*x79 + dx79*x205
x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
dx212 = dx195*parms[63] + dx197*parms[61] + dx198*parms[64] + dx200*parms[68] + dx201*x78 - dx211 + dx78*x201
x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x154*x156 + x210*x30 + x212*x59
dx213 = dx154*x156 + dx156*x154 + dx210*x30 + dx212*x59 + dx30*x210 + dx40*parms[50] + dx59*x212 + dx90*parms[52] + dx91*parms[54]
x214 = -x59
dx214 = -dx59
x215 = x30*x76
dx215 = dx30*x76 + dx76*x30
x216 = x30*x57
dx216 = dx30*x57 + dx57*x30
x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
dx217 = dx214*parms[74] + dx215*parms[73] + dx216*parms[72]
x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
dx218 = dx214*parms[76] + dx215*parms[75] + dx216*parms[73]
x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
dx219 = dx214*parms[77] + dx215*parms[76] + dx216*parms[74]
x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
dx220 = dx217*x38 + dx218*x57 + dx30*parms[62] + dx38*x217 + dx57*x218 + dx59*parms[64]
x221 = parms[74]*x38 + parms[76]*x57
dx221 = dx38*parms[74] + dx57*parms[76]
#
dMdq1_out[0] = dx0*(2*parms[12]*x0 + 2*parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) - dx101*x4 + dx4*(2*parms[13]*x0 + 2*parms[15]*x4 - x101 - 0.03175*x66 + 0.03175*x99) + dx66*(-0.27857*x0 - 0.03175*x4) + dx7*x0*x96 + dx88*x0*x89 + dx89*x0*x88 + dx96*x0*x7 + dx99*(0.27857*x0 + 0.03175*x4)
dMdq1_out[1] = dx104
dMdq1_out[2] = dx101
dMdq1_out[3] = dx93
dMdq1_out[4] = dx85
dMdq1_out[5] = dx72
dMdq1_out[6] = dx80
dMdq1_out[7] = dx104
dMdq1_out[8] = dx102*(parms[32]*x7 + 2*parms[33]*x102 - x159) + dx103*(2*parms[32]*x89 + 2*parms[33]*x103 + x135*x64 + x152*x9) - 0.03175*dx105*parms[30] + dx115*parms[32]*x2 + dx135*x103*x64 + dx136*(0.00502*x89 - 0.03175) - dx151*x89 + dx152*x103*x9 + dx153*(0.00502*x89 - 0.03175) + dx155*x2*x9 + dx158*x2*x64 - dx159*x102 + dx2*(2*parms[24]*x2 + 2*parms[25]*x89 - 0.0635*parms[31] + parms[32]*x115 + x155*x9 + x158*x64) + dx64*(x103*x135 + x158*x2) + dx7*parms[32]*x102 + dx89*(2*parms[25]*x2 + 2*parms[27]*x89 + 0.03175*parms[30] + 2*parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153) + dx9*(x103*x152 + x155*x2)
dMdq1_out[9] = dx160
dMdq1_out[10] = dx151
dMdq1_out[11] = dx157
dMdq1_out[12] = dx149
dMdq1_out[13] = dx141
dMdq1_out[14] = dx101
dMdq1_out[15] = dx160
dMdq1_out[16] = dx13*(2*parms[36]*x13 + 2*parms[37]*x9 + 0.01004*parms[43] + x154*x182 + x181*x75 + x189*x20 + x191*x40) + dx154*x13*x182 + 0.00502*dx161*parms[42] + dx181*(x13*x75 + 0.00502*x40) + dx182*(x13*x154 + 0.00502*x90) + dx188*x9 + dx189*x13*x20 + dx191*x13*x40 + dx20*x13*x189 + dx40*(x13*x191 + 0.00502*x181) + dx75*x13*x181 + dx9*(2*parms[37]*x13 + 2*parms[39]*x9 - 0.00502*parms[42] + x188) + 0.00502*dx90*x182
dMdq1_out[17] = dx192
dMdq1_out[18] = dx188
dMdq1_out[19] = dx190
dMdq1_out[20] = dx183
dMdq1_out[21] = dx93
dMdq1_out[22] = dx151
dMdq1_out[23] = dx192
dMdq1_out[24] = dx154*(2*parms[56]*x90 + 2*parms[57]*x154 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + dx193*parms[69]*x154*x82 - dx194*parms[81]*x154*x82 + dx195*parms[68]*x154*x59 - dx196*parms[66]*x91 + dx197*(-parms[67]*x91 - parms[68]*x154*x82) + dx198*x154*(parms[66]*x82 - parms[67]*x59) - dx199*parms[79]*x154*x82 + dx200*parms[69]*x154*x59 + dx201*(parms[56]*x40 - parms[69]*x91) + dx203*parms[78]*x154*x82 - dx207*x90 + dx208*(x154*x59*x76 - x57*x91) + dx209*(x154*x57*x59 - x38*x91) + dx210*x40*x59 + dx212*x40*x82 - dx38*x209*x91 + dx40*(2*parms[48]*x40 + 2*parms[49]*x90 + parms[56]*x201 - parms[56]*x91 + x210*x59 + x212*x82) + dx57*(x154*x209*x59 - x208*x91) + dx59*(x154*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x210*x40) + dx76*x154*x208*x59 + dx82*(x154*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194) + x212*x40) + dx90*(2*parms[49]*x40 + 2*parms[51]*x90 + 2*parms[56]*x154 - x207) + dx91*(-parms[56]*x40 + 2*parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
dMdq1_out[25] = dx213
dMdq1_out[26] = dx207
dMdq1_out[27] = dx211
dMdq1_out[28] = dx85
dMdq1_out[29] = dx157
dMdq1_out[30] = dx188
dMdq1_out[31] = dx213
dMdq1_out[32] = dx217*x30*x57 + dx218*x30*x76 - dx219*x59 + dx30*(2*parms[60]*x30 + 2*parms[61]*x59 + x217*x57 + x218*x76) + dx57*x217*x30 + dx59*(2*parms[61]*x30 + 2*parms[63]*x59 - x219) + dx76*x218*x30
dMdq1_out[33] = dx220
dMdq1_out[34] = dx219
dMdq1_out[35] = dx72
dMdq1_out[36] = dx149
dMdq1_out[37] = dx190
dMdq1_out[38] = dx207
dMdq1_out[39] = dx220
dMdq1_out[40] = dx38*(2*parms[72]*x38 + 2*parms[73]*x57) + dx57*(2*parms[73]*x38 + 2*parms[75]*x57)
dMdq1_out[41] = dx221
dMdq1_out[42] = dx80
dMdq1_out[43] = dx141
dMdq1_out[44] = dx183
dMdq1_out[45] = dx211
dMdq1_out[46] = dx219
dMdq1_out[47] = dx221
dMdq1_out[48] = 0
#
return dMdq1_out
if jt_num == 2:
#
dMdq2_out = [0]*49
#
x0 = cos(q[1])
dx0 = 0
x1 = -x0
dx1 = -dx0
x2 = cos(q[2])
dx2 = -sin(q[2])
x3 = x1*x2
dx3 = dx1*x2 + dx2*x1
x4 = -sin(q[1])
dx4 = 0
x5 = -x4
dx5 = -dx4
x6 = 0.27857*x0 - 0.03175*x5
dx6 = 0.27857*dx0 - 0.03175*dx5
x7 = -x2
dx7 = -dx2
x8 = x6*x7
dx8 = dx6*x7 + dx7*x6
x9 = cos(q[3])
dx9 = 0
x10 = sin(q[2])
dx10 = cos(q[2])
x11 = x1*x10
dx11 = dx1*x10 + dx10*x1
x12 = -x11
dx12 = -dx11
x13 = sin(q[3])
dx13 = 0
x14 = x12*x13 + x5*x9
dx14 = dx12*x13 + dx13*x12 + dx5*x9 + dx9*x5
x15 = -x3
dx15 = -dx3
x16 = -x15
dx16 = -dx15
x17 = -0.00502*x13*x15 + x8*x9
dx17 = -0.00502*dx13*x15 - 0.00502*dx15*x13 + dx8*x9 + dx9*x8
x18 = sin(q[4])
dx18 = 0
x19 = 0.27747*x16 + x17
dx19 = 0.27747*dx16 + dx17
x20 = cos(q[4])
dx20 = 0
x21 = x10*x6
dx21 = dx10*x6 + dx6*x10
x22 = -x21
dx22 = -dx21
x23 = x22 + 0.00502*x5
dx23 = dx22 + 0.00502*dx5
x24 = x11*x9 + x13*x5
dx24 = dx11*x9 + dx13*x5 + dx5*x13 + dx9*x11
x25 = x23 + 0.27747*x24
dx25 = dx23 + 0.27747*dx24
x26 = -x18*x19 - x20*x25
dx26 = -dx18*x19 - dx19*x18 - dx20*x25 - dx25*x20
x27 = x16*x18 + x20*x24
dx27 = dx16*x18 + dx18*x16 + dx20*x24 + dx24*x20
x28 = -x27
dx28 = -dx27
x29 = sin(q[5])
dx29 = 0
x30 = cos(q[5])
dx30 = 0
x31 = x14*x30 + x28*x29
dx31 = dx14*x30 + dx28*x29 + dx29*x28 + dx30*x14
x32 = -x26
dx32 = -dx26
x33 = -x14*x29 - x27*x30
dx33 = -dx14*x29 - dx27*x30 - dx29*x14 - dx30*x27
x34 = -x33
dx34 = -dx33
x35 = -x15*x20 - x18*x24
dx35 = -dx15*x20 - dx18*x24 - dx20*x15 - dx24*x18
x36 = -x35
dx36 = -dx35
x37 = sin(q[6])
dx37 = 0
x38 = cos(q[6])
dx38 = 0
x39 = -x31*x38 - x36*x37
dx39 = -dx31*x38 - dx36*x37 - dx37*x36 - dx38*x31
x40 = -x18
dx40 = -dx18
x41 = x19*x20 + x25*x40
dx41 = dx19*x20 + dx20*x19 + dx25*x40 + dx40*x25
x42 = -x41
dx42 = -dx41
x43 = -x13*x8 - 0.00502*x15*x9
dx43 = -dx13*x8 - 0.00502*dx15*x9 - dx8*x13 - 0.00502*dx9*x15
x44 = x29*x42 + x30*x43
dx44 = dx29*x42 + dx30*x43 + dx42*x29 + dx43*x30
x45 = -x44
dx45 = -dx44
x46 = x32*x38 + x37*x45
dx46 = dx32*x38 + dx37*x45 + dx38*x32 + dx45*x37
x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
dx47 = -dx34*parms[79] + dx39*parms[80] + dx46*parms[81]
x48 = -x32*x37 - x38*x44
dx48 = -dx32*x37 - dx37*x32 - dx38*x44 - dx44*x38
x49 = -x31
dx49 = -dx31
x50 = x36*x38 + x37*x49
dx50 = dx36*x38 + dx37*x49 + dx38*x36 + dx49*x37
x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
dx51 = -dx34*parms[78] - dx48*parms[81] + dx50*parms[80]
x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
dx52 = dx14*parms[54] + dx26*parms[57] + dx28*parms[56] - dx31*parms[67] - dx32*parms[69] - dx34*parms[66] - dx37*x51 - dx38*x47 - dx47*x38 - dx51*x37
x53 = -x14
dx53 = -dx14
x54 = -x29*x43 - x30*x41
dx54 = -dx29*x43 - dx30*x41 - dx41*x30 - dx43*x29
x55 = -x54
dx55 = -dx54
x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
dx56 = -dx36*parms[66] - dx39*parms[78] - dx49*parms[68] + dx50*parms[79] - dx54*parms[69] + dx55*parms[81]
x57 = -x37
dx57 = -dx37
x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
dx58 = dx33*parms[68] - dx36*parms[67] + dx38*x51 + dx44*parms[69] + dx47*x57 + dx51*x38 + dx57*x47
x59 = -x29
dx59 = -dx29
x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
dx60 = dx30*x56 + dx35*parms[56] + dx41*parms[57] + dx53*parms[55] + dx56*x30 + dx58*x59 + dx59*x58
x61 = x20*x60
dx61 = dx20*x60 + dx60*x20
x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
dx62 = dx14*parms[44] + dx16*parms[43] + dx17*parms[45] + dx40*x52 + dx52*x40 + dx61
x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
dx63 = dx15*parms[42] - dx24*parms[44] + dx27*parms[55] + dx29*x56 + dx30*x58 + dx36*parms[54] + dx43*(parms[45] + parms[57]) + dx56*x29 + dx58*x30
x64 = -x13
dx64 = -dx13
x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
dx65 = dx3*parms[32] - dx5*parms[31] + dx62*x9 + dx63*x64 + dx64*x63 + dx8*parms[33] + dx9*x62
x66 = x2*x65
dx66 = dx2*x65 + dx65*x2
x67 = -x43
dx67 = -dx43
x68 = -parms[78]
dx68 = 0
x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
dx69 = dx34*parms[76] + dx39*parms[75] + dx46*parms[80] + dx50*parms[73] + dx55*x68 + dx68*x55
x70 = -parms[80]
dx70 = 0
x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
dx71 = dx34*parms[74] + dx39*parms[73] + dx48*x70 + dx50*parms[72] + dx55*parms[79] + dx70*x48
x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
dx72 = dx31*parms[62] + dx33*parms[64] + dx36*parms[65] + dx38*x71 + dx45*parms[67] + dx54*parms[66] + dx57*x69 + dx69*x57 + dx71*x38
x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
dx73 = dx14*parms[52] + dx27*parms[49] + dx35*parms[51] + dx41*parms[56] + dx67*parms[54] - dx72
x74 = x20*x52
dx74 = dx20*x52 + dx52*x20
x75 = -0.27747*x18
dx75 = -0.27747*dx18
x76 = -x38
dx76 = -dx38
x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
dx77 = dx31*parms[60] + dx32*parms[67] + dx33*parms[61] + dx36*parms[62] + dx55*parms[68] + dx57*x71 + dx69*x76 + dx71*x57 + dx76*x69
x78 = -parms[66]
dx78 = 0
x79 = -parms[79]
dx79 = 0
x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
dx80 = dx34*parms[77] + dx39*parms[76] + dx46*x79 + dx48*parms[78] + dx50*parms[74] + dx79*x46
x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
dx81 = dx31*parms[61] + dx32*x78 + dx33*parms[63] + dx36*parms[64] + dx44*parms[68] + dx78*x32 - dx80
x82 = -x30
dx82 = -dx30
x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
dx83 = dx14*parms[50] + dx27*parms[48] + dx32*parms[56] + dx35*parms[49] + dx43*parms[55] + dx59*x77 + dx77*x59 + dx81*x82 + dx82*x81
x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
dx84 = dx14*parms[37] + dx15*parms[38] + dx20*x83 + dx23*parms[43] + dx24*parms[36] + dx40*x73 + dx60*x75 + dx67*parms[44] + dx73*x40 - 0.27747*dx74 + dx75*x60 + dx83*x20
x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
dx85 = dx14*parms[53] + dx26*parms[54] + dx27*parms[50] + dx30*x77 + dx35*parms[52] + dx42*parms[55] + dx59*x81 + dx77*x30 + dx81*x59
x86 = -parms[42]
dx86 = 0
x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
dx87 = dx14*parms[39] + dx15*parms[40] + dx17*parms[44] + dx23*x86 + dx24*parms[37] + dx85 + dx86*x23
x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
dx88 = dx11*parms[24] + dx22*parms[32] + dx3*parms[25] + dx5*parms[26] + dx64*x87 + dx84*x9 + dx87*x64 + dx9*x84
x89 = -x10
dx89 = -dx10
x90 = -x20
dx90 = -dx20
x91 = 0.27747*x18
dx91 = 0.27747*dx18
x92 = -parms[43]
dx92 = 0
x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
dx93 = dx14*parms[40] + dx15*parms[41] + dx17*x92 + dx24*parms[38] + dx40*x83 + dx43*parms[42] + dx52*x91 - 0.27747*dx61 + dx73*x90 + dx83*x40 + dx90*x73 + dx91*x52 + dx92*x17
x94 = x13*x62
dx94 = dx13*x62 + dx62*x13
x95 = x63*x9
dx95 = dx63*x9 + dx9*x63
x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
dx96 = dx11*parms[25] + dx3*parms[27] + dx5*parms[28] + dx8*parms[32] - dx93 + 0.00502*dx94 + 0.00502*dx95
x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
dx97 = dx23*parms[45] + dx24*parms[43] + dx40*x60 + dx53*parms[42] + dx60*x40 - dx74
x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
dx98 = dx12*parms[32] + dx21*parms[33] + dx5*parms[30] - dx97
x99 = x10*x98
dx99 = dx10*x98 + dx98*x10
x100 = -parms[31]
dx100 = 0
x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
dx101 = dx100*x8 + dx11*parms[26] + dx13*x84 + dx21*parms[30] + dx3*parms[28] + dx5*parms[29] + dx8*x100 + dx84*x13 + dx87*x9 + dx9*x87 + 0.00502*dx97
x102 = -0.27857*x2
dx102 = -0.27857*dx2
x103 = -0.27857*x10
dx103 = -0.27857*dx10
x104 = parms[14]*x0 + parms[16]*x4 - 0.03175*parms[30]*x15 - 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 - 0.03175*x94 - 0.03175*x95
dx104 = dx0*parms[14] + dx102*x98 + dx103*x65 - 0.03175*dx11*parms[31] - 0.03175*dx15*parms[30] + dx2*x88 + dx4*parms[16] + dx65*x103 + dx88*x2 + dx89*x96 - 0.03175*dx94 - 0.03175*dx95 + dx96*x89 + dx98*x102
x105 = -x89
dx105 = -dx89
x106 = 0.00502*x105 + 0.03175
dx106 = 0.00502*dx105
x107 = -x103*x13 - x106*x9
dx107 = -dx103*x13 - dx106*x9 - dx13*x103 - dx9*x106
x108 = x2*x9
dx108 = dx2*x9 + dx9*x2
x109 = -x105*x20 - x108*x18
dx109 = -dx105*x20 - dx108*x18 - dx18*x108 - dx20*x105
x110 = -x109
dx110 = -dx109
x111 = x105*x40 + x108*x20
dx111 = dx105*x40 + dx108*x20 + dx20*x108 + dx40*x105
x112 = -x105
dx112 = -dx105
x113 = x103*x9 + x106*x64
dx113 = dx103*x9 + dx106*x64 + dx64*x106 + dx9*x103
x114 = 0.27747*x112 + x113
dx114 = 0.27747*dx112 + dx113
x115 = -x102
dx115 = -dx102
x116 = 0.27747*x108 + x115
dx116 = 0.27747*dx108 + dx115
x117 = x114*x20 + x116*x40
dx117 = dx114*x20 + dx116*x40 + dx20*x114 + dx40*x116
x118 = x107*x30 + x117*x59
dx118 = dx107*x30 + dx117*x59 + dx30*x107 + dx59*x117
x119 = x2*x64
dx119 = dx2*x64 + dx64*x2
x120 = -x111*x30 - x119*x29
dx120 = -dx111*x30 - dx119*x29 - dx29*x119 - dx30*x111
x121 = -x120
dx121 = -dx120
x122 = -x114*x18 - x116*x20
dx122 = -dx114*x18 - dx116*x20 - dx18*x114 - dx20*x116
x123 = -x122
dx123 = -dx122
x124 = x118*x57 + x123*x38
dx124 = dx118*x57 + dx123*x38 + dx38*x123 + dx57*x118
x125 = x111*x59 + x119*x30
dx125 = dx111*x59 + dx119*x30 + dx30*x119 + dx59*x111
x126 = -x110*x37 - x125*x38
dx126 = -dx110*x37 - dx125*x38 - dx37*x110 - dx38*x125
x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
dx127 = -dx121*parms[79] + dx124*parms[81] + dx126*parms[80]
x128 = x110*x38 + x125*x57
dx128 = dx110*x38 + dx125*x57 + dx38*x110 + dx57*x125
x129 = -x118*x38 - x123*x37
dx129 = -dx118*x38 - dx123*x37 - dx37*x123 - dx38*x118
x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
dx130 = dx121*parms[78] - dx128*parms[80] + dx129*parms[81]
x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
dx131 = -dx110*parms[67] + dx118*parms[69] + dx120*parms[68] + dx127*x57 + dx130*x76 + dx57*x127 + dx76*x130
x132 = -x107*x29 - x117*x30
dx132 = -dx107*x29 - dx117*x30 - dx29*x107 - dx30*x117
x133 = -x132
dx133 = -dx132
x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
dx134 = dx110*parms[66] - dx125*parms[68] + dx126*parms[78] - dx128*parms[79] + dx132*parms[69] - dx133*parms[81]
x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
dx135 = dx105*parms[42] + dx107*(parms[45] + parms[57]) - dx108*parms[44] + dx110*parms[54] + dx111*parms[55] + dx131*x30 + dx134*x59 + dx30*x131 + dx59*x134
x136 = x135*x9
dx136 = dx135*x9 + dx9*x135
x137 = -x119
dx137 = -dx119
x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
dx138 = dx109*parms[56] + dx117*parms[57] + dx131*x59 + dx134*x82 + dx137*parms[55] + dx59*x131 + dx82*x134
x139 = x138*x20
dx139 = dx138*x20 + dx20*x138
x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
dx140 = -dx111*parms[56] + dx119*parms[54] - dx121*parms[66] + dx122*parms[57] - dx123*parms[69] - dx125*parms[67] - dx127*x38 - dx130*x57 - dx38*x127 - dx57*x130
x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
dx141 = dx121*parms[77] + dx124*x79 + dx126*parms[76] + dx128*parms[74] + dx129*parms[78] + dx79*x124
x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
dx142 = dx110*parms[64] + dx118*parms[68] + dx120*parms[63] + dx123*x78 + dx125*parms[61] - dx141 + dx78*x123
x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
dx143 = dx121*parms[74] + dx126*parms[73] + dx128*parms[72] + dx129*x70 + dx133*parms[79] + dx70*x129
x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
dx144 = dx121*parms[76] + dx124*parms[80] + dx126*parms[75] + dx128*parms[73] + dx133*x68 + dx68*x133
x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
dx145 = dx110*parms[62] + dx120*parms[61] + dx123*parms[67] + dx125*parms[60] + dx133*parms[68] + dx143*x57 + dx144*x76 + dx57*x143 + dx76*x144
x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
dx146 = dx107*parms[55] + dx109*parms[49] + dx111*parms[48] + dx119*parms[50] + dx123*parms[56] + dx142*x82 + dx145*x59 + dx59*x145 + dx82*x142
x147 = -x107
dx147 = -dx107
x148 = -parms[67]
dx148 = 0
x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
dx149 = dx110*parms[65] + dx118*x148 + dx120*parms[64] + dx125*parms[62] + dx132*parms[66] + dx143*x38 + dx144*x57 + dx148*x118 + dx38*x143 + dx57*x144
x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
dx150 = dx109*parms[51] + dx111*parms[49] + dx117*parms[56] + dx119*parms[52] + dx147*parms[54] - dx149
x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
dx151 = dx105*parms[41] + dx107*parms[42] + dx108*parms[38] + dx113*x92 + dx119*parms[40] - 0.27747*dx139 + dx140*x91 + dx146*x40 + dx150*x90 + dx40*x146 + dx90*x150 + dx91*x140 + dx92*x113
x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
dx152 = dx112*parms[43] + dx113*parms[45] + dx119*parms[44] + dx139 + dx140*x40 + dx40*x140
x153 = x13*x152
dx153 = dx13*x152 + dx152*x13
x154 = -0.27747*x20
dx154 = -0.27747*dx20
x155 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x154 + x146*x20 + x150*x40
dx155 = dx105*parms[38] + dx108*parms[36] + dx115*parms[43] + dx119*parms[37] + dx138*x75 + dx140*x154 + dx146*x20 + dx147*parms[44] + dx150*x40 + dx154*x140 + dx20*x146 + dx40*x150 + dx75*x138
x156 = -parms[55]
dx156 = 0
x157 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x156 + x142*x59 + x145*x30
dx157 = dx109*parms[52] + dx111*parms[50] + dx117*x156 + dx119*parms[53] + dx122*parms[54] + dx142*x59 + dx145*x30 + dx156*x117 + dx30*x145 + dx59*x142
x158 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x157
dx158 = dx105*parms[40] + dx108*parms[37] + dx113*parms[44] + dx115*x86 + dx119*parms[39] + dx157 + dx86*x115
x159 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
dx159 = dx108*parms[43] + dx115*parms[45] + dx137*parms[42] + dx138*x40 + dx140*x90 + dx40*x138 + dx90*x140
x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x155 + x158*x9 + 0.00502*x159
dx160 = dx100*x103 + dx102*parms[30] + dx103*x100 + dx13*x155 + dx155*x13 + dx158*x9 + 0.00502*dx159 + dx2*parms[26] + dx89*parms[28] + dx9*x158
x161 = -x9
dx161 = -dx9
x162 = x13*x20
dx162 = dx13*x20 + dx20*x13
x163 = x162*x59 + x30*x9
dx163 = dx162*x59 + dx30*x9 + dx59*x162 + dx9*x30
x164 = x13*x40
dx164 = dx13*x40 + dx40*x13
x165 = -x164
dx165 = -dx164
x166 = -x163*x38 - x165*x37
dx166 = -dx163*x38 - dx165*x37 - dx37*x165 - dx38*x163
x167 = x163*x57 + x165*x38
dx167 = dx163*x57 + dx165*x38 + dx38*x165 + dx57*x163
x168 = 0.27747*x13 + 0.00502
dx168 = 0.27747*dx13
x169 = x168*x40
dx169 = dx168*x40 + dx40*x168
x170 = x169*x82
dx170 = dx169*x82 + dx82*x169
x171 = -x170
dx171 = -dx170
x172 = x169*x59
dx172 = dx169*x59 + dx59*x169
x173 = -x162*x30 - x29*x9
dx173 = -dx162*x30 - dx29*x9 - dx30*x162 - dx9*x29
x174 = -x173
dx174 = -dx173
x175 = x168*x90
dx175 = dx168*x90 + dx90*x168
x176 = -x175
dx176 = -dx175
x177 = x172*x57 + x176*x38
dx177 = dx172*x57 + dx176*x38 + dx38*x176 + dx57*x172
x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
dx178 = dx166*parms[80] - dx174*parms[79] + dx177*parms[81]
x179 = -x172*x38 - x176*x37
dx179 = -dx172*x38 - dx176*x37 - dx37*x176 - dx38*x172
x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
dx180 = -dx167*parms[80] + dx174*parms[78] + dx179*parms[81]
x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
dx181 = dx161*parms[55] - dx163*parms[68]*x82 + dx164*parms[56] + dx165*(parms[66]*x82 - parms[67]*x59) + dx166*parms[78]*x82 - dx167*parms[79]*x82 + dx169*parms[57] + dx170*parms[69]*x82 - dx171*parms[81]*x82 + dx172*parms[69]*x59 + dx173*parms[68]*x59 + dx178*x57*x59 + dx180*x59*x76 + dx57*x178*x59 + dx59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + dx76*x180*x59 + dx82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
dx182 = -dx162*parms[56] - dx163*parms[67] - dx174*parms[66] + dx175*parms[57] - dx176*parms[69] - dx178*x38 - dx180*x57 - dx38*x178 - dx57*x180 + dx9*parms[54]
x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
dx183 = dx166*parms[76] + dx167*parms[74] + dx174*parms[77] + dx177*x79 + dx179*parms[78] + dx79*x177
x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
dx184 = dx163*parms[61] + dx165*parms[64] + dx172*parms[68] + dx173*parms[63] + dx176*x78 - dx183 + dx78*x176
x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
dx185 = dx166*parms[75] + dx167*parms[73] + dx171*x68 + dx174*parms[76] + dx177*parms[80] + dx68*x171
x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
dx186 = dx166*parms[73] + dx167*parms[72] + dx171*parms[79] + dx174*parms[74] + dx179*x70 + dx70*x179
x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
dx187 = dx163*parms[60] + dx165*parms[62] + dx171*parms[68] + dx173*parms[61] + dx176*parms[67] + dx185*x76 + dx186*x57 + dx57*x186 + dx76*x185
x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x156*x169 + x184*x59 + x187*x30
dx188 = dx156*x169 + dx162*parms[50] + dx164*parms[52] + dx169*x156 + dx175*parms[54] + dx184*x59 + dx187*x30 + dx30*x187 + dx59*x184 + dx9*parms[53]
x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
dx189 = dx162*parms[48] + dx164*parms[49] + dx176*parms[56] + dx184*x82 + dx187*x59 + dx59*x187 + dx82*x184 + dx9*parms[50]
x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
dx190 = dx148*x172 + dx163*parms[62] + dx165*parms[65] + dx170*parms[66] + dx172*x148 + dx173*parms[64] + dx185*x57 + dx186*x38 + dx38*x186 + dx57*x185
x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
dx191 = dx162*parms[49] + dx164*parms[51] + dx169*parms[56] - dx190 + dx9*parms[52]
x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
dx192 = dx13*parms[38] - 0.27747*dx181*x20 + dx182*x91 + dx189*x40 + dx191*x90 - 0.27747*dx20*x181 + dx40*x189 + dx9*parms[40] + dx90*x191 + dx91*x182
x193 = x154*x82
dx193 = dx154*x82 + dx82*x154
x194 = -x193
dx194 = -dx193
x195 = x40*x82
dx195 = dx40*x82 + dx82*x40
x196 = -x195
dx196 = -dx195
x197 = x40*x59
dx197 = dx40*x59 + dx59*x40
x198 = -x90
dx198 = -dx90
x199 = x197*x57 + x198*x38
dx199 = dx197*x57 + dx198*x38 + dx38*x198 + dx57*x197
x200 = x154*x59
dx200 = dx154*x59 + dx59*x154
x201 = -x91
dx201 = -dx91
x202 = -x200*x38 - x201*x37
dx202 = -dx200*x38 - dx201*x37 - dx37*x201 - dx38*x200
x203 = -x197*x38 - x198*x37
dx203 = -dx197*x38 - dx198*x37 - dx37*x198 - dx38*x197
x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
dx204 = dx194*parms[79] + dx196*parms[74] + dx199*parms[72] + dx202*x70 + dx203*parms[73] + dx70*x202
x205 = x200*x57 + x201*x38
dx205 = dx200*x57 + dx201*x38 + dx38*x201 + dx57*x200
x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
dx206 = dx194*x68 + dx196*parms[76] + dx199*parms[73] + dx203*parms[75] + dx205*parms[80] + dx68*x194
x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
dx207 = dx148*x200 + dx193*parms[66] + dx195*parms[64] + dx197*parms[62] + dx198*parms[65] + dx200*x148 + dx204*x38 + dx206*x57 + dx38*x204 + dx57*x206
x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
dx208 = dx196*parms[78] - dx199*parms[80] + dx202*parms[81]
x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
dx209 = -dx196*parms[79] + dx203*parms[80] + dx205*parms[81]
x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
dx210 = dx194*parms[68] + dx195*parms[61] + dx197*parms[60] + dx198*parms[62] + dx201*parms[67] + dx204*x57 + dx206*x76 + dx57*x204 + dx76*x206
x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
dx211 = dx196*parms[77] + dx199*parms[74] + dx202*parms[78] + dx203*parms[76] + dx205*x79 + dx79*x205
x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
dx212 = dx195*parms[63] + dx197*parms[61] + dx198*parms[64] + dx200*parms[68] + dx201*x78 - dx211 + dx78*x201
x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x154*x156 + x210*x30 + x212*x59
dx213 = dx154*x156 + dx156*x154 + dx210*x30 + dx212*x59 + dx30*x210 + dx40*parms[50] + dx59*x212 + dx90*parms[52] + dx91*parms[54]
x214 = -x59
dx214 = -dx59
x215 = x30*x76
dx215 = dx30*x76 + dx76*x30
x216 = x30*x57
dx216 = dx30*x57 + dx57*x30
x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
dx217 = dx214*parms[74] + dx215*parms[73] + dx216*parms[72]
x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
dx218 = dx214*parms[76] + dx215*parms[75] + dx216*parms[73]
x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
dx219 = dx214*parms[77] + dx215*parms[76] + dx216*parms[74]
x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
dx220 = dx217*x38 + dx218*x57 + dx30*parms[62] + dx38*x217 + dx57*x218 + dx59*parms[64]
x221 = parms[74]*x38 + parms[76]*x57
dx221 = dx38*parms[74] + dx57*parms[76]
#
dMdq2_out[0] = dx0*(2*parms[12]*x0 + 2*parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) - dx101*x4 + dx4*(2*parms[13]*x0 + 2*parms[15]*x4 - x101 - 0.03175*x66 + 0.03175*x99) + dx66*(-0.27857*x0 - 0.03175*x4) + dx7*x0*x96 + dx88*x0*x89 + dx89*x0*x88 + dx96*x0*x7 + dx99*(0.27857*x0 + 0.03175*x4)
dMdq2_out[1] = dx104
dMdq2_out[2] = dx101
dMdq2_out[3] = dx93
dMdq2_out[4] = dx85
dMdq2_out[5] = dx72
dMdq2_out[6] = dx80
dMdq2_out[7] = dx104
dMdq2_out[8] = dx102*(parms[32]*x7 + 2*parms[33]*x102 - x159) + dx103*(2*parms[32]*x89 + 2*parms[33]*x103 + x135*x64 + x152*x9) - 0.03175*dx105*parms[30] + dx115*parms[32]*x2 + dx135*x103*x64 + dx136*(0.00502*x89 - 0.03175) - dx151*x89 + dx152*x103*x9 + dx153*(0.00502*x89 - 0.03175) + dx155*x2*x9 + dx158*x2*x64 - dx159*x102 + dx2*(2*parms[24]*x2 + 2*parms[25]*x89 - 0.0635*parms[31] + parms[32]*x115 + x155*x9 + x158*x64) + dx64*(x103*x135 + x158*x2) + dx7*parms[32]*x102 + dx89*(2*parms[25]*x2 + 2*parms[27]*x89 + 0.03175*parms[30] + 2*parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153) + dx9*(x103*x152 + x155*x2)
dMdq2_out[9] = dx160
dMdq2_out[10] = dx151
dMdq2_out[11] = dx157
dMdq2_out[12] = dx149
dMdq2_out[13] = dx141
dMdq2_out[14] = dx101
dMdq2_out[15] = dx160
dMdq2_out[16] = dx13*(2*parms[36]*x13 + 2*parms[37]*x9 + 0.01004*parms[43] + x154*x182 + x181*x75 + x189*x20 + x191*x40) + dx154*x13*x182 + 0.00502*dx161*parms[42] + dx181*(x13*x75 + 0.00502*x40) + dx182*(x13*x154 + 0.00502*x90) + dx188*x9 + dx189*x13*x20 + dx191*x13*x40 + dx20*x13*x189 + dx40*(x13*x191 + 0.00502*x181) + dx75*x13*x181 + dx9*(2*parms[37]*x13 + 2*parms[39]*x9 - 0.00502*parms[42] + x188) + 0.00502*dx90*x182
dMdq2_out[17] = dx192
dMdq2_out[18] = dx188
dMdq2_out[19] = dx190
dMdq2_out[20] = dx183
dMdq2_out[21] = dx93
dMdq2_out[22] = dx151
dMdq2_out[23] = dx192
dMdq2_out[24] = dx154*(2*parms[56]*x90 + 2*parms[57]*x154 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + dx193*parms[69]*x154*x82 - dx194*parms[81]*x154*x82 + dx195*parms[68]*x154*x59 - dx196*parms[66]*x91 + dx197*(-parms[67]*x91 - parms[68]*x154*x82) + dx198*x154*(parms[66]*x82 - parms[67]*x59) - dx199*parms[79]*x154*x82 + dx200*parms[69]*x154*x59 + dx201*(parms[56]*x40 - parms[69]*x91) + dx203*parms[78]*x154*x82 - dx207*x90 + dx208*(x154*x59*x76 - x57*x91) + dx209*(x154*x57*x59 - x38*x91) + dx210*x40*x59 + dx212*x40*x82 - dx38*x209*x91 + dx40*(2*parms[48]*x40 + 2*parms[49]*x90 + parms[56]*x201 - parms[56]*x91 + x210*x59 + x212*x82) + dx57*(x154*x209*x59 - x208*x91) + dx59*(x154*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x210*x40) + dx76*x154*x208*x59 + dx82*(x154*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194) + x212*x40) + dx90*(2*parms[49]*x40 + 2*parms[51]*x90 + 2*parms[56]*x154 - x207) + dx91*(-parms[56]*x40 + 2*parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
dMdq2_out[25] = dx213
dMdq2_out[26] = dx207
dMdq2_out[27] = dx211
dMdq2_out[28] = dx85
dMdq2_out[29] = dx157
dMdq2_out[30] = dx188
dMdq2_out[31] = dx213
dMdq2_out[32] = dx217*x30*x57 + dx218*x30*x76 - dx219*x59 + dx30*(2*parms[60]*x30 + 2*parms[61]*x59 + x217*x57 + x218*x76) + dx57*x217*x30 + dx59*(2*parms[61]*x30 + 2*parms[63]*x59 - x219) + dx76*x218*x30
dMdq2_out[33] = dx220
dMdq2_out[34] = dx219
dMdq2_out[35] = dx72
dMdq2_out[36] = dx149
dMdq2_out[37] = dx190
dMdq2_out[38] = dx207
dMdq2_out[39] = dx220
dMdq2_out[40] = dx38*(2*parms[72]*x38 + 2*parms[73]*x57) + dx57*(2*parms[73]*x38 + 2*parms[75]*x57)
dMdq2_out[41] = dx221
dMdq2_out[42] = dx80
dMdq2_out[43] = dx141
dMdq2_out[44] = dx183
dMdq2_out[45] = dx211
dMdq2_out[46] = dx219
dMdq2_out[47] = dx221
dMdq2_out[48] = 0
#
return dMdq2_out
if jt_num == 3:
#
dMdq3_out = [0]*49
#
x0 = cos(q[1])
dx0 = 0
x1 = -x0
dx1 = -dx0
x2 = cos(q[2])
dx2 = 0
x3 = x1*x2
dx3 = dx1*x2 + dx2*x1
x4 = -sin(q[1])
dx4 = 0
x5 = -x4
dx5 = -dx4
x6 = 0.27857*x0 - 0.03175*x5
dx6 = 0.27857*dx0 - 0.03175*dx5
x7 = -x2
dx7 = -dx2
x8 = x6*x7
dx8 = dx6*x7 + dx7*x6
x9 = cos(q[3])
dx9 = -sin(q[3])
x10 = sin(q[2])
dx10 = 0
x11 = x1*x10
dx11 = dx1*x10 + dx10*x1
x12 = -x11
dx12 = -dx11
x13 = sin(q[3])
dx13 = cos(q[3])
x14 = x12*x13 + x5*x9
dx14 = dx12*x13 + dx13*x12 + dx5*x9 + dx9*x5
x15 = -x3
dx15 = -dx3
x16 = -x15
dx16 = -dx15
x17 = -0.00502*x13*x15 + x8*x9
dx17 = -0.00502*dx13*x15 - 0.00502*dx15*x13 + dx8*x9 + dx9*x8
x18 = sin(q[4])
dx18 = 0
x19 = 0.27747*x16 + x17
dx19 = 0.27747*dx16 + dx17
x20 = cos(q[4])
dx20 = 0
x21 = x10*x6
dx21 = dx10*x6 + dx6*x10
x22 = -x21
dx22 = -dx21
x23 = x22 + 0.00502*x5
dx23 = dx22 + 0.00502*dx5
x24 = x11*x9 + x13*x5
dx24 = dx11*x9 + dx13*x5 + dx5*x13 + dx9*x11
x25 = x23 + 0.27747*x24
dx25 = dx23 + 0.27747*dx24
x26 = -x18*x19 - x20*x25
dx26 = -dx18*x19 - dx19*x18 - dx20*x25 - dx25*x20
x27 = x16*x18 + x20*x24
dx27 = dx16*x18 + dx18*x16 + dx20*x24 + dx24*x20
x28 = -x27
dx28 = -dx27
x29 = sin(q[5])
dx29 = 0
x30 = cos(q[5])
dx30 = 0
x31 = x14*x30 + x28*x29
dx31 = dx14*x30 + dx28*x29 + dx29*x28 + dx30*x14
x32 = -x26
dx32 = -dx26
x33 = -x14*x29 - x27*x30
dx33 = -dx14*x29 - dx27*x30 - dx29*x14 - dx30*x27
x34 = -x33
dx34 = -dx33
x35 = -x15*x20 - x18*x24
dx35 = -dx15*x20 - dx18*x24 - dx20*x15 - dx24*x18
x36 = -x35
dx36 = -dx35
x37 = sin(q[6])
dx37 = 0
x38 = cos(q[6])
dx38 = 0
x39 = -x31*x38 - x36*x37
dx39 = -dx31*x38 - dx36*x37 - dx37*x36 - dx38*x31
x40 = -x18
dx40 = -dx18
x41 = x19*x20 + x25*x40
dx41 = dx19*x20 + dx20*x19 + dx25*x40 + dx40*x25
x42 = -x41
dx42 = -dx41
x43 = -x13*x8 - 0.00502*x15*x9
dx43 = -dx13*x8 - 0.00502*dx15*x9 - dx8*x13 - 0.00502*dx9*x15
x44 = x29*x42 + x30*x43
dx44 = dx29*x42 + dx30*x43 + dx42*x29 + dx43*x30
x45 = -x44
dx45 = -dx44
x46 = x32*x38 + x37*x45
dx46 = dx32*x38 + dx37*x45 + dx38*x32 + dx45*x37
x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
dx47 = -dx34*parms[79] + dx39*parms[80] + dx46*parms[81]
x48 = -x32*x37 - x38*x44
dx48 = -dx32*x37 - dx37*x32 - dx38*x44 - dx44*x38
x49 = -x31
dx49 = -dx31
x50 = x36*x38 + x37*x49
dx50 = dx36*x38 + dx37*x49 + dx38*x36 + dx49*x37
x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
dx51 = -dx34*parms[78] - dx48*parms[81] + dx50*parms[80]
x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
dx52 = dx14*parms[54] + dx26*parms[57] + dx28*parms[56] - dx31*parms[67] - dx32*parms[69] - dx34*parms[66] - dx37*x51 - dx38*x47 - dx47*x38 - dx51*x37
x53 = -x14
dx53 = -dx14
x54 = -x29*x43 - x30*x41
dx54 = -dx29*x43 - dx30*x41 - dx41*x30 - dx43*x29
x55 = -x54
dx55 = -dx54
x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
dx56 = -dx36*parms[66] - dx39*parms[78] - dx49*parms[68] + dx50*parms[79] - dx54*parms[69] + dx55*parms[81]
x57 = -x37
dx57 = -dx37
x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
dx58 = dx33*parms[68] - dx36*parms[67] + dx38*x51 + dx44*parms[69] + dx47*x57 + dx51*x38 + dx57*x47
x59 = -x29
dx59 = -dx29
x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
dx60 = dx30*x56 + dx35*parms[56] + dx41*parms[57] + dx53*parms[55] + dx56*x30 + dx58*x59 + dx59*x58
x61 = x20*x60
dx61 = dx20*x60 + dx60*x20
x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
dx62 = dx14*parms[44] + dx16*parms[43] + dx17*parms[45] + dx40*x52 + dx52*x40 + dx61
x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
dx63 = dx15*parms[42] - dx24*parms[44] + dx27*parms[55] + dx29*x56 + dx30*x58 + dx36*parms[54] + dx43*(parms[45] + parms[57]) + dx56*x29 + dx58*x30
x64 = -x13
dx64 = -dx13
x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
dx65 = dx3*parms[32] - dx5*parms[31] + dx62*x9 + dx63*x64 + dx64*x63 + dx8*parms[33] + dx9*x62
x66 = x2*x65
dx66 = dx2*x65 + dx65*x2
x67 = -x43
dx67 = -dx43
x68 = -parms[78]
dx68 = 0
x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
dx69 = dx34*parms[76] + dx39*parms[75] + dx46*parms[80] + dx50*parms[73] + dx55*x68 + dx68*x55
x70 = -parms[80]
dx70 = 0
x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
dx71 = dx34*parms[74] + dx39*parms[73] + dx48*x70 + dx50*parms[72] + dx55*parms[79] + dx70*x48
x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
dx72 = dx31*parms[62] + dx33*parms[64] + dx36*parms[65] + dx38*x71 + dx45*parms[67] + dx54*parms[66] + dx57*x69 + dx69*x57 + dx71*x38
x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
dx73 = dx14*parms[52] + dx27*parms[49] + dx35*parms[51] + dx41*parms[56] + dx67*parms[54] - dx72
x74 = x20*x52
dx74 = dx20*x52 + dx52*x20
x75 = -0.27747*x18
dx75 = -0.27747*dx18
x76 = -x38
dx76 = -dx38
x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
dx77 = dx31*parms[60] + dx32*parms[67] + dx33*parms[61] + dx36*parms[62] + dx55*parms[68] + dx57*x71 + dx69*x76 + dx71*x57 + dx76*x69
x78 = -parms[66]
dx78 = 0
x79 = -parms[79]
dx79 = 0
x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
dx80 = dx34*parms[77] + dx39*parms[76] + dx46*x79 + dx48*parms[78] + dx50*parms[74] + dx79*x46
x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
dx81 = dx31*parms[61] + dx32*x78 + dx33*parms[63] + dx36*parms[64] + dx44*parms[68] + dx78*x32 - dx80
x82 = -x30
dx82 = -dx30
x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
dx83 = dx14*parms[50] + dx27*parms[48] + dx32*parms[56] + dx35*parms[49] + dx43*parms[55] + dx59*x77 + dx77*x59 + dx81*x82 + dx82*x81
x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
dx84 = dx14*parms[37] + dx15*parms[38] + dx20*x83 + dx23*parms[43] + dx24*parms[36] + dx40*x73 + dx60*x75 + dx67*parms[44] + dx73*x40 - 0.27747*dx74 + dx75*x60 + dx83*x20
x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
dx85 = dx14*parms[53] + dx26*parms[54] + dx27*parms[50] + dx30*x77 + dx35*parms[52] + dx42*parms[55] + dx59*x81 + dx77*x30 + dx81*x59
x86 = -parms[42]
dx86 = 0
x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
dx87 = dx14*parms[39] + dx15*parms[40] + dx17*parms[44] + dx23*x86 + dx24*parms[37] + dx85 + dx86*x23
x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
dx88 = dx11*parms[24] + dx22*parms[32] + dx3*parms[25] + dx5*parms[26] + dx64*x87 + dx84*x9 + dx87*x64 + dx9*x84
x89 = -x10
dx89 = -dx10
x90 = -x20
dx90 = -dx20
x91 = 0.27747*x18
dx91 = 0.27747*dx18
x92 = -parms[43]
dx92 = 0
x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
dx93 = dx14*parms[40] + dx15*parms[41] + dx17*x92 + dx24*parms[38] + dx40*x83 + dx43*parms[42] + dx52*x91 - 0.27747*dx61 + dx73*x90 + dx83*x40 + dx90*x73 + dx91*x52 + dx92*x17
x94 = x13*x62
dx94 = dx13*x62 + dx62*x13
x95 = x63*x9
dx95 = dx63*x9 + dx9*x63
x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
dx96 = dx11*parms[25] + dx3*parms[27] + dx5*parms[28] + dx8*parms[32] - dx93 + 0.00502*dx94 + 0.00502*dx95
x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
dx97 = dx23*parms[45] + dx24*parms[43] + dx40*x60 + dx53*parms[42] + dx60*x40 - dx74
x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
dx98 = dx12*parms[32] + dx21*parms[33] + dx5*parms[30] - dx97
x99 = x10*x98
dx99 = dx10*x98 + dx98*x10
x100 = -parms[31]
dx100 = 0
x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
dx101 = dx100*x8 + dx11*parms[26] + dx13*x84 + dx21*parms[30] + dx3*parms[28] + dx5*parms[29] + dx8*x100 + dx84*x13 + dx87*x9 + dx9*x87 + 0.00502*dx97
x102 = -0.27857*x2
dx102 = -0.27857*dx2
x103 = -0.27857*x10
dx103 = -0.27857*dx10
x104 = parms[14]*x0 + parms[16]*x4 - 0.03175*parms[30]*x15 - 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 - 0.03175*x94 - 0.03175*x95
dx104 = dx0*parms[14] + dx102*x98 + dx103*x65 - 0.03175*dx11*parms[31] - 0.03175*dx15*parms[30] + dx2*x88 + dx4*parms[16] + dx65*x103 + dx88*x2 + dx89*x96 - 0.03175*dx94 - 0.03175*dx95 + dx96*x89 + dx98*x102
x105 = -x89
dx105 = -dx89
x106 = 0.00502*x105 + 0.03175
dx106 = 0.00502*dx105
x107 = -x103*x13 - x106*x9
dx107 = -dx103*x13 - dx106*x9 - dx13*x103 - dx9*x106
x108 = x2*x9
dx108 = dx2*x9 + dx9*x2
x109 = -x105*x20 - x108*x18
dx109 = -dx105*x20 - dx108*x18 - dx18*x108 - dx20*x105
x110 = -x109
dx110 = -dx109
x111 = x105*x40 + x108*x20
dx111 = dx105*x40 + dx108*x20 + dx20*x108 + dx40*x105
x112 = -x105
dx112 = -dx105
x113 = x103*x9 + x106*x64
dx113 = dx103*x9 + dx106*x64 + dx64*x106 + dx9*x103
x114 = 0.27747*x112 + x113
dx114 = 0.27747*dx112 + dx113
x115 = -x102
dx115 = -dx102
x116 = 0.27747*x108 + x115
dx116 = 0.27747*dx108 + dx115
x117 = x114*x20 + x116*x40
dx117 = dx114*x20 + dx116*x40 + dx20*x114 + dx40*x116
x118 = x107*x30 + x117*x59
dx118 = dx107*x30 + dx117*x59 + dx30*x107 + dx59*x117
x119 = x2*x64
dx119 = dx2*x64 + dx64*x2
x120 = -x111*x30 - x119*x29
dx120 = -dx111*x30 - dx119*x29 - dx29*x119 - dx30*x111
x121 = -x120
dx121 = -dx120
x122 = -x114*x18 - x116*x20
dx122 = -dx114*x18 - dx116*x20 - dx18*x114 - dx20*x116
x123 = -x122
dx123 = -dx122
x124 = x118*x57 + x123*x38
dx124 = dx118*x57 + dx123*x38 + dx38*x123 + dx57*x118
x125 = x111*x59 + x119*x30
dx125 = dx111*x59 + dx119*x30 + dx30*x119 + dx59*x111
x126 = -x110*x37 - x125*x38
dx126 = -dx110*x37 - dx125*x38 - dx37*x110 - dx38*x125
x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
dx127 = -dx121*parms[79] + dx124*parms[81] + dx126*parms[80]
x128 = x110*x38 + x125*x57
dx128 = dx110*x38 + dx125*x57 + dx38*x110 + dx57*x125
x129 = -x118*x38 - x123*x37
dx129 = -dx118*x38 - dx123*x37 - dx37*x123 - dx38*x118
x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
dx130 = dx121*parms[78] - dx128*parms[80] + dx129*parms[81]
x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
dx131 = -dx110*parms[67] + dx118*parms[69] + dx120*parms[68] + dx127*x57 + dx130*x76 + dx57*x127 + dx76*x130
x132 = -x107*x29 - x117*x30
dx132 = -dx107*x29 - dx117*x30 - dx29*x107 - dx30*x117
x133 = -x132
dx133 = -dx132
x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
dx134 = dx110*parms[66] - dx125*parms[68] + dx126*parms[78] - dx128*parms[79] + dx132*parms[69] - dx133*parms[81]
x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
dx135 = dx105*parms[42] + dx107*(parms[45] + parms[57]) - dx108*parms[44] + dx110*parms[54] + dx111*parms[55] + dx131*x30 + dx134*x59 + dx30*x131 + dx59*x134
x136 = x135*x9
dx136 = dx135*x9 + dx9*x135
x137 = -x119
dx137 = -dx119
x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
dx138 = dx109*parms[56] + dx117*parms[57] + dx131*x59 + dx134*x82 + dx137*parms[55] + dx59*x131 + dx82*x134
x139 = x138*x20
dx139 = dx138*x20 + dx20*x138
x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
dx140 = -dx111*parms[56] + dx119*parms[54] - dx121*parms[66] + dx122*parms[57] - dx123*parms[69] - dx125*parms[67] - dx127*x38 - dx130*x57 - dx38*x127 - dx57*x130
x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
dx141 = dx121*parms[77] + dx124*x79 + dx126*parms[76] + dx128*parms[74] + dx129*parms[78] + dx79*x124
x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
dx142 = dx110*parms[64] + dx118*parms[68] + dx120*parms[63] + dx123*x78 + dx125*parms[61] - dx141 + dx78*x123
x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
dx143 = dx121*parms[74] + dx126*parms[73] + dx128*parms[72] + dx129*x70 + dx133*parms[79] + dx70*x129
x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
dx144 = dx121*parms[76] + dx124*parms[80] + dx126*parms[75] + dx128*parms[73] + dx133*x68 + dx68*x133
x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
dx145 = dx110*parms[62] + dx120*parms[61] + dx123*parms[67] + dx125*parms[60] + dx133*parms[68] + dx143*x57 + dx144*x76 + dx57*x143 + dx76*x144
x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
dx146 = dx107*parms[55] + dx109*parms[49] + dx111*parms[48] + dx119*parms[50] + dx123*parms[56] + dx142*x82 + dx145*x59 + dx59*x145 + dx82*x142
x147 = -x107
dx147 = -dx107
x148 = -parms[67]
dx148 = 0
x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
dx149 = dx110*parms[65] + dx118*x148 + dx120*parms[64] + dx125*parms[62] + dx132*parms[66] + dx143*x38 + dx144*x57 + dx148*x118 + dx38*x143 + dx57*x144
x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
dx150 = dx109*parms[51] + dx111*parms[49] + dx117*parms[56] + dx119*parms[52] + dx147*parms[54] - dx149
x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
dx151 = dx105*parms[41] + dx107*parms[42] + dx108*parms[38] + dx113*x92 + dx119*parms[40] - 0.27747*dx139 + dx140*x91 + dx146*x40 + dx150*x90 + dx40*x146 + dx90*x150 + dx91*x140 + dx92*x113
x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
dx152 = dx112*parms[43] + dx113*parms[45] + dx119*parms[44] + dx139 + dx140*x40 + dx40*x140
x153 = x13*x152
dx153 = dx13*x152 + dx152*x13
x154 = -0.27747*x20
dx154 = -0.27747*dx20
x155 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x154 + x146*x20 + x150*x40
dx155 = dx105*parms[38] + dx108*parms[36] + dx115*parms[43] + dx119*parms[37] + dx138*x75 + dx140*x154 + dx146*x20 + dx147*parms[44] + dx150*x40 + dx154*x140 + dx20*x146 + dx40*x150 + dx75*x138
x156 = -parms[55]
dx156 = 0
x157 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x156 + x142*x59 + x145*x30
dx157 = dx109*parms[52] + dx111*parms[50] + dx117*x156 + dx119*parms[53] + dx122*parms[54] + dx142*x59 + dx145*x30 + dx156*x117 + dx30*x145 + dx59*x142
x158 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x157
dx158 = dx105*parms[40] + dx108*parms[37] + dx113*parms[44] + dx115*x86 + dx119*parms[39] + dx157 + dx86*x115
x159 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
dx159 = dx108*parms[43] + dx115*parms[45] + dx137*parms[42] + dx138*x40 + dx140*x90 + dx40*x138 + dx90*x140
x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x155 + x158*x9 + 0.00502*x159
dx160 = dx100*x103 + dx102*parms[30] + dx103*x100 + dx13*x155 + dx155*x13 + dx158*x9 + 0.00502*dx159 + dx2*parms[26] + dx89*parms[28] + dx9*x158
x161 = -x9
dx161 = -dx9
x162 = x13*x20
dx162 = dx13*x20 + dx20*x13
x163 = x162*x59 + x30*x9
dx163 = dx162*x59 + dx30*x9 + dx59*x162 + dx9*x30
x164 = x13*x40
dx164 = dx13*x40 + dx40*x13
x165 = -x164
dx165 = -dx164
x166 = -x163*x38 - x165*x37
dx166 = -dx163*x38 - dx165*x37 - dx37*x165 - dx38*x163
x167 = x163*x57 + x165*x38
dx167 = dx163*x57 + dx165*x38 + dx38*x165 + dx57*x163
x168 = 0.27747*x13 + 0.00502
dx168 = 0.27747*dx13
x169 = x168*x40
dx169 = dx168*x40 + dx40*x168
x170 = x169*x82
dx170 = dx169*x82 + dx82*x169
x171 = -x170
dx171 = -dx170
x172 = x169*x59
dx172 = dx169*x59 + dx59*x169
x173 = -x162*x30 - x29*x9
dx173 = -dx162*x30 - dx29*x9 - dx30*x162 - dx9*x29
x174 = -x173
dx174 = -dx173
x175 = x168*x90
dx175 = dx168*x90 + dx90*x168
x176 = -x175
dx176 = -dx175
x177 = x172*x57 + x176*x38
dx177 = dx172*x57 + dx176*x38 + dx38*x176 + dx57*x172
x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
dx178 = dx166*parms[80] - dx174*parms[79] + dx177*parms[81]
x179 = -x172*x38 - x176*x37
dx179 = -dx172*x38 - dx176*x37 - dx37*x176 - dx38*x172
x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
dx180 = -dx167*parms[80] + dx174*parms[78] + dx179*parms[81]
x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
dx181 = dx161*parms[55] - dx163*parms[68]*x82 + dx164*parms[56] + dx165*(parms[66]*x82 - parms[67]*x59) + dx166*parms[78]*x82 - dx167*parms[79]*x82 + dx169*parms[57] + dx170*parms[69]*x82 - dx171*parms[81]*x82 + dx172*parms[69]*x59 + dx173*parms[68]*x59 + dx178*x57*x59 + dx180*x59*x76 + dx57*x178*x59 + dx59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + dx76*x180*x59 + dx82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
dx182 = -dx162*parms[56] - dx163*parms[67] - dx174*parms[66] + dx175*parms[57] - dx176*parms[69] - dx178*x38 - dx180*x57 - dx38*x178 - dx57*x180 + dx9*parms[54]
x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
dx183 = dx166*parms[76] + dx167*parms[74] + dx174*parms[77] + dx177*x79 + dx179*parms[78] + dx79*x177
x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
dx184 = dx163*parms[61] + dx165*parms[64] + dx172*parms[68] + dx173*parms[63] + dx176*x78 - dx183 + dx78*x176
x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
dx185 = dx166*parms[75] + dx167*parms[73] + dx171*x68 + dx174*parms[76] + dx177*parms[80] + dx68*x171
x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
dx186 = dx166*parms[73] + dx167*parms[72] + dx171*parms[79] + dx174*parms[74] + dx179*x70 + dx70*x179
x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
dx187 = dx163*parms[60] + dx165*parms[62] + dx171*parms[68] + dx173*parms[61] + dx176*parms[67] + dx185*x76 + dx186*x57 + dx57*x186 + dx76*x185
x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x156*x169 + x184*x59 + x187*x30
dx188 = dx156*x169 + dx162*parms[50] + dx164*parms[52] + dx169*x156 + dx175*parms[54] + dx184*x59 + dx187*x30 + dx30*x187 + dx59*x184 + dx9*parms[53]
x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
dx189 = dx162*parms[48] + dx164*parms[49] + dx176*parms[56] + dx184*x82 + dx187*x59 + dx59*x187 + dx82*x184 + dx9*parms[50]
x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
dx190 = dx148*x172 + dx163*parms[62] + dx165*parms[65] + dx170*parms[66] + dx172*x148 + dx173*parms[64] + dx185*x57 + dx186*x38 + dx38*x186 + dx57*x185
x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
dx191 = dx162*parms[49] + dx164*parms[51] + dx169*parms[56] - dx190 + dx9*parms[52]
x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
dx192 = dx13*parms[38] - 0.27747*dx181*x20 + dx182*x91 + dx189*x40 + dx191*x90 - 0.27747*dx20*x181 + dx40*x189 + dx9*parms[40] + dx90*x191 + dx91*x182
x193 = x154*x82
dx193 = dx154*x82 + dx82*x154
x194 = -x193
dx194 = -dx193
x195 = x40*x82
dx195 = dx40*x82 + dx82*x40
x196 = -x195
dx196 = -dx195
x197 = x40*x59
dx197 = dx40*x59 + dx59*x40
x198 = -x90
dx198 = -dx90
x199 = x197*x57 + x198*x38
dx199 = dx197*x57 + dx198*x38 + dx38*x198 + dx57*x197
x200 = x154*x59
dx200 = dx154*x59 + dx59*x154
x201 = -x91
dx201 = -dx91
x202 = -x200*x38 - x201*x37
dx202 = -dx200*x38 - dx201*x37 - dx37*x201 - dx38*x200
x203 = -x197*x38 - x198*x37
dx203 = -dx197*x38 - dx198*x37 - dx37*x198 - dx38*x197
x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
dx204 = dx194*parms[79] + dx196*parms[74] + dx199*parms[72] + dx202*x70 + dx203*parms[73] + dx70*x202
x205 = x200*x57 + x201*x38
dx205 = dx200*x57 + dx201*x38 + dx38*x201 + dx57*x200
x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
dx206 = dx194*x68 + dx196*parms[76] + dx199*parms[73] + dx203*parms[75] + dx205*parms[80] + dx68*x194
x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
dx207 = dx148*x200 + dx193*parms[66] + dx195*parms[64] + dx197*parms[62] + dx198*parms[65] + dx200*x148 + dx204*x38 + dx206*x57 + dx38*x204 + dx57*x206
x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
dx208 = dx196*parms[78] - dx199*parms[80] + dx202*parms[81]
x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
dx209 = -dx196*parms[79] + dx203*parms[80] + dx205*parms[81]
x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
dx210 = dx194*parms[68] + dx195*parms[61] + dx197*parms[60] + dx198*parms[62] + dx201*parms[67] + dx204*x57 + dx206*x76 + dx57*x204 + dx76*x206
x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
dx211 = dx196*parms[77] + dx199*parms[74] + dx202*parms[78] + dx203*parms[76] + dx205*x79 + dx79*x205
x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
dx212 = dx195*parms[63] + dx197*parms[61] + dx198*parms[64] + dx200*parms[68] + dx201*x78 - dx211 + dx78*x201
x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x154*x156 + x210*x30 + x212*x59
dx213 = dx154*x156 + dx156*x154 + dx210*x30 + dx212*x59 + dx30*x210 + dx40*parms[50] + dx59*x212 + dx90*parms[52] + dx91*parms[54]
x214 = -x59
dx214 = -dx59
x215 = x30*x76
dx215 = dx30*x76 + dx76*x30
x216 = x30*x57
dx216 = dx30*x57 + dx57*x30
x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
dx217 = dx214*parms[74] + dx215*parms[73] + dx216*parms[72]
x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
dx218 = dx214*parms[76] + dx215*parms[75] + dx216*parms[73]
x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
dx219 = dx214*parms[77] + dx215*parms[76] + dx216*parms[74]
x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
dx220 = dx217*x38 + dx218*x57 + dx30*parms[62] + dx38*x217 + dx57*x218 + dx59*parms[64]
x221 = parms[74]*x38 + parms[76]*x57
dx221 = dx38*parms[74] + dx57*parms[76]
#
dMdq3_out[0] = dx0*(2*parms[12]*x0 + 2*parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) - dx101*x4 + dx4*(2*parms[13]*x0 + 2*parms[15]*x4 - x101 - 0.03175*x66 + 0.03175*x99) + dx66*(-0.27857*x0 - 0.03175*x4) + dx7*x0*x96 + dx88*x0*x89 + dx89*x0*x88 + dx96*x0*x7 + dx99*(0.27857*x0 + 0.03175*x4)
dMdq3_out[1] = dx104
dMdq3_out[2] = dx101
dMdq3_out[3] = dx93
dMdq3_out[4] = dx85
dMdq3_out[5] = dx72
dMdq3_out[6] = dx80
dMdq3_out[7] = dx104
dMdq3_out[8] = dx102*(parms[32]*x7 + 2*parms[33]*x102 - x159) + dx103*(2*parms[32]*x89 + 2*parms[33]*x103 + x135*x64 + x152*x9) - 0.03175*dx105*parms[30] + dx115*parms[32]*x2 + dx135*x103*x64 + dx136*(0.00502*x89 - 0.03175) - dx151*x89 + dx152*x103*x9 + dx153*(0.00502*x89 - 0.03175) + dx155*x2*x9 + dx158*x2*x64 - dx159*x102 + dx2*(2*parms[24]*x2 + 2*parms[25]*x89 - 0.0635*parms[31] + parms[32]*x115 + x155*x9 + x158*x64) + dx64*(x103*x135 + x158*x2) + dx7*parms[32]*x102 + dx89*(2*parms[25]*x2 + 2*parms[27]*x89 + 0.03175*parms[30] + 2*parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153) + dx9*(x103*x152 + x155*x2)
dMdq3_out[9] = dx160
dMdq3_out[10] = dx151
dMdq3_out[11] = dx157
dMdq3_out[12] = dx149
dMdq3_out[13] = dx141
dMdq3_out[14] = dx101
dMdq3_out[15] = dx160
dMdq3_out[16] = dx13*(2*parms[36]*x13 + 2*parms[37]*x9 + 0.01004*parms[43] + x154*x182 + x181*x75 + x189*x20 + x191*x40) + dx154*x13*x182 + 0.00502*dx161*parms[42] + dx181*(x13*x75 + 0.00502*x40) + dx182*(x13*x154 + 0.00502*x90) + dx188*x9 + dx189*x13*x20 + dx191*x13*x40 + dx20*x13*x189 + dx40*(x13*x191 + 0.00502*x181) + dx75*x13*x181 + dx9*(2*parms[37]*x13 + 2*parms[39]*x9 - 0.00502*parms[42] + x188) + 0.00502*dx90*x182
dMdq3_out[17] = dx192
dMdq3_out[18] = dx188
dMdq3_out[19] = dx190
dMdq3_out[20] = dx183
dMdq3_out[21] = dx93
dMdq3_out[22] = dx151
dMdq3_out[23] = dx192
dMdq3_out[24] = dx154*(2*parms[56]*x90 + 2*parms[57]*x154 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + dx193*parms[69]*x154*x82 - dx194*parms[81]*x154*x82 + dx195*parms[68]*x154*x59 - dx196*parms[66]*x91 + dx197*(-parms[67]*x91 - parms[68]*x154*x82) + dx198*x154*(parms[66]*x82 - parms[67]*x59) - dx199*parms[79]*x154*x82 + dx200*parms[69]*x154*x59 + dx201*(parms[56]*x40 - parms[69]*x91) + dx203*parms[78]*x154*x82 - dx207*x90 + dx208*(x154*x59*x76 - x57*x91) + dx209*(x154*x57*x59 - x38*x91) + dx210*x40*x59 + dx212*x40*x82 - dx38*x209*x91 + dx40*(2*parms[48]*x40 + 2*parms[49]*x90 + parms[56]*x201 - parms[56]*x91 + x210*x59 + x212*x82) + dx57*(x154*x209*x59 - x208*x91) + dx59*(x154*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x210*x40) + dx76*x154*x208*x59 + dx82*(x154*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194) + x212*x40) + dx90*(2*parms[49]*x40 + 2*parms[51]*x90 + 2*parms[56]*x154 - x207) + dx91*(-parms[56]*x40 + 2*parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
dMdq3_out[25] = dx213
dMdq3_out[26] = dx207
dMdq3_out[27] = dx211
dMdq3_out[28] = dx85
dMdq3_out[29] = dx157
dMdq3_out[30] = dx188
dMdq3_out[31] = dx213
dMdq3_out[32] = dx217*x30*x57 + dx218*x30*x76 - dx219*x59 + dx30*(2*parms[60]*x30 + 2*parms[61]*x59 + x217*x57 + x218*x76) + dx57*x217*x30 + dx59*(2*parms[61]*x30 + 2*parms[63]*x59 - x219) + dx76*x218*x30
dMdq3_out[33] = dx220
dMdq3_out[34] = dx219
dMdq3_out[35] = dx72
dMdq3_out[36] = dx149
dMdq3_out[37] = dx190
dMdq3_out[38] = dx207
dMdq3_out[39] = dx220
dMdq3_out[40] = dx38*(2*parms[72]*x38 + 2*parms[73]*x57) + dx57*(2*parms[73]*x38 + 2*parms[75]*x57)
dMdq3_out[41] = dx221
dMdq3_out[42] = dx80
dMdq3_out[43] = dx141
dMdq3_out[44] = dx183
dMdq3_out[45] = dx211
dMdq3_out[46] = dx219
dMdq3_out[47] = dx221
dMdq3_out[48] = 0
#
return dMdq3_out
if jt_num == 4:
#
dMdq4_out = [0]*49
#
x0 = cos(q[1])
dx0 = 0
x1 = -x0
dx1 = -dx0
x2 = cos(q[2])
dx2 = 0
x3 = x1*x2
dx3 = dx1*x2 + dx2*x1
x4 = -sin(q[1])
dx4 = 0
x5 = -x4
dx5 = -dx4
x6 = 0.27857*x0 - 0.03175*x5
dx6 = 0.27857*dx0 - 0.03175*dx5
x7 = -x2
dx7 = -dx2
x8 = x6*x7
dx8 = dx6*x7 + dx7*x6
x9 = cos(q[3])
dx9 = 0
x10 = sin(q[2])
dx10 = 0
x11 = x1*x10
dx11 = dx1*x10 + dx10*x1
x12 = -x11
dx12 = -dx11
x13 = sin(q[3])
dx13 = 0
x14 = x12*x13 + x5*x9
dx14 = dx12*x13 + dx13*x12 + dx5*x9 + dx9*x5
x15 = -x3
dx15 = -dx3
x16 = -x15
dx16 = -dx15
x17 = -0.00502*x13*x15 + x8*x9
dx17 = -0.00502*dx13*x15 - 0.00502*dx15*x13 + dx8*x9 + dx9*x8
x18 = sin(q[4])
dx18 = cos(q[4])
x19 = 0.27747*x16 + x17
dx19 = 0.27747*dx16 + dx17
x20 = cos(q[4])
dx20 = -sin(q[4])
x21 = x10*x6
dx21 = dx10*x6 + dx6*x10
x22 = -x21
dx22 = -dx21
x23 = x22 + 0.00502*x5
dx23 = dx22 + 0.00502*dx5
x24 = x11*x9 + x13*x5
dx24 = dx11*x9 + dx13*x5 + dx5*x13 + dx9*x11
x25 = x23 + 0.27747*x24
dx25 = dx23 + 0.27747*dx24
x26 = -x18*x19 - x20*x25
dx26 = -dx18*x19 - dx19*x18 - dx20*x25 - dx25*x20
x27 = x16*x18 + x20*x24
dx27 = dx16*x18 + dx18*x16 + dx20*x24 + dx24*x20
x28 = -x27
dx28 = -dx27
x29 = sin(q[5])
dx29 = 0
x30 = cos(q[5])
dx30 = 0
x31 = x14*x30 + x28*x29
dx31 = dx14*x30 + dx28*x29 + dx29*x28 + dx30*x14
x32 = -x26
dx32 = -dx26
x33 = -x14*x29 - x27*x30
dx33 = -dx14*x29 - dx27*x30 - dx29*x14 - dx30*x27
x34 = -x33
dx34 = -dx33
x35 = -x15*x20 - x18*x24
dx35 = -dx15*x20 - dx18*x24 - dx20*x15 - dx24*x18
x36 = -x35
dx36 = -dx35
x37 = sin(q[6])
dx37 = 0
x38 = cos(q[6])
dx38 = 0
x39 = -x31*x38 - x36*x37
dx39 = -dx31*x38 - dx36*x37 - dx37*x36 - dx38*x31
x40 = -x18
dx40 = -dx18
x41 = x19*x20 + x25*x40
dx41 = dx19*x20 + dx20*x19 + dx25*x40 + dx40*x25
x42 = -x41
dx42 = -dx41
x43 = -x13*x8 - 0.00502*x15*x9
dx43 = -dx13*x8 - 0.00502*dx15*x9 - dx8*x13 - 0.00502*dx9*x15
x44 = x29*x42 + x30*x43
dx44 = dx29*x42 + dx30*x43 + dx42*x29 + dx43*x30
x45 = -x44
dx45 = -dx44
x46 = x32*x38 + x37*x45
dx46 = dx32*x38 + dx37*x45 + dx38*x32 + dx45*x37
x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
dx47 = -dx34*parms[79] + dx39*parms[80] + dx46*parms[81]
x48 = -x32*x37 - x38*x44
dx48 = -dx32*x37 - dx37*x32 - dx38*x44 - dx44*x38
x49 = -x31
dx49 = -dx31
x50 = x36*x38 + x37*x49
dx50 = dx36*x38 + dx37*x49 + dx38*x36 + dx49*x37
x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
dx51 = -dx34*parms[78] - dx48*parms[81] + dx50*parms[80]
x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
dx52 = dx14*parms[54] + dx26*parms[57] + dx28*parms[56] - dx31*parms[67] - dx32*parms[69] - dx34*parms[66] - dx37*x51 - dx38*x47 - dx47*x38 - dx51*x37
x53 = -x14
dx53 = -dx14
x54 = -x29*x43 - x30*x41
dx54 = -dx29*x43 - dx30*x41 - dx41*x30 - dx43*x29
x55 = -x54
dx55 = -dx54
x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
dx56 = -dx36*parms[66] - dx39*parms[78] - dx49*parms[68] + dx50*parms[79] - dx54*parms[69] + dx55*parms[81]
x57 = -x37
dx57 = -dx37
x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
dx58 = dx33*parms[68] - dx36*parms[67] + dx38*x51 + dx44*parms[69] + dx47*x57 + dx51*x38 + dx57*x47
x59 = -x29
dx59 = -dx29
x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
dx60 = dx30*x56 + dx35*parms[56] + dx41*parms[57] + dx53*parms[55] + dx56*x30 + dx58*x59 + dx59*x58
x61 = x20*x60
dx61 = dx20*x60 + dx60*x20
x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
dx62 = dx14*parms[44] + dx16*parms[43] + dx17*parms[45] + dx40*x52 + dx52*x40 + dx61
x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
dx63 = dx15*parms[42] - dx24*parms[44] + dx27*parms[55] + dx29*x56 + dx30*x58 + dx36*parms[54] + dx43*(parms[45] + parms[57]) + dx56*x29 + dx58*x30
x64 = -x13
dx64 = -dx13
x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
dx65 = dx3*parms[32] - dx5*parms[31] + dx62*x9 + dx63*x64 + dx64*x63 + dx8*parms[33] + dx9*x62
x66 = x2*x65
dx66 = dx2*x65 + dx65*x2
x67 = -x43
dx67 = -dx43
x68 = -parms[78]
dx68 = 0
x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
dx69 = dx34*parms[76] + dx39*parms[75] + dx46*parms[80] + dx50*parms[73] + dx55*x68 + dx68*x55
x70 = -parms[80]
dx70 = 0
x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
dx71 = dx34*parms[74] + dx39*parms[73] + dx48*x70 + dx50*parms[72] + dx55*parms[79] + dx70*x48
x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
dx72 = dx31*parms[62] + dx33*parms[64] + dx36*parms[65] + dx38*x71 + dx45*parms[67] + dx54*parms[66] + dx57*x69 + dx69*x57 + dx71*x38
x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
dx73 = dx14*parms[52] + dx27*parms[49] + dx35*parms[51] + dx41*parms[56] + dx67*parms[54] - dx72
x74 = x20*x52
dx74 = dx20*x52 + dx52*x20
x75 = -0.27747*x18
dx75 = -0.27747*dx18
x76 = -x38
dx76 = -dx38
x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
dx77 = dx31*parms[60] + dx32*parms[67] + dx33*parms[61] + dx36*parms[62] + dx55*parms[68] + dx57*x71 + dx69*x76 + dx71*x57 + dx76*x69
x78 = -parms[66]
dx78 = 0
x79 = -parms[79]
dx79 = 0
x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
dx80 = dx34*parms[77] + dx39*parms[76] + dx46*x79 + dx48*parms[78] + dx50*parms[74] + dx79*x46
x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
dx81 = dx31*parms[61] + dx32*x78 + dx33*parms[63] + dx36*parms[64] + dx44*parms[68] + dx78*x32 - dx80
x82 = -x30
dx82 = -dx30
x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
dx83 = dx14*parms[50] + dx27*parms[48] + dx32*parms[56] + dx35*parms[49] + dx43*parms[55] + dx59*x77 + dx77*x59 + dx81*x82 + dx82*x81
x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
dx84 = dx14*parms[37] + dx15*parms[38] + dx20*x83 + dx23*parms[43] + dx24*parms[36] + dx40*x73 + dx60*x75 + dx67*parms[44] + dx73*x40 - 0.27747*dx74 + dx75*x60 + dx83*x20
x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
dx85 = dx14*parms[53] + dx26*parms[54] + dx27*parms[50] + dx30*x77 + dx35*parms[52] + dx42*parms[55] + dx59*x81 + dx77*x30 + dx81*x59
x86 = -parms[42]
dx86 = 0
x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
dx87 = dx14*parms[39] + dx15*parms[40] + dx17*parms[44] + dx23*x86 + dx24*parms[37] + dx85 + dx86*x23
x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
dx88 = dx11*parms[24] + dx22*parms[32] + dx3*parms[25] + dx5*parms[26] + dx64*x87 + dx84*x9 + dx87*x64 + dx9*x84
x89 = -x10
dx89 = -dx10
x90 = -x20
dx90 = -dx20
x91 = 0.27747*x18
dx91 = 0.27747*dx18
x92 = -parms[43]
dx92 = 0
x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
dx93 = dx14*parms[40] + dx15*parms[41] + dx17*x92 + dx24*parms[38] + dx40*x83 + dx43*parms[42] + dx52*x91 - 0.27747*dx61 + dx73*x90 + dx83*x40 + dx90*x73 + dx91*x52 + dx92*x17
x94 = x13*x62
dx94 = dx13*x62 + dx62*x13
x95 = x63*x9
dx95 = dx63*x9 + dx9*x63
x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
dx96 = dx11*parms[25] + dx3*parms[27] + dx5*parms[28] + dx8*parms[32] - dx93 + 0.00502*dx94 + 0.00502*dx95
x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
dx97 = dx23*parms[45] + dx24*parms[43] + dx40*x60 + dx53*parms[42] + dx60*x40 - dx74
x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
dx98 = dx12*parms[32] + dx21*parms[33] + dx5*parms[30] - dx97
x99 = x10*x98
dx99 = dx10*x98 + dx98*x10
x100 = -parms[31]
dx100 = 0
x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
dx101 = dx100*x8 + dx11*parms[26] + dx13*x84 + dx21*parms[30] + dx3*parms[28] + dx5*parms[29] + dx8*x100 + dx84*x13 + dx87*x9 + dx9*x87 + 0.00502*dx97
x102 = -0.27857*x2
dx102 = -0.27857*dx2
x103 = -0.27857*x10
dx103 = -0.27857*dx10
x104 = parms[14]*x0 + parms[16]*x4 - 0.03175*parms[30]*x15 - 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 - 0.03175*x94 - 0.03175*x95
dx104 = dx0*parms[14] + dx102*x98 + dx103*x65 - 0.03175*dx11*parms[31] - 0.03175*dx15*parms[30] + dx2*x88 + dx4*parms[16] + dx65*x103 + dx88*x2 + dx89*x96 - 0.03175*dx94 - 0.03175*dx95 + dx96*x89 + dx98*x102
x105 = -x89
dx105 = -dx89
x106 = 0.00502*x105 + 0.03175
dx106 = 0.00502*dx105
x107 = -x103*x13 - x106*x9
dx107 = -dx103*x13 - dx106*x9 - dx13*x103 - dx9*x106
x108 = x2*x9
dx108 = dx2*x9 + dx9*x2
x109 = -x105*x20 - x108*x18
dx109 = -dx105*x20 - dx108*x18 - dx18*x108 - dx20*x105
x110 = -x109
dx110 = -dx109
x111 = x105*x40 + x108*x20
dx111 = dx105*x40 + dx108*x20 + dx20*x108 + dx40*x105
x112 = -x105
dx112 = -dx105
x113 = x103*x9 + x106*x64
dx113 = dx103*x9 + dx106*x64 + dx64*x106 + dx9*x103
x114 = 0.27747*x112 + x113
dx114 = 0.27747*dx112 + dx113
x115 = -x102
dx115 = -dx102
x116 = 0.27747*x108 + x115
dx116 = 0.27747*dx108 + dx115
x117 = x114*x20 + x116*x40
dx117 = dx114*x20 + dx116*x40 + dx20*x114 + dx40*x116
x118 = x107*x30 + x117*x59
dx118 = dx107*x30 + dx117*x59 + dx30*x107 + dx59*x117
x119 = x2*x64
dx119 = dx2*x64 + dx64*x2
x120 = -x111*x30 - x119*x29
dx120 = -dx111*x30 - dx119*x29 - dx29*x119 - dx30*x111
x121 = -x120
dx121 = -dx120
x122 = -x114*x18 - x116*x20
dx122 = -dx114*x18 - dx116*x20 - dx18*x114 - dx20*x116
x123 = -x122
dx123 = -dx122
x124 = x118*x57 + x123*x38
dx124 = dx118*x57 + dx123*x38 + dx38*x123 + dx57*x118
x125 = x111*x59 + x119*x30
dx125 = dx111*x59 + dx119*x30 + dx30*x119 + dx59*x111
x126 = -x110*x37 - x125*x38
dx126 = -dx110*x37 - dx125*x38 - dx37*x110 - dx38*x125
x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
dx127 = -dx121*parms[79] + dx124*parms[81] + dx126*parms[80]
x128 = x110*x38 + x125*x57
dx128 = dx110*x38 + dx125*x57 + dx38*x110 + dx57*x125
x129 = -x118*x38 - x123*x37
dx129 = -dx118*x38 - dx123*x37 - dx37*x123 - dx38*x118
x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
dx130 = dx121*parms[78] - dx128*parms[80] + dx129*parms[81]
x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
dx131 = -dx110*parms[67] + dx118*parms[69] + dx120*parms[68] + dx127*x57 + dx130*x76 + dx57*x127 + dx76*x130
x132 = -x107*x29 - x117*x30
dx132 = -dx107*x29 - dx117*x30 - dx29*x107 - dx30*x117
x133 = -x132
dx133 = -dx132
x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
dx134 = dx110*parms[66] - dx125*parms[68] + dx126*parms[78] - dx128*parms[79] + dx132*parms[69] - dx133*parms[81]
x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
dx135 = dx105*parms[42] + dx107*(parms[45] + parms[57]) - dx108*parms[44] + dx110*parms[54] + dx111*parms[55] + dx131*x30 + dx134*x59 + dx30*x131 + dx59*x134
x136 = x135*x9
dx136 = dx135*x9 + dx9*x135
x137 = -x119
dx137 = -dx119
x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
dx138 = dx109*parms[56] + dx117*parms[57] + dx131*x59 + dx134*x82 + dx137*parms[55] + dx59*x131 + dx82*x134
x139 = x138*x20
dx139 = dx138*x20 + dx20*x138
x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
dx140 = -dx111*parms[56] + dx119*parms[54] - dx121*parms[66] + dx122*parms[57] - dx123*parms[69] - dx125*parms[67] - dx127*x38 - dx130*x57 - dx38*x127 - dx57*x130
x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
dx141 = dx121*parms[77] + dx124*x79 + dx126*parms[76] + dx128*parms[74] + dx129*parms[78] + dx79*x124
x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
dx142 = dx110*parms[64] + dx118*parms[68] + dx120*parms[63] + dx123*x78 + dx125*parms[61] - dx141 + dx78*x123
x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
dx143 = dx121*parms[74] + dx126*parms[73] + dx128*parms[72] + dx129*x70 + dx133*parms[79] + dx70*x129
x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
dx144 = dx121*parms[76] + dx124*parms[80] + dx126*parms[75] + dx128*parms[73] + dx133*x68 + dx68*x133
x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
dx145 = dx110*parms[62] + dx120*parms[61] + dx123*parms[67] + dx125*parms[60] + dx133*parms[68] + dx143*x57 + dx144*x76 + dx57*x143 + dx76*x144
x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
dx146 = dx107*parms[55] + dx109*parms[49] + dx111*parms[48] + dx119*parms[50] + dx123*parms[56] + dx142*x82 + dx145*x59 + dx59*x145 + dx82*x142
x147 = -x107
dx147 = -dx107
x148 = -parms[67]
dx148 = 0
x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
dx149 = dx110*parms[65] + dx118*x148 + dx120*parms[64] + dx125*parms[62] + dx132*parms[66] + dx143*x38 + dx144*x57 + dx148*x118 + dx38*x143 + dx57*x144
x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
dx150 = dx109*parms[51] + dx111*parms[49] + dx117*parms[56] + dx119*parms[52] + dx147*parms[54] - dx149
x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
dx151 = dx105*parms[41] + dx107*parms[42] + dx108*parms[38] + dx113*x92 + dx119*parms[40] - 0.27747*dx139 + dx140*x91 + dx146*x40 + dx150*x90 + dx40*x146 + dx90*x150 + dx91*x140 + dx92*x113
x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
dx152 = dx112*parms[43] + dx113*parms[45] + dx119*parms[44] + dx139 + dx140*x40 + dx40*x140
x153 = x13*x152
dx153 = dx13*x152 + dx152*x13
x154 = -0.27747*x20
dx154 = -0.27747*dx20
x155 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x154 + x146*x20 + x150*x40
dx155 = dx105*parms[38] + dx108*parms[36] + dx115*parms[43] + dx119*parms[37] + dx138*x75 + dx140*x154 + dx146*x20 + dx147*parms[44] + dx150*x40 + dx154*x140 + dx20*x146 + dx40*x150 + dx75*x138
x156 = -parms[55]
dx156 = 0
x157 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x156 + x142*x59 + x145*x30
dx157 = dx109*parms[52] + dx111*parms[50] + dx117*x156 + dx119*parms[53] + dx122*parms[54] + dx142*x59 + dx145*x30 + dx156*x117 + dx30*x145 + dx59*x142
x158 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x157
dx158 = dx105*parms[40] + dx108*parms[37] + dx113*parms[44] + dx115*x86 + dx119*parms[39] + dx157 + dx86*x115
x159 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
dx159 = dx108*parms[43] + dx115*parms[45] + dx137*parms[42] + dx138*x40 + dx140*x90 + dx40*x138 + dx90*x140
x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x155 + x158*x9 + 0.00502*x159
dx160 = dx100*x103 + dx102*parms[30] + dx103*x100 + dx13*x155 + dx155*x13 + dx158*x9 + 0.00502*dx159 + dx2*parms[26] + dx89*parms[28] + dx9*x158
x161 = -x9
dx161 = -dx9
x162 = x13*x20
dx162 = dx13*x20 + dx20*x13
x163 = x162*x59 + x30*x9
dx163 = dx162*x59 + dx30*x9 + dx59*x162 + dx9*x30
x164 = x13*x40
dx164 = dx13*x40 + dx40*x13
x165 = -x164
dx165 = -dx164
x166 = -x163*x38 - x165*x37
dx166 = -dx163*x38 - dx165*x37 - dx37*x165 - dx38*x163
x167 = x163*x57 + x165*x38
dx167 = dx163*x57 + dx165*x38 + dx38*x165 + dx57*x163
x168 = 0.27747*x13 + 0.00502
dx168 = 0.27747*dx13
x169 = x168*x40
dx169 = dx168*x40 + dx40*x168
x170 = x169*x82
dx170 = dx169*x82 + dx82*x169
x171 = -x170
dx171 = -dx170
x172 = x169*x59
dx172 = dx169*x59 + dx59*x169
x173 = -x162*x30 - x29*x9
dx173 = -dx162*x30 - dx29*x9 - dx30*x162 - dx9*x29
x174 = -x173
dx174 = -dx173
x175 = x168*x90
dx175 = dx168*x90 + dx90*x168
x176 = -x175
dx176 = -dx175
x177 = x172*x57 + x176*x38
dx177 = dx172*x57 + dx176*x38 + dx38*x176 + dx57*x172
x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
dx178 = dx166*parms[80] - dx174*parms[79] + dx177*parms[81]
x179 = -x172*x38 - x176*x37
dx179 = -dx172*x38 - dx176*x37 - dx37*x176 - dx38*x172
x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
dx180 = -dx167*parms[80] + dx174*parms[78] + dx179*parms[81]
x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
dx181 = dx161*parms[55] - dx163*parms[68]*x82 + dx164*parms[56] + dx165*(parms[66]*x82 - parms[67]*x59) + dx166*parms[78]*x82 - dx167*parms[79]*x82 + dx169*parms[57] + dx170*parms[69]*x82 - dx171*parms[81]*x82 + dx172*parms[69]*x59 + dx173*parms[68]*x59 + dx178*x57*x59 + dx180*x59*x76 + dx57*x178*x59 + dx59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + dx76*x180*x59 + dx82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
dx182 = -dx162*parms[56] - dx163*parms[67] - dx174*parms[66] + dx175*parms[57] - dx176*parms[69] - dx178*x38 - dx180*x57 - dx38*x178 - dx57*x180 + dx9*parms[54]
x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
dx183 = dx166*parms[76] + dx167*parms[74] + dx174*parms[77] + dx177*x79 + dx179*parms[78] + dx79*x177
x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
dx184 = dx163*parms[61] + dx165*parms[64] + dx172*parms[68] + dx173*parms[63] + dx176*x78 - dx183 + dx78*x176
x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
dx185 = dx166*parms[75] + dx167*parms[73] + dx171*x68 + dx174*parms[76] + dx177*parms[80] + dx68*x171
x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
dx186 = dx166*parms[73] + dx167*parms[72] + dx171*parms[79] + dx174*parms[74] + dx179*x70 + dx70*x179
x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
dx187 = dx163*parms[60] + dx165*parms[62] + dx171*parms[68] + dx173*parms[61] + dx176*parms[67] + dx185*x76 + dx186*x57 + dx57*x186 + dx76*x185
x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x156*x169 + x184*x59 + x187*x30
dx188 = dx156*x169 + dx162*parms[50] + dx164*parms[52] + dx169*x156 + dx175*parms[54] + dx184*x59 + dx187*x30 + dx30*x187 + dx59*x184 + dx9*parms[53]
x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
dx189 = dx162*parms[48] + dx164*parms[49] + dx176*parms[56] + dx184*x82 + dx187*x59 + dx59*x187 + dx82*x184 + dx9*parms[50]
x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
dx190 = dx148*x172 + dx163*parms[62] + dx165*parms[65] + dx170*parms[66] + dx172*x148 + dx173*parms[64] + dx185*x57 + dx186*x38 + dx38*x186 + dx57*x185
x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
dx191 = dx162*parms[49] + dx164*parms[51] + dx169*parms[56] - dx190 + dx9*parms[52]
x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
dx192 = dx13*parms[38] - 0.27747*dx181*x20 + dx182*x91 + dx189*x40 + dx191*x90 - 0.27747*dx20*x181 + dx40*x189 + dx9*parms[40] + dx90*x191 + dx91*x182
x193 = x154*x82
dx193 = dx154*x82 + dx82*x154
x194 = -x193
dx194 = -dx193
x195 = x40*x82
dx195 = dx40*x82 + dx82*x40
x196 = -x195
dx196 = -dx195
x197 = x40*x59
dx197 = dx40*x59 + dx59*x40
x198 = -x90
dx198 = -dx90
x199 = x197*x57 + x198*x38
dx199 = dx197*x57 + dx198*x38 + dx38*x198 + dx57*x197
x200 = x154*x59
dx200 = dx154*x59 + dx59*x154
x201 = -x91
dx201 = -dx91
x202 = -x200*x38 - x201*x37
dx202 = -dx200*x38 - dx201*x37 - dx37*x201 - dx38*x200
x203 = -x197*x38 - x198*x37
dx203 = -dx197*x38 - dx198*x37 - dx37*x198 - dx38*x197
x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
dx204 = dx194*parms[79] + dx196*parms[74] + dx199*parms[72] + dx202*x70 + dx203*parms[73] + dx70*x202
x205 = x200*x57 + x201*x38
dx205 = dx200*x57 + dx201*x38 + dx38*x201 + dx57*x200
x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
dx206 = dx194*x68 + dx196*parms[76] + dx199*parms[73] + dx203*parms[75] + dx205*parms[80] + dx68*x194
x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
dx207 = dx148*x200 + dx193*parms[66] + dx195*parms[64] + dx197*parms[62] + dx198*parms[65] + dx200*x148 + dx204*x38 + dx206*x57 + dx38*x204 + dx57*x206
x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
dx208 = dx196*parms[78] - dx199*parms[80] + dx202*parms[81]
x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
dx209 = -dx196*parms[79] + dx203*parms[80] + dx205*parms[81]
x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
dx210 = dx194*parms[68] + dx195*parms[61] + dx197*parms[60] + dx198*parms[62] + dx201*parms[67] + dx204*x57 + dx206*x76 + dx57*x204 + dx76*x206
x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
dx211 = dx196*parms[77] + dx199*parms[74] + dx202*parms[78] + dx203*parms[76] + dx205*x79 + dx79*x205
x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
dx212 = dx195*parms[63] + dx197*parms[61] + dx198*parms[64] + dx200*parms[68] + dx201*x78 - dx211 + dx78*x201
x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x154*x156 + x210*x30 + x212*x59
dx213 = dx154*x156 + dx156*x154 + dx210*x30 + dx212*x59 + dx30*x210 + dx40*parms[50] + dx59*x212 + dx90*parms[52] + dx91*parms[54]
x214 = -x59
dx214 = -dx59
x215 = x30*x76
dx215 = dx30*x76 + dx76*x30
x216 = x30*x57
dx216 = dx30*x57 + dx57*x30
x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
dx217 = dx214*parms[74] + dx215*parms[73] + dx216*parms[72]
x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
dx218 = dx214*parms[76] + dx215*parms[75] + dx216*parms[73]
x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
dx219 = dx214*parms[77] + dx215*parms[76] + dx216*parms[74]
x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
dx220 = dx217*x38 + dx218*x57 + dx30*parms[62] + dx38*x217 + dx57*x218 + dx59*parms[64]
x221 = parms[74]*x38 + parms[76]*x57
dx221 = dx38*parms[74] + dx57*parms[76]
#
dMdq4_out[0] = dx0*(2*parms[12]*x0 + 2*parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) - dx101*x4 + dx4*(2*parms[13]*x0 + 2*parms[15]*x4 - x101 - 0.03175*x66 + 0.03175*x99) + dx66*(-0.27857*x0 - 0.03175*x4) + dx7*x0*x96 + dx88*x0*x89 + dx89*x0*x88 + dx96*x0*x7 + dx99*(0.27857*x0 + 0.03175*x4)
dMdq4_out[1] = dx104
dMdq4_out[2] = dx101
dMdq4_out[3] = dx93
dMdq4_out[4] = dx85
dMdq4_out[5] = dx72
dMdq4_out[6] = dx80
dMdq4_out[7] = dx104
dMdq4_out[8] = dx102*(parms[32]*x7 + 2*parms[33]*x102 - x159) + dx103*(2*parms[32]*x89 + 2*parms[33]*x103 + x135*x64 + x152*x9) - 0.03175*dx105*parms[30] + dx115*parms[32]*x2 + dx135*x103*x64 + dx136*(0.00502*x89 - 0.03175) - dx151*x89 + dx152*x103*x9 + dx153*(0.00502*x89 - 0.03175) + dx155*x2*x9 + dx158*x2*x64 - dx159*x102 + dx2*(2*parms[24]*x2 + 2*parms[25]*x89 - 0.0635*parms[31] + parms[32]*x115 + x155*x9 + x158*x64) + dx64*(x103*x135 + x158*x2) + dx7*parms[32]*x102 + dx89*(2*parms[25]*x2 + 2*parms[27]*x89 + 0.03175*parms[30] + 2*parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153) + dx9*(x103*x152 + x155*x2)
dMdq4_out[9] = dx160
dMdq4_out[10] = dx151
dMdq4_out[11] = dx157
dMdq4_out[12] = dx149
dMdq4_out[13] = dx141
dMdq4_out[14] = dx101
dMdq4_out[15] = dx160
dMdq4_out[16] = dx13*(2*parms[36]*x13 + 2*parms[37]*x9 + 0.01004*parms[43] + x154*x182 + x181*x75 + x189*x20 + x191*x40) + dx154*x13*x182 + 0.00502*dx161*parms[42] + dx181*(x13*x75 + 0.00502*x40) + dx182*(x13*x154 + 0.00502*x90) + dx188*x9 + dx189*x13*x20 + dx191*x13*x40 + dx20*x13*x189 + dx40*(x13*x191 + 0.00502*x181) + dx75*x13*x181 + dx9*(2*parms[37]*x13 + 2*parms[39]*x9 - 0.00502*parms[42] + x188) + 0.00502*dx90*x182
dMdq4_out[17] = dx192
dMdq4_out[18] = dx188
dMdq4_out[19] = dx190
dMdq4_out[20] = dx183
dMdq4_out[21] = dx93
dMdq4_out[22] = dx151
dMdq4_out[23] = dx192
dMdq4_out[24] = dx154*(2*parms[56]*x90 + 2*parms[57]*x154 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + dx193*parms[69]*x154*x82 - dx194*parms[81]*x154*x82 + dx195*parms[68]*x154*x59 - dx196*parms[66]*x91 + dx197*(-parms[67]*x91 - parms[68]*x154*x82) + dx198*x154*(parms[66]*x82 - parms[67]*x59) - dx199*parms[79]*x154*x82 + dx200*parms[69]*x154*x59 + dx201*(parms[56]*x40 - parms[69]*x91) + dx203*parms[78]*x154*x82 - dx207*x90 + dx208*(x154*x59*x76 - x57*x91) + dx209*(x154*x57*x59 - x38*x91) + dx210*x40*x59 + dx212*x40*x82 - dx38*x209*x91 + dx40*(2*parms[48]*x40 + 2*parms[49]*x90 + parms[56]*x201 - parms[56]*x91 + x210*x59 + x212*x82) + dx57*(x154*x209*x59 - x208*x91) + dx59*(x154*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x210*x40) + dx76*x154*x208*x59 + dx82*(x154*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194) + x212*x40) + dx90*(2*parms[49]*x40 + 2*parms[51]*x90 + 2*parms[56]*x154 - x207) + dx91*(-parms[56]*x40 + 2*parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
dMdq4_out[25] = dx213
dMdq4_out[26] = dx207
dMdq4_out[27] = dx211
dMdq4_out[28] = dx85
dMdq4_out[29] = dx157
dMdq4_out[30] = dx188
dMdq4_out[31] = dx213
dMdq4_out[32] = dx217*x30*x57 + dx218*x30*x76 - dx219*x59 + dx30*(2*parms[60]*x30 + 2*parms[61]*x59 + x217*x57 + x218*x76) + dx57*x217*x30 + dx59*(2*parms[61]*x30 + 2*parms[63]*x59 - x219) + dx76*x218*x30
dMdq4_out[33] = dx220
dMdq4_out[34] = dx219
dMdq4_out[35] = dx72
dMdq4_out[36] = dx149
dMdq4_out[37] = dx190
dMdq4_out[38] = dx207
dMdq4_out[39] = dx220
dMdq4_out[40] = dx38*(2*parms[72]*x38 + 2*parms[73]*x57) + dx57*(2*parms[73]*x38 + 2*parms[75]*x57)
dMdq4_out[41] = dx221
dMdq4_out[42] = dx80
dMdq4_out[43] = dx141
dMdq4_out[44] = dx183
dMdq4_out[45] = dx211
dMdq4_out[46] = dx219
dMdq4_out[47] = dx221
dMdq4_out[48] = 0
#
return dMdq4_out
if jt_num == 5:
#
dMdq5_out = [0]*49
#
x0 = cos(q[1])
dx0 = 0
x1 = -x0
dx1 = -dx0
x2 = cos(q[2])
dx2 = 0
x3 = x1*x2
dx3 = dx1*x2 + dx2*x1
x4 = -sin(q[1])
dx4 = 0
x5 = -x4
dx5 = -dx4
x6 = 0.27857*x0 - 0.03175*x5
dx6 = 0.27857*dx0 - 0.03175*dx5
x7 = -x2
dx7 = -dx2
x8 = x6*x7
dx8 = dx6*x7 + dx7*x6
x9 = cos(q[3])
dx9 = 0
x10 = sin(q[2])
dx10 = 0
x11 = x1*x10
dx11 = dx1*x10 + dx10*x1
x12 = -x11
dx12 = -dx11
x13 = sin(q[3])
dx13 = 0
x14 = x12*x13 + x5*x9
dx14 = dx12*x13 + dx13*x12 + dx5*x9 + dx9*x5
x15 = -x3
dx15 = -dx3
x16 = -x15
dx16 = -dx15
x17 = -0.00502*x13*x15 + x8*x9
dx17 = -0.00502*dx13*x15 - 0.00502*dx15*x13 + dx8*x9 + dx9*x8
x18 = sin(q[4])
dx18 = 0
x19 = 0.27747*x16 + x17
dx19 = 0.27747*dx16 + dx17
x20 = cos(q[4])
dx20 = 0
x21 = x10*x6
dx21 = dx10*x6 + dx6*x10
x22 = -x21
dx22 = -dx21
x23 = x22 + 0.00502*x5
dx23 = dx22 + 0.00502*dx5
x24 = x11*x9 + x13*x5
dx24 = dx11*x9 + dx13*x5 + dx5*x13 + dx9*x11
x25 = x23 + 0.27747*x24
dx25 = dx23 + 0.27747*dx24
x26 = -x18*x19 - x20*x25
dx26 = -dx18*x19 - dx19*x18 - dx20*x25 - dx25*x20
x27 = x16*x18 + x20*x24
dx27 = dx16*x18 + dx18*x16 + dx20*x24 + dx24*x20
x28 = -x27
dx28 = -dx27
x29 = sin(q[5])
dx29 = cos(q[5])
x30 = cos(q[5])
dx30 = -sin(q[5])
x31 = x14*x30 + x28*x29
dx31 = dx14*x30 + dx28*x29 + dx29*x28 + dx30*x14
x32 = -x26
dx32 = -dx26
x33 = -x14*x29 - x27*x30
dx33 = -dx14*x29 - dx27*x30 - dx29*x14 - dx30*x27
x34 = -x33
dx34 = -dx33
x35 = -x15*x20 - x18*x24
dx35 = -dx15*x20 - dx18*x24 - dx20*x15 - dx24*x18
x36 = -x35
dx36 = -dx35
x37 = sin(q[6])
dx37 = 0
x38 = cos(q[6])
dx38 = 0
x39 = -x31*x38 - x36*x37
dx39 = -dx31*x38 - dx36*x37 - dx37*x36 - dx38*x31
x40 = -x18
dx40 = -dx18
x41 = x19*x20 + x25*x40
dx41 = dx19*x20 + dx20*x19 + dx25*x40 + dx40*x25
x42 = -x41
dx42 = -dx41
x43 = -x13*x8 - 0.00502*x15*x9
dx43 = -dx13*x8 - 0.00502*dx15*x9 - dx8*x13 - 0.00502*dx9*x15
x44 = x29*x42 + x30*x43
dx44 = dx29*x42 + dx30*x43 + dx42*x29 + dx43*x30
x45 = -x44
dx45 = -dx44
x46 = x32*x38 + x37*x45
dx46 = dx32*x38 + dx37*x45 + dx38*x32 + dx45*x37
x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
dx47 = -dx34*parms[79] + dx39*parms[80] + dx46*parms[81]
x48 = -x32*x37 - x38*x44
dx48 = -dx32*x37 - dx37*x32 - dx38*x44 - dx44*x38
x49 = -x31
dx49 = -dx31
x50 = x36*x38 + x37*x49
dx50 = dx36*x38 + dx37*x49 + dx38*x36 + dx49*x37
x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
dx51 = -dx34*parms[78] - dx48*parms[81] + dx50*parms[80]
x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
dx52 = dx14*parms[54] + dx26*parms[57] + dx28*parms[56] - dx31*parms[67] - dx32*parms[69] - dx34*parms[66] - dx37*x51 - dx38*x47 - dx47*x38 - dx51*x37
x53 = -x14
dx53 = -dx14
x54 = -x29*x43 - x30*x41
dx54 = -dx29*x43 - dx30*x41 - dx41*x30 - dx43*x29
x55 = -x54
dx55 = -dx54
x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
dx56 = -dx36*parms[66] - dx39*parms[78] - dx49*parms[68] + dx50*parms[79] - dx54*parms[69] + dx55*parms[81]
x57 = -x37
dx57 = -dx37
x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
dx58 = dx33*parms[68] - dx36*parms[67] + dx38*x51 + dx44*parms[69] + dx47*x57 + dx51*x38 + dx57*x47
x59 = -x29
dx59 = -dx29
x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
dx60 = dx30*x56 + dx35*parms[56] + dx41*parms[57] + dx53*parms[55] + dx56*x30 + dx58*x59 + dx59*x58
x61 = x20*x60
dx61 = dx20*x60 + dx60*x20
x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
dx62 = dx14*parms[44] + dx16*parms[43] + dx17*parms[45] + dx40*x52 + dx52*x40 + dx61
x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
dx63 = dx15*parms[42] - dx24*parms[44] + dx27*parms[55] + dx29*x56 + dx30*x58 + dx36*parms[54] + dx43*(parms[45] + parms[57]) + dx56*x29 + dx58*x30
x64 = -x13
dx64 = -dx13
x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
dx65 = dx3*parms[32] - dx5*parms[31] + dx62*x9 + dx63*x64 + dx64*x63 + dx8*parms[33] + dx9*x62
x66 = x2*x65
dx66 = dx2*x65 + dx65*x2
x67 = -x43
dx67 = -dx43
x68 = -parms[78]
dx68 = 0
x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
dx69 = dx34*parms[76] + dx39*parms[75] + dx46*parms[80] + dx50*parms[73] + dx55*x68 + dx68*x55
x70 = -parms[80]
dx70 = 0
x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
dx71 = dx34*parms[74] + dx39*parms[73] + dx48*x70 + dx50*parms[72] + dx55*parms[79] + dx70*x48
x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
dx72 = dx31*parms[62] + dx33*parms[64] + dx36*parms[65] + dx38*x71 + dx45*parms[67] + dx54*parms[66] + dx57*x69 + dx69*x57 + dx71*x38
x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
dx73 = dx14*parms[52] + dx27*parms[49] + dx35*parms[51] + dx41*parms[56] + dx67*parms[54] - dx72
x74 = x20*x52
dx74 = dx20*x52 + dx52*x20
x75 = -0.27747*x18
dx75 = -0.27747*dx18
x76 = -x38
dx76 = -dx38
x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
dx77 = dx31*parms[60] + dx32*parms[67] + dx33*parms[61] + dx36*parms[62] + dx55*parms[68] + dx57*x71 + dx69*x76 + dx71*x57 + dx76*x69
x78 = -parms[66]
dx78 = 0
x79 = -parms[79]
dx79 = 0
x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
dx80 = dx34*parms[77] + dx39*parms[76] + dx46*x79 + dx48*parms[78] + dx50*parms[74] + dx79*x46
x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
dx81 = dx31*parms[61] + dx32*x78 + dx33*parms[63] + dx36*parms[64] + dx44*parms[68] + dx78*x32 - dx80
x82 = -x30
dx82 = -dx30
x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
dx83 = dx14*parms[50] + dx27*parms[48] + dx32*parms[56] + dx35*parms[49] + dx43*parms[55] + dx59*x77 + dx77*x59 + dx81*x82 + dx82*x81
x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
dx84 = dx14*parms[37] + dx15*parms[38] + dx20*x83 + dx23*parms[43] + dx24*parms[36] + dx40*x73 + dx60*x75 + dx67*parms[44] + dx73*x40 - 0.27747*dx74 + dx75*x60 + dx83*x20
x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
dx85 = dx14*parms[53] + dx26*parms[54] + dx27*parms[50] + dx30*x77 + dx35*parms[52] + dx42*parms[55] + dx59*x81 + dx77*x30 + dx81*x59
x86 = -parms[42]
dx86 = 0
x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
dx87 = dx14*parms[39] + dx15*parms[40] + dx17*parms[44] + dx23*x86 + dx24*parms[37] + dx85 + dx86*x23
x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
dx88 = dx11*parms[24] + dx22*parms[32] + dx3*parms[25] + dx5*parms[26] + dx64*x87 + dx84*x9 + dx87*x64 + dx9*x84
x89 = -x10
dx89 = -dx10
x90 = -x20
dx90 = -dx20
x91 = 0.27747*x18
dx91 = 0.27747*dx18
x92 = -parms[43]
dx92 = 0
x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
dx93 = dx14*parms[40] + dx15*parms[41] + dx17*x92 + dx24*parms[38] + dx40*x83 + dx43*parms[42] + dx52*x91 - 0.27747*dx61 + dx73*x90 + dx83*x40 + dx90*x73 + dx91*x52 + dx92*x17
x94 = x13*x62
dx94 = dx13*x62 + dx62*x13
x95 = x63*x9
dx95 = dx63*x9 + dx9*x63
x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
dx96 = dx11*parms[25] + dx3*parms[27] + dx5*parms[28] + dx8*parms[32] - dx93 + 0.00502*dx94 + 0.00502*dx95
x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
dx97 = dx23*parms[45] + dx24*parms[43] + dx40*x60 + dx53*parms[42] + dx60*x40 - dx74
x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
dx98 = dx12*parms[32] + dx21*parms[33] + dx5*parms[30] - dx97
x99 = x10*x98
dx99 = dx10*x98 + dx98*x10
x100 = -parms[31]
dx100 = 0
x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
dx101 = dx100*x8 + dx11*parms[26] + dx13*x84 + dx21*parms[30] + dx3*parms[28] + dx5*parms[29] + dx8*x100 + dx84*x13 + dx87*x9 + dx9*x87 + 0.00502*dx97
x102 = -0.27857*x2
dx102 = -0.27857*dx2
x103 = -0.27857*x10
dx103 = -0.27857*dx10
x104 = parms[14]*x0 + parms[16]*x4 - 0.03175*parms[30]*x15 - 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 - 0.03175*x94 - 0.03175*x95
dx104 = dx0*parms[14] + dx102*x98 + dx103*x65 - 0.03175*dx11*parms[31] - 0.03175*dx15*parms[30] + dx2*x88 + dx4*parms[16] + dx65*x103 + dx88*x2 + dx89*x96 - 0.03175*dx94 - 0.03175*dx95 + dx96*x89 + dx98*x102
x105 = -x89
dx105 = -dx89
x106 = 0.00502*x105 + 0.03175
dx106 = 0.00502*dx105
x107 = -x103*x13 - x106*x9
dx107 = -dx103*x13 - dx106*x9 - dx13*x103 - dx9*x106
x108 = x2*x9
dx108 = dx2*x9 + dx9*x2
x109 = -x105*x20 - x108*x18
dx109 = -dx105*x20 - dx108*x18 - dx18*x108 - dx20*x105
x110 = -x109
dx110 = -dx109
x111 = x105*x40 + x108*x20
dx111 = dx105*x40 + dx108*x20 + dx20*x108 + dx40*x105
x112 = -x105
dx112 = -dx105
x113 = x103*x9 + x106*x64
dx113 = dx103*x9 + dx106*x64 + dx64*x106 + dx9*x103
x114 = 0.27747*x112 + x113
dx114 = 0.27747*dx112 + dx113
x115 = -x102
dx115 = -dx102
x116 = 0.27747*x108 + x115
dx116 = 0.27747*dx108 + dx115
x117 = x114*x20 + x116*x40
dx117 = dx114*x20 + dx116*x40 + dx20*x114 + dx40*x116
x118 = x107*x30 + x117*x59
dx118 = dx107*x30 + dx117*x59 + dx30*x107 + dx59*x117
x119 = x2*x64
dx119 = dx2*x64 + dx64*x2
x120 = -x111*x30 - x119*x29
dx120 = -dx111*x30 - dx119*x29 - dx29*x119 - dx30*x111
x121 = -x120
dx121 = -dx120
x122 = -x114*x18 - x116*x20
dx122 = -dx114*x18 - dx116*x20 - dx18*x114 - dx20*x116
x123 = -x122
dx123 = -dx122
x124 = x118*x57 + x123*x38
dx124 = dx118*x57 + dx123*x38 + dx38*x123 + dx57*x118
x125 = x111*x59 + x119*x30
dx125 = dx111*x59 + dx119*x30 + dx30*x119 + dx59*x111
x126 = -x110*x37 - x125*x38
dx126 = -dx110*x37 - dx125*x38 - dx37*x110 - dx38*x125
x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
dx127 = -dx121*parms[79] + dx124*parms[81] + dx126*parms[80]
x128 = x110*x38 + x125*x57
dx128 = dx110*x38 + dx125*x57 + dx38*x110 + dx57*x125
x129 = -x118*x38 - x123*x37
dx129 = -dx118*x38 - dx123*x37 - dx37*x123 - dx38*x118
x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
dx130 = dx121*parms[78] - dx128*parms[80] + dx129*parms[81]
x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
dx131 = -dx110*parms[67] + dx118*parms[69] + dx120*parms[68] + dx127*x57 + dx130*x76 + dx57*x127 + dx76*x130
x132 = -x107*x29 - x117*x30
dx132 = -dx107*x29 - dx117*x30 - dx29*x107 - dx30*x117
x133 = -x132
dx133 = -dx132
x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
dx134 = dx110*parms[66] - dx125*parms[68] + dx126*parms[78] - dx128*parms[79] + dx132*parms[69] - dx133*parms[81]
x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
dx135 = dx105*parms[42] + dx107*(parms[45] + parms[57]) - dx108*parms[44] + dx110*parms[54] + dx111*parms[55] + dx131*x30 + dx134*x59 + dx30*x131 + dx59*x134
x136 = x135*x9
dx136 = dx135*x9 + dx9*x135
x137 = -x119
dx137 = -dx119
x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
dx138 = dx109*parms[56] + dx117*parms[57] + dx131*x59 + dx134*x82 + dx137*parms[55] + dx59*x131 + dx82*x134
x139 = x138*x20
dx139 = dx138*x20 + dx20*x138
x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
dx140 = -dx111*parms[56] + dx119*parms[54] - dx121*parms[66] + dx122*parms[57] - dx123*parms[69] - dx125*parms[67] - dx127*x38 - dx130*x57 - dx38*x127 - dx57*x130
x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
dx141 = dx121*parms[77] + dx124*x79 + dx126*parms[76] + dx128*parms[74] + dx129*parms[78] + dx79*x124
x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
dx142 = dx110*parms[64] + dx118*parms[68] + dx120*parms[63] + dx123*x78 + dx125*parms[61] - dx141 + dx78*x123
x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
dx143 = dx121*parms[74] + dx126*parms[73] + dx128*parms[72] + dx129*x70 + dx133*parms[79] + dx70*x129
x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
dx144 = dx121*parms[76] + dx124*parms[80] + dx126*parms[75] + dx128*parms[73] + dx133*x68 + dx68*x133
x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
dx145 = dx110*parms[62] + dx120*parms[61] + dx123*parms[67] + dx125*parms[60] + dx133*parms[68] + dx143*x57 + dx144*x76 + dx57*x143 + dx76*x144
x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
dx146 = dx107*parms[55] + dx109*parms[49] + dx111*parms[48] + dx119*parms[50] + dx123*parms[56] + dx142*x82 + dx145*x59 + dx59*x145 + dx82*x142
x147 = -x107
dx147 = -dx107
x148 = -parms[67]
dx148 = 0
x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
dx149 = dx110*parms[65] + dx118*x148 + dx120*parms[64] + dx125*parms[62] + dx132*parms[66] + dx143*x38 + dx144*x57 + dx148*x118 + dx38*x143 + dx57*x144
x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
dx150 = dx109*parms[51] + dx111*parms[49] + dx117*parms[56] + dx119*parms[52] + dx147*parms[54] - dx149
x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
dx151 = dx105*parms[41] + dx107*parms[42] + dx108*parms[38] + dx113*x92 + dx119*parms[40] - 0.27747*dx139 + dx140*x91 + dx146*x40 + dx150*x90 + dx40*x146 + dx90*x150 + dx91*x140 + dx92*x113
x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
dx152 = dx112*parms[43] + dx113*parms[45] + dx119*parms[44] + dx139 + dx140*x40 + dx40*x140
x153 = x13*x152
dx153 = dx13*x152 + dx152*x13
x154 = -0.27747*x20
dx154 = -0.27747*dx20
x155 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x154 + x146*x20 + x150*x40
dx155 = dx105*parms[38] + dx108*parms[36] + dx115*parms[43] + dx119*parms[37] + dx138*x75 + dx140*x154 + dx146*x20 + dx147*parms[44] + dx150*x40 + dx154*x140 + dx20*x146 + dx40*x150 + dx75*x138
x156 = -parms[55]
dx156 = 0
x157 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x156 + x142*x59 + x145*x30
dx157 = dx109*parms[52] + dx111*parms[50] + dx117*x156 + dx119*parms[53] + dx122*parms[54] + dx142*x59 + dx145*x30 + dx156*x117 + dx30*x145 + dx59*x142
x158 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x157
dx158 = dx105*parms[40] + dx108*parms[37] + dx113*parms[44] + dx115*x86 + dx119*parms[39] + dx157 + dx86*x115
x159 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
dx159 = dx108*parms[43] + dx115*parms[45] + dx137*parms[42] + dx138*x40 + dx140*x90 + dx40*x138 + dx90*x140
x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x155 + x158*x9 + 0.00502*x159
dx160 = dx100*x103 + dx102*parms[30] + dx103*x100 + dx13*x155 + dx155*x13 + dx158*x9 + 0.00502*dx159 + dx2*parms[26] + dx89*parms[28] + dx9*x158
x161 = -x9
dx161 = -dx9
x162 = x13*x20
dx162 = dx13*x20 + dx20*x13
x163 = x162*x59 + x30*x9
dx163 = dx162*x59 + dx30*x9 + dx59*x162 + dx9*x30
x164 = x13*x40
dx164 = dx13*x40 + dx40*x13
x165 = -x164
dx165 = -dx164
x166 = -x163*x38 - x165*x37
dx166 = -dx163*x38 - dx165*x37 - dx37*x165 - dx38*x163
x167 = x163*x57 + x165*x38
dx167 = dx163*x57 + dx165*x38 + dx38*x165 + dx57*x163
x168 = 0.27747*x13 + 0.00502
dx168 = 0.27747*dx13
x169 = x168*x40
dx169 = dx168*x40 + dx40*x168
x170 = x169*x82
dx170 = dx169*x82 + dx82*x169
x171 = -x170
dx171 = -dx170
x172 = x169*x59
dx172 = dx169*x59 + dx59*x169
x173 = -x162*x30 - x29*x9
dx173 = -dx162*x30 - dx29*x9 - dx30*x162 - dx9*x29
x174 = -x173
dx174 = -dx173
x175 = x168*x90
dx175 = dx168*x90 + dx90*x168
x176 = -x175
dx176 = -dx175
x177 = x172*x57 + x176*x38
dx177 = dx172*x57 + dx176*x38 + dx38*x176 + dx57*x172
x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
dx178 = dx166*parms[80] - dx174*parms[79] + dx177*parms[81]
x179 = -x172*x38 - x176*x37
dx179 = -dx172*x38 - dx176*x37 - dx37*x176 - dx38*x172
x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
dx180 = -dx167*parms[80] + dx174*parms[78] + dx179*parms[81]
x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
dx181 = dx161*parms[55] - dx163*parms[68]*x82 + dx164*parms[56] + dx165*(parms[66]*x82 - parms[67]*x59) + dx166*parms[78]*x82 - dx167*parms[79]*x82 + dx169*parms[57] + dx170*parms[69]*x82 - dx171*parms[81]*x82 + dx172*parms[69]*x59 + dx173*parms[68]*x59 + dx178*x57*x59 + dx180*x59*x76 + dx57*x178*x59 + dx59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + dx76*x180*x59 + dx82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
dx182 = -dx162*parms[56] - dx163*parms[67] - dx174*parms[66] + dx175*parms[57] - dx176*parms[69] - dx178*x38 - dx180*x57 - dx38*x178 - dx57*x180 + dx9*parms[54]
x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
dx183 = dx166*parms[76] + dx167*parms[74] + dx174*parms[77] + dx177*x79 + dx179*parms[78] + dx79*x177
x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
dx184 = dx163*parms[61] + dx165*parms[64] + dx172*parms[68] + dx173*parms[63] + dx176*x78 - dx183 + dx78*x176
x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
dx185 = dx166*parms[75] + dx167*parms[73] + dx171*x68 + dx174*parms[76] + dx177*parms[80] + dx68*x171
x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
dx186 = dx166*parms[73] + dx167*parms[72] + dx171*parms[79] + dx174*parms[74] + dx179*x70 + dx70*x179
x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
dx187 = dx163*parms[60] + dx165*parms[62] + dx171*parms[68] + dx173*parms[61] + dx176*parms[67] + dx185*x76 + dx186*x57 + dx57*x186 + dx76*x185
x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x156*x169 + x184*x59 + x187*x30
dx188 = dx156*x169 + dx162*parms[50] + dx164*parms[52] + dx169*x156 + dx175*parms[54] + dx184*x59 + dx187*x30 + dx30*x187 + dx59*x184 + dx9*parms[53]
x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
dx189 = dx162*parms[48] + dx164*parms[49] + dx176*parms[56] + dx184*x82 + dx187*x59 + dx59*x187 + dx82*x184 + dx9*parms[50]
x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
dx190 = dx148*x172 + dx163*parms[62] + dx165*parms[65] + dx170*parms[66] + dx172*x148 + dx173*parms[64] + dx185*x57 + dx186*x38 + dx38*x186 + dx57*x185
x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
dx191 = dx162*parms[49] + dx164*parms[51] + dx169*parms[56] - dx190 + dx9*parms[52]
x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
dx192 = dx13*parms[38] - 0.27747*dx181*x20 + dx182*x91 + dx189*x40 + dx191*x90 - 0.27747*dx20*x181 + dx40*x189 + dx9*parms[40] + dx90*x191 + dx91*x182
x193 = x154*x82
dx193 = dx154*x82 + dx82*x154
x194 = -x193
dx194 = -dx193
x195 = x40*x82
dx195 = dx40*x82 + dx82*x40
x196 = -x195
dx196 = -dx195
x197 = x40*x59
dx197 = dx40*x59 + dx59*x40
x198 = -x90
dx198 = -dx90
x199 = x197*x57 + x198*x38
dx199 = dx197*x57 + dx198*x38 + dx38*x198 + dx57*x197
x200 = x154*x59
dx200 = dx154*x59 + dx59*x154
x201 = -x91
dx201 = -dx91
x202 = -x200*x38 - x201*x37
dx202 = -dx200*x38 - dx201*x37 - dx37*x201 - dx38*x200
x203 = -x197*x38 - x198*x37
dx203 = -dx197*x38 - dx198*x37 - dx37*x198 - dx38*x197
x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
dx204 = dx194*parms[79] + dx196*parms[74] + dx199*parms[72] + dx202*x70 + dx203*parms[73] + dx70*x202
x205 = x200*x57 + x201*x38
dx205 = dx200*x57 + dx201*x38 + dx38*x201 + dx57*x200
x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
dx206 = dx194*x68 + dx196*parms[76] + dx199*parms[73] + dx203*parms[75] + dx205*parms[80] + dx68*x194
x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
dx207 = dx148*x200 + dx193*parms[66] + dx195*parms[64] + dx197*parms[62] + dx198*parms[65] + dx200*x148 + dx204*x38 + dx206*x57 + dx38*x204 + dx57*x206
x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
dx208 = dx196*parms[78] - dx199*parms[80] + dx202*parms[81]
x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
dx209 = -dx196*parms[79] + dx203*parms[80] + dx205*parms[81]
x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
dx210 = dx194*parms[68] + dx195*parms[61] + dx197*parms[60] + dx198*parms[62] + dx201*parms[67] + dx204*x57 + dx206*x76 + dx57*x204 + dx76*x206
x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
dx211 = dx196*parms[77] + dx199*parms[74] + dx202*parms[78] + dx203*parms[76] + dx205*x79 + dx79*x205
x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
dx212 = dx195*parms[63] + dx197*parms[61] + dx198*parms[64] + dx200*parms[68] + dx201*x78 - dx211 + dx78*x201
x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x154*x156 + x210*x30 + x212*x59
dx213 = dx154*x156 + dx156*x154 + dx210*x30 + dx212*x59 + dx30*x210 + dx40*parms[50] + dx59*x212 + dx90*parms[52] + dx91*parms[54]
x214 = -x59
dx214 = -dx59
x215 = x30*x76
dx215 = dx30*x76 + dx76*x30
x216 = x30*x57
dx216 = dx30*x57 + dx57*x30
x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
dx217 = dx214*parms[74] + dx215*parms[73] + dx216*parms[72]
x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
dx218 = dx214*parms[76] + dx215*parms[75] + dx216*parms[73]
x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
dx219 = dx214*parms[77] + dx215*parms[76] + dx216*parms[74]
x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
dx220 = dx217*x38 + dx218*x57 + dx30*parms[62] + dx38*x217 + dx57*x218 + dx59*parms[64]
x221 = parms[74]*x38 + parms[76]*x57
dx221 = dx38*parms[74] + dx57*parms[76]
#
dMdq5_out[0] = dx0*(2*parms[12]*x0 + 2*parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) - dx101*x4 + dx4*(2*parms[13]*x0 + 2*parms[15]*x4 - x101 - 0.03175*x66 + 0.03175*x99) + dx66*(-0.27857*x0 - 0.03175*x4) + dx7*x0*x96 + dx88*x0*x89 + dx89*x0*x88 + dx96*x0*x7 + dx99*(0.27857*x0 + 0.03175*x4)
dMdq5_out[1] = dx104
dMdq5_out[2] = dx101
dMdq5_out[3] = dx93
dMdq5_out[4] = dx85
dMdq5_out[5] = dx72
dMdq5_out[6] = dx80
dMdq5_out[7] = dx104
dMdq5_out[8] = dx102*(parms[32]*x7 + 2*parms[33]*x102 - x159) + dx103*(2*parms[32]*x89 + 2*parms[33]*x103 + x135*x64 + x152*x9) - 0.03175*dx105*parms[30] + dx115*parms[32]*x2 + dx135*x103*x64 + dx136*(0.00502*x89 - 0.03175) - dx151*x89 + dx152*x103*x9 + dx153*(0.00502*x89 - 0.03175) + dx155*x2*x9 + dx158*x2*x64 - dx159*x102 + dx2*(2*parms[24]*x2 + 2*parms[25]*x89 - 0.0635*parms[31] + parms[32]*x115 + x155*x9 + x158*x64) + dx64*(x103*x135 + x158*x2) + dx7*parms[32]*x102 + dx89*(2*parms[25]*x2 + 2*parms[27]*x89 + 0.03175*parms[30] + 2*parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153) + dx9*(x103*x152 + x155*x2)
dMdq5_out[9] = dx160
dMdq5_out[10] = dx151
dMdq5_out[11] = dx157
dMdq5_out[12] = dx149
dMdq5_out[13] = dx141
dMdq5_out[14] = dx101
dMdq5_out[15] = dx160
dMdq5_out[16] = dx13*(2*parms[36]*x13 + 2*parms[37]*x9 + 0.01004*parms[43] + x154*x182 + x181*x75 + x189*x20 + x191*x40) + dx154*x13*x182 + 0.00502*dx161*parms[42] + dx181*(x13*x75 + 0.00502*x40) + dx182*(x13*x154 + 0.00502*x90) + dx188*x9 + dx189*x13*x20 + dx191*x13*x40 + dx20*x13*x189 + dx40*(x13*x191 + 0.00502*x181) + dx75*x13*x181 + dx9*(2*parms[37]*x13 + 2*parms[39]*x9 - 0.00502*parms[42] + x188) + 0.00502*dx90*x182
dMdq5_out[17] = dx192
dMdq5_out[18] = dx188
dMdq5_out[19] = dx190
dMdq5_out[20] = dx183
dMdq5_out[21] = dx93
dMdq5_out[22] = dx151
dMdq5_out[23] = dx192
dMdq5_out[24] = dx154*(2*parms[56]*x90 + 2*parms[57]*x154 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + dx193*parms[69]*x154*x82 - dx194*parms[81]*x154*x82 + dx195*parms[68]*x154*x59 - dx196*parms[66]*x91 + dx197*(-parms[67]*x91 - parms[68]*x154*x82) + dx198*x154*(parms[66]*x82 - parms[67]*x59) - dx199*parms[79]*x154*x82 + dx200*parms[69]*x154*x59 + dx201*(parms[56]*x40 - parms[69]*x91) + dx203*parms[78]*x154*x82 - dx207*x90 + dx208*(x154*x59*x76 - x57*x91) + dx209*(x154*x57*x59 - x38*x91) + dx210*x40*x59 + dx212*x40*x82 - dx38*x209*x91 + dx40*(2*parms[48]*x40 + 2*parms[49]*x90 + parms[56]*x201 - parms[56]*x91 + x210*x59 + x212*x82) + dx57*(x154*x209*x59 - x208*x91) + dx59*(x154*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x210*x40) + dx76*x154*x208*x59 + dx82*(x154*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194) + x212*x40) + dx90*(2*parms[49]*x40 + 2*parms[51]*x90 + 2*parms[56]*x154 - x207) + dx91*(-parms[56]*x40 + 2*parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
dMdq5_out[25] = dx213
dMdq5_out[26] = dx207
dMdq5_out[27] = dx211
dMdq5_out[28] = dx85
dMdq5_out[29] = dx157
dMdq5_out[30] = dx188
dMdq5_out[31] = dx213
dMdq5_out[32] = dx217*x30*x57 + dx218*x30*x76 - dx219*x59 + dx30*(2*parms[60]*x30 + 2*parms[61]*x59 + x217*x57 + x218*x76) + dx57*x217*x30 + dx59*(2*parms[61]*x30 + 2*parms[63]*x59 - x219) + dx76*x218*x30
dMdq5_out[33] = dx220
dMdq5_out[34] = dx219
dMdq5_out[35] = dx72
dMdq5_out[36] = dx149
dMdq5_out[37] = dx190
dMdq5_out[38] = dx207
dMdq5_out[39] = dx220
dMdq5_out[40] = dx38*(2*parms[72]*x38 + 2*parms[73]*x57) + dx57*(2*parms[73]*x38 + 2*parms[75]*x57)
dMdq5_out[41] = dx221
dMdq5_out[42] = dx80
dMdq5_out[43] = dx141
dMdq5_out[44] = dx183
dMdq5_out[45] = dx211
dMdq5_out[46] = dx219
dMdq5_out[47] = dx221
dMdq5_out[48] = 0
#
return dMdq5_out
if jt_num == 6:
#
dMdq6_out = [0]*49
#
x0 = cos(q[1])
dx0 = 0
x1 = -x0
dx1 = -dx0
x2 = cos(q[2])
dx2 = 0
x3 = x1*x2
dx3 = dx1*x2 + dx2*x1
x4 = -sin(q[1])
dx4 = 0
x5 = -x4
dx5 = -dx4
x6 = 0.27857*x0 - 0.03175*x5
dx6 = 0.27857*dx0 - 0.03175*dx5
x7 = -x2
dx7 = -dx2
x8 = x6*x7
dx8 = dx6*x7 + dx7*x6
x9 = cos(q[3])
dx9 = 0
x10 = sin(q[2])
dx10 = 0
x11 = x1*x10
dx11 = dx1*x10 + dx10*x1
x12 = -x11
dx12 = -dx11
x13 = sin(q[3])
dx13 = 0
x14 = x12*x13 + x5*x9
dx14 = dx12*x13 + dx13*x12 + dx5*x9 + dx9*x5
x15 = -x3
dx15 = -dx3
x16 = -x15
dx16 = -dx15
x17 = -0.00502*x13*x15 + x8*x9
dx17 = -0.00502*dx13*x15 - 0.00502*dx15*x13 + dx8*x9 + dx9*x8
x18 = sin(q[4])
dx18 = 0
x19 = 0.27747*x16 + x17
dx19 = 0.27747*dx16 + dx17
x20 = cos(q[4])
dx20 = 0
x21 = x10*x6
dx21 = dx10*x6 + dx6*x10
x22 = -x21
dx22 = -dx21
x23 = x22 + 0.00502*x5
dx23 = dx22 + 0.00502*dx5
x24 = x11*x9 + x13*x5
dx24 = dx11*x9 + dx13*x5 + dx5*x13 + dx9*x11
x25 = x23 + 0.27747*x24
dx25 = dx23 + 0.27747*dx24
x26 = -x18*x19 - x20*x25
dx26 = -dx18*x19 - dx19*x18 - dx20*x25 - dx25*x20
x27 = x16*x18 + x20*x24
dx27 = dx16*x18 + dx18*x16 + dx20*x24 + dx24*x20
x28 = -x27
dx28 = -dx27
x29 = sin(q[5])
dx29 = 0
x30 = cos(q[5])
dx30 = 0
x31 = x14*x30 + x28*x29
dx31 = dx14*x30 + dx28*x29 + dx29*x28 + dx30*x14
x32 = -x26
dx32 = -dx26
x33 = -x14*x29 - x27*x30
dx33 = -dx14*x29 - dx27*x30 - dx29*x14 - dx30*x27
x34 = -x33
dx34 = -dx33
x35 = -x15*x20 - x18*x24
dx35 = -dx15*x20 - dx18*x24 - dx20*x15 - dx24*x18
x36 = -x35
dx36 = -dx35
x37 = sin(q[6])
dx37 = cos(q[6])
x38 = cos(q[6])
dx38 = -sin(q[6])
x39 = -x31*x38 - x36*x37
dx39 = -dx31*x38 - dx36*x37 - dx37*x36 - dx38*x31
x40 = -x18
dx40 = -dx18
x41 = x19*x20 + x25*x40
dx41 = dx19*x20 + dx20*x19 + dx25*x40 + dx40*x25
x42 = -x41
dx42 = -dx41
x43 = -x13*x8 - 0.00502*x15*x9
dx43 = -dx13*x8 - 0.00502*dx15*x9 - dx8*x13 - 0.00502*dx9*x15
x44 = x29*x42 + x30*x43
dx44 = dx29*x42 + dx30*x43 + dx42*x29 + dx43*x30
x45 = -x44
dx45 = -dx44
x46 = x32*x38 + x37*x45
dx46 = dx32*x38 + dx37*x45 + dx38*x32 + dx45*x37
x47 = -parms[79]*x34 + parms[80]*x39 + parms[81]*x46
dx47 = -dx34*parms[79] + dx39*parms[80] + dx46*parms[81]
x48 = -x32*x37 - x38*x44
dx48 = -dx32*x37 - dx37*x32 - dx38*x44 - dx44*x38
x49 = -x31
dx49 = -dx31
x50 = x36*x38 + x37*x49
dx50 = dx36*x38 + dx37*x49 + dx38*x36 + dx49*x37
x51 = -parms[78]*x34 + parms[80]*x50 - parms[81]*x48
dx51 = -dx34*parms[78] - dx48*parms[81] + dx50*parms[80]
x52 = parms[54]*x14 + parms[56]*x28 + parms[57]*x26 - parms[66]*x34 - parms[67]*x31 - parms[69]*x32 - x37*x51 - x38*x47
dx52 = dx14*parms[54] + dx26*parms[57] + dx28*parms[56] - dx31*parms[67] - dx32*parms[69] - dx34*parms[66] - dx37*x51 - dx38*x47 - dx47*x38 - dx51*x37
x53 = -x14
dx53 = -dx14
x54 = -x29*x43 - x30*x41
dx54 = -dx29*x43 - dx30*x41 - dx41*x30 - dx43*x29
x55 = -x54
dx55 = -dx54
x56 = -parms[66]*x36 - parms[68]*x49 - parms[69]*x54 - parms[78]*x39 + parms[79]*x50 + parms[81]*x55
dx56 = -dx36*parms[66] - dx39*parms[78] - dx49*parms[68] + dx50*parms[79] - dx54*parms[69] + dx55*parms[81]
x57 = -x37
dx57 = -dx37
x58 = -parms[67]*x36 + parms[68]*x33 + parms[69]*x44 + x38*x51 + x47*x57
dx58 = dx33*parms[68] - dx36*parms[67] + dx38*x51 + dx44*parms[69] + dx47*x57 + dx51*x38 + dx57*x47
x59 = -x29
dx59 = -dx29
x60 = parms[55]*x53 + parms[56]*x35 + parms[57]*x41 + x30*x56 + x58*x59
dx60 = dx30*x56 + dx35*parms[56] + dx41*parms[57] + dx53*parms[55] + dx56*x30 + dx58*x59 + dx59*x58
x61 = x20*x60
dx61 = dx20*x60 + dx60*x20
x62 = parms[43]*x16 + parms[44]*x14 + parms[45]*x17 + x40*x52 + x61
dx62 = dx14*parms[44] + dx16*parms[43] + dx17*parms[45] + dx40*x52 + dx52*x40 + dx61
x63 = parms[42]*x15 - parms[44]*x24 + parms[45]*x43 + parms[54]*x36 + parms[55]*x27 + parms[57]*x43 + x29*x56 + x30*x58
dx63 = dx15*parms[42] - dx24*parms[44] + dx27*parms[55] + dx29*x56 + dx30*x58 + dx36*parms[54] + dx43*(parms[45] + parms[57]) + dx56*x29 + dx58*x30
x64 = -x13
dx64 = -dx13
x65 = -parms[31]*x5 + parms[32]*x3 + parms[33]*x8 + x62*x9 + x63*x64
dx65 = dx3*parms[32] - dx5*parms[31] + dx62*x9 + dx63*x64 + dx64*x63 + dx8*parms[33] + dx9*x62
x66 = x2*x65
dx66 = dx2*x65 + dx65*x2
x67 = -x43
dx67 = -dx43
x68 = -parms[78]
dx68 = 0
x69 = parms[73]*x50 + parms[75]*x39 + parms[76]*x34 + parms[80]*x46 + x55*x68
dx69 = dx34*parms[76] + dx39*parms[75] + dx46*parms[80] + dx50*parms[73] + dx55*x68 + dx68*x55
x70 = -parms[80]
dx70 = 0
x71 = parms[72]*x50 + parms[73]*x39 + parms[74]*x34 + parms[79]*x55 + x48*x70
dx71 = dx34*parms[74] + dx39*parms[73] + dx48*x70 + dx50*parms[72] + dx55*parms[79] + dx70*x48
x72 = parms[62]*x31 + parms[64]*x33 + parms[65]*x36 + parms[66]*x54 + parms[67]*x45 + x38*x71 + x57*x69
dx72 = dx31*parms[62] + dx33*parms[64] + dx36*parms[65] + dx38*x71 + dx45*parms[67] + dx54*parms[66] + dx57*x69 + dx69*x57 + dx71*x38
x73 = parms[49]*x27 + parms[51]*x35 + parms[52]*x14 + parms[54]*x67 + parms[56]*x41 - x72
dx73 = dx14*parms[52] + dx27*parms[49] + dx35*parms[51] + dx41*parms[56] + dx67*parms[54] - dx72
x74 = x20*x52
dx74 = dx20*x52 + dx52*x20
x75 = -0.27747*x18
dx75 = -0.27747*dx18
x76 = -x38
dx76 = -dx38
x77 = parms[60]*x31 + parms[61]*x33 + parms[62]*x36 + parms[67]*x32 + parms[68]*x55 + x57*x71 + x69*x76
dx77 = dx31*parms[60] + dx32*parms[67] + dx33*parms[61] + dx36*parms[62] + dx55*parms[68] + dx57*x71 + dx69*x76 + dx71*x57 + dx76*x69
x78 = -parms[66]
dx78 = 0
x79 = -parms[79]
dx79 = 0
x80 = parms[74]*x50 + parms[76]*x39 + parms[77]*x34 + parms[78]*x48 + x46*x79
dx80 = dx34*parms[77] + dx39*parms[76] + dx46*x79 + dx48*parms[78] + dx50*parms[74] + dx79*x46
x81 = parms[61]*x31 + parms[63]*x33 + parms[64]*x36 + parms[68]*x44 + x32*x78 - x80
dx81 = dx31*parms[61] + dx32*x78 + dx33*parms[63] + dx36*parms[64] + dx44*parms[68] + dx78*x32 - dx80
x82 = -x30
dx82 = -dx30
x83 = parms[48]*x27 + parms[49]*x35 + parms[50]*x14 + parms[55]*x43 + parms[56]*x32 + x59*x77 + x81*x82
dx83 = dx14*parms[50] + dx27*parms[48] + dx32*parms[56] + dx35*parms[49] + dx43*parms[55] + dx59*x77 + dx77*x59 + dx81*x82 + dx82*x81
x84 = parms[36]*x24 + parms[37]*x14 + parms[38]*x15 + parms[43]*x23 + parms[44]*x67 + x20*x83 + x40*x73 + x60*x75 - 0.27747*x74
dx84 = dx14*parms[37] + dx15*parms[38] + dx20*x83 + dx23*parms[43] + dx24*parms[36] + dx40*x73 + dx60*x75 + dx67*parms[44] + dx73*x40 - 0.27747*dx74 + dx75*x60 + dx83*x20
x85 = parms[50]*x27 + parms[52]*x35 + parms[53]*x14 + parms[54]*x26 + parms[55]*x42 + x30*x77 + x59*x81
dx85 = dx14*parms[53] + dx26*parms[54] + dx27*parms[50] + dx30*x77 + dx35*parms[52] + dx42*parms[55] + dx59*x81 + dx77*x30 + dx81*x59
x86 = -parms[42]
dx86 = 0
x87 = parms[37]*x24 + parms[39]*x14 + parms[40]*x15 + parms[44]*x17 + x23*x86 + x85
dx87 = dx14*parms[39] + dx15*parms[40] + dx17*parms[44] + dx23*x86 + dx24*parms[37] + dx85 + dx86*x23
x88 = parms[24]*x11 + parms[25]*x3 + parms[26]*x5 + parms[32]*x22 + x64*x87 + x84*x9
dx88 = dx11*parms[24] + dx22*parms[32] + dx3*parms[25] + dx5*parms[26] + dx64*x87 + dx84*x9 + dx87*x64 + dx9*x84
x89 = -x10
dx89 = -dx10
x90 = -x20
dx90 = -dx20
x91 = 0.27747*x18
dx91 = 0.27747*dx18
x92 = -parms[43]
dx92 = 0
x93 = parms[38]*x24 + parms[40]*x14 + parms[41]*x15 + parms[42]*x43 + x17*x92 + x40*x83 + x52*x91 - 0.27747*x61 + x73*x90
dx93 = dx14*parms[40] + dx15*parms[41] + dx17*x92 + dx24*parms[38] + dx40*x83 + dx43*parms[42] + dx52*x91 - 0.27747*dx61 + dx73*x90 + dx83*x40 + dx90*x73 + dx91*x52 + dx92*x17
x94 = x13*x62
dx94 = dx13*x62 + dx62*x13
x95 = x63*x9
dx95 = dx63*x9 + dx9*x63
x96 = parms[25]*x11 + parms[27]*x3 + parms[28]*x5 + parms[32]*x8 - x93 + 0.00502*x94 + 0.00502*x95
dx96 = dx11*parms[25] + dx3*parms[27] + dx5*parms[28] + dx8*parms[32] - dx93 + 0.00502*dx94 + 0.00502*dx95
x97 = parms[42]*x53 + parms[43]*x24 + parms[45]*x23 + x40*x60 - x74
dx97 = dx23*parms[45] + dx24*parms[43] + dx40*x60 + dx53*parms[42] + dx60*x40 - dx74
x98 = parms[30]*x5 + parms[32]*x12 + parms[33]*x21 - x97
dx98 = dx12*parms[32] + dx21*parms[33] + dx5*parms[30] - dx97
x99 = x10*x98
dx99 = dx10*x98 + dx98*x10
x100 = -parms[31]
dx100 = 0
x101 = parms[26]*x11 + parms[28]*x3 + parms[29]*x5 + parms[30]*x21 + x100*x8 + x13*x84 + x87*x9 + 0.00502*x97
dx101 = dx100*x8 + dx11*parms[26] + dx13*x84 + dx21*parms[30] + dx3*parms[28] + dx5*parms[29] + dx8*x100 + dx84*x13 + dx87*x9 + dx9*x87 + 0.00502*dx97
x102 = -0.27857*x2
dx102 = -0.27857*dx2
x103 = -0.27857*x10
dx103 = -0.27857*dx10
x104 = parms[14]*x0 + parms[16]*x4 - 0.03175*parms[30]*x15 - 0.03175*parms[31]*x11 + x102*x98 + x103*x65 + x2*x88 + x89*x96 - 0.03175*x94 - 0.03175*x95
dx104 = dx0*parms[14] + dx102*x98 + dx103*x65 - 0.03175*dx11*parms[31] - 0.03175*dx15*parms[30] + dx2*x88 + dx4*parms[16] + dx65*x103 + dx88*x2 + dx89*x96 - 0.03175*dx94 - 0.03175*dx95 + dx96*x89 + dx98*x102
x105 = -x89
dx105 = -dx89
x106 = 0.00502*x105 + 0.03175
dx106 = 0.00502*dx105
x107 = -x103*x13 - x106*x9
dx107 = -dx103*x13 - dx106*x9 - dx13*x103 - dx9*x106
x108 = x2*x9
dx108 = dx2*x9 + dx9*x2
x109 = -x105*x20 - x108*x18
dx109 = -dx105*x20 - dx108*x18 - dx18*x108 - dx20*x105
x110 = -x109
dx110 = -dx109
x111 = x105*x40 + x108*x20
dx111 = dx105*x40 + dx108*x20 + dx20*x108 + dx40*x105
x112 = -x105
dx112 = -dx105
x113 = x103*x9 + x106*x64
dx113 = dx103*x9 + dx106*x64 + dx64*x106 + dx9*x103
x114 = 0.27747*x112 + x113
dx114 = 0.27747*dx112 + dx113
x115 = -x102
dx115 = -dx102
x116 = 0.27747*x108 + x115
dx116 = 0.27747*dx108 + dx115
x117 = x114*x20 + x116*x40
dx117 = dx114*x20 + dx116*x40 + dx20*x114 + dx40*x116
x118 = x107*x30 + x117*x59
dx118 = dx107*x30 + dx117*x59 + dx30*x107 + dx59*x117
x119 = x2*x64
dx119 = dx2*x64 + dx64*x2
x120 = -x111*x30 - x119*x29
dx120 = -dx111*x30 - dx119*x29 - dx29*x119 - dx30*x111
x121 = -x120
dx121 = -dx120
x122 = -x114*x18 - x116*x20
dx122 = -dx114*x18 - dx116*x20 - dx18*x114 - dx20*x116
x123 = -x122
dx123 = -dx122
x124 = x118*x57 + x123*x38
dx124 = dx118*x57 + dx123*x38 + dx38*x123 + dx57*x118
x125 = x111*x59 + x119*x30
dx125 = dx111*x59 + dx119*x30 + dx30*x119 + dx59*x111
x126 = -x110*x37 - x125*x38
dx126 = -dx110*x37 - dx125*x38 - dx37*x110 - dx38*x125
x127 = -parms[79]*x121 + parms[80]*x126 + parms[81]*x124
dx127 = -dx121*parms[79] + dx124*parms[81] + dx126*parms[80]
x128 = x110*x38 + x125*x57
dx128 = dx110*x38 + dx125*x57 + dx38*x110 + dx57*x125
x129 = -x118*x38 - x123*x37
dx129 = -dx118*x38 - dx123*x37 - dx37*x123 - dx38*x118
x130 = parms[78]*x121 - parms[80]*x128 + parms[81]*x129
dx130 = dx121*parms[78] - dx128*parms[80] + dx129*parms[81]
x131 = -parms[67]*x110 + parms[68]*x120 + parms[69]*x118 + x127*x57 + x130*x76
dx131 = -dx110*parms[67] + dx118*parms[69] + dx120*parms[68] + dx127*x57 + dx130*x76 + dx57*x127 + dx76*x130
x132 = -x107*x29 - x117*x30
dx132 = -dx107*x29 - dx117*x30 - dx29*x107 - dx30*x117
x133 = -x132
dx133 = -dx132
x134 = parms[66]*x110 - parms[68]*x125 + parms[69]*x132 + parms[78]*x126 - parms[79]*x128 - parms[81]*x133
dx134 = dx110*parms[66] - dx125*parms[68] + dx126*parms[78] - dx128*parms[79] + dx132*parms[69] - dx133*parms[81]
x135 = parms[42]*x105 - parms[44]*x108 + parms[45]*x107 + parms[54]*x110 + parms[55]*x111 + parms[57]*x107 + x131*x30 + x134*x59
dx135 = dx105*parms[42] + dx107*(parms[45] + parms[57]) - dx108*parms[44] + dx110*parms[54] + dx111*parms[55] + dx131*x30 + dx134*x59 + dx30*x131 + dx59*x134
x136 = x135*x9
dx136 = dx135*x9 + dx9*x135
x137 = -x119
dx137 = -dx119
x138 = parms[55]*x137 + parms[56]*x109 + parms[57]*x117 + x131*x59 + x134*x82
dx138 = dx109*parms[56] + dx117*parms[57] + dx131*x59 + dx134*x82 + dx137*parms[55] + dx59*x131 + dx82*x134
x139 = x138*x20
dx139 = dx138*x20 + dx20*x138
x140 = parms[54]*x119 - parms[56]*x111 + parms[57]*x122 - parms[66]*x121 - parms[67]*x125 - parms[69]*x123 - x127*x38 - x130*x57
dx140 = -dx111*parms[56] + dx119*parms[54] - dx121*parms[66] + dx122*parms[57] - dx123*parms[69] - dx125*parms[67] - dx127*x38 - dx130*x57 - dx38*x127 - dx57*x130
x141 = parms[74]*x128 + parms[76]*x126 + parms[77]*x121 + parms[78]*x129 + x124*x79
dx141 = dx121*parms[77] + dx124*x79 + dx126*parms[76] + dx128*parms[74] + dx129*parms[78] + dx79*x124
x142 = parms[61]*x125 + parms[63]*x120 + parms[64]*x110 + parms[68]*x118 + x123*x78 - x141
dx142 = dx110*parms[64] + dx118*parms[68] + dx120*parms[63] + dx123*x78 + dx125*parms[61] - dx141 + dx78*x123
x143 = parms[72]*x128 + parms[73]*x126 + parms[74]*x121 + parms[79]*x133 + x129*x70
dx143 = dx121*parms[74] + dx126*parms[73] + dx128*parms[72] + dx129*x70 + dx133*parms[79] + dx70*x129
x144 = parms[73]*x128 + parms[75]*x126 + parms[76]*x121 + parms[80]*x124 + x133*x68
dx144 = dx121*parms[76] + dx124*parms[80] + dx126*parms[75] + dx128*parms[73] + dx133*x68 + dx68*x133
x145 = parms[60]*x125 + parms[61]*x120 + parms[62]*x110 + parms[67]*x123 + parms[68]*x133 + x143*x57 + x144*x76
dx145 = dx110*parms[62] + dx120*parms[61] + dx123*parms[67] + dx125*parms[60] + dx133*parms[68] + dx143*x57 + dx144*x76 + dx57*x143 + dx76*x144
x146 = parms[48]*x111 + parms[49]*x109 + parms[50]*x119 + parms[55]*x107 + parms[56]*x123 + x142*x82 + x145*x59
dx146 = dx107*parms[55] + dx109*parms[49] + dx111*parms[48] + dx119*parms[50] + dx123*parms[56] + dx142*x82 + dx145*x59 + dx59*x145 + dx82*x142
x147 = -x107
dx147 = -dx107
x148 = -parms[67]
dx148 = 0
x149 = parms[62]*x125 + parms[64]*x120 + parms[65]*x110 + parms[66]*x132 + x118*x148 + x143*x38 + x144*x57
dx149 = dx110*parms[65] + dx118*x148 + dx120*parms[64] + dx125*parms[62] + dx132*parms[66] + dx143*x38 + dx144*x57 + dx148*x118 + dx38*x143 + dx57*x144
x150 = parms[49]*x111 + parms[51]*x109 + parms[52]*x119 + parms[54]*x147 + parms[56]*x117 - x149
dx150 = dx109*parms[51] + dx111*parms[49] + dx117*parms[56] + dx119*parms[52] + dx147*parms[54] - dx149
x151 = parms[38]*x108 + parms[40]*x119 + parms[41]*x105 + parms[42]*x107 + x113*x92 - 0.27747*x139 + x140*x91 + x146*x40 + x150*x90
dx151 = dx105*parms[41] + dx107*parms[42] + dx108*parms[38] + dx113*x92 + dx119*parms[40] - 0.27747*dx139 + dx140*x91 + dx146*x40 + dx150*x90 + dx40*x146 + dx90*x150 + dx91*x140 + dx92*x113
x152 = parms[43]*x112 + parms[44]*x119 + parms[45]*x113 + x139 + x140*x40
dx152 = dx112*parms[43] + dx113*parms[45] + dx119*parms[44] + dx139 + dx140*x40 + dx40*x140
x153 = x13*x152
dx153 = dx13*x152 + dx152*x13
x154 = -0.27747*x20
dx154 = -0.27747*dx20
x155 = parms[36]*x108 + parms[37]*x119 + parms[38]*x105 + parms[43]*x115 + parms[44]*x147 + x138*x75 + x140*x154 + x146*x20 + x150*x40
dx155 = dx105*parms[38] + dx108*parms[36] + dx115*parms[43] + dx119*parms[37] + dx138*x75 + dx140*x154 + dx146*x20 + dx147*parms[44] + dx150*x40 + dx154*x140 + dx20*x146 + dx40*x150 + dx75*x138
x156 = -parms[55]
dx156 = 0
x157 = parms[50]*x111 + parms[52]*x109 + parms[53]*x119 + parms[54]*x122 + x117*x156 + x142*x59 + x145*x30
dx157 = dx109*parms[52] + dx111*parms[50] + dx117*x156 + dx119*parms[53] + dx122*parms[54] + dx142*x59 + dx145*x30 + dx156*x117 + dx30*x145 + dx59*x142
x158 = parms[37]*x108 + parms[39]*x119 + parms[40]*x105 + parms[44]*x113 + x115*x86 + x157
dx158 = dx105*parms[40] + dx108*parms[37] + dx113*parms[44] + dx115*x86 + dx119*parms[39] + dx157 + dx86*x115
x159 = parms[42]*x137 + parms[43]*x108 + parms[45]*x115 + x138*x40 + x140*x90
dx159 = dx108*parms[43] + dx115*parms[45] + dx137*parms[42] + dx138*x40 + dx140*x90 + dx40*x138 + dx90*x140
x160 = parms[26]*x2 + parms[28]*x89 + parms[30]*x102 + x100*x103 + x13*x155 + x158*x9 + 0.00502*x159
dx160 = dx100*x103 + dx102*parms[30] + dx103*x100 + dx13*x155 + dx155*x13 + dx158*x9 + 0.00502*dx159 + dx2*parms[26] + dx89*parms[28] + dx9*x158
x161 = -x9
dx161 = -dx9
x162 = x13*x20
dx162 = dx13*x20 + dx20*x13
x163 = x162*x59 + x30*x9
dx163 = dx162*x59 + dx30*x9 + dx59*x162 + dx9*x30
x164 = x13*x40
dx164 = dx13*x40 + dx40*x13
x165 = -x164
dx165 = -dx164
x166 = -x163*x38 - x165*x37
dx166 = -dx163*x38 - dx165*x37 - dx37*x165 - dx38*x163
x167 = x163*x57 + x165*x38
dx167 = dx163*x57 + dx165*x38 + dx38*x165 + dx57*x163
x168 = 0.27747*x13 + 0.00502
dx168 = 0.27747*dx13
x169 = x168*x40
dx169 = dx168*x40 + dx40*x168
x170 = x169*x82
dx170 = dx169*x82 + dx82*x169
x171 = -x170
dx171 = -dx170
x172 = x169*x59
dx172 = dx169*x59 + dx59*x169
x173 = -x162*x30 - x29*x9
dx173 = -dx162*x30 - dx29*x9 - dx30*x162 - dx9*x29
x174 = -x173
dx174 = -dx173
x175 = x168*x90
dx175 = dx168*x90 + dx90*x168
x176 = -x175
dx176 = -dx175
x177 = x172*x57 + x176*x38
dx177 = dx172*x57 + dx176*x38 + dx38*x176 + dx57*x172
x178 = -parms[79]*x174 + parms[80]*x166 + parms[81]*x177
dx178 = dx166*parms[80] - dx174*parms[79] + dx177*parms[81]
x179 = -x172*x38 - x176*x37
dx179 = -dx172*x38 - dx176*x37 - dx37*x176 - dx38*x172
x180 = parms[78]*x174 - parms[80]*x167 + parms[81]*x179
dx180 = -dx167*parms[80] + dx174*parms[78] + dx179*parms[81]
x181 = parms[55]*x161 + parms[56]*x164 + parms[57]*x169 + x59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + x82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
dx181 = dx161*parms[55] - dx163*parms[68]*x82 + dx164*parms[56] + dx165*(parms[66]*x82 - parms[67]*x59) + dx166*parms[78]*x82 - dx167*parms[79]*x82 + dx169*parms[57] + dx170*parms[69]*x82 - dx171*parms[81]*x82 + dx172*parms[69]*x59 + dx173*parms[68]*x59 + dx178*x57*x59 + dx180*x59*x76 + dx57*x178*x59 + dx59*(-parms[67]*x165 + parms[68]*x173 + parms[69]*x172 + x178*x57 + x180*x76) + dx76*x180*x59 + dx82*(parms[66]*x165 - parms[68]*x163 + parms[69]*x170 + parms[78]*x166 - parms[79]*x167 - parms[81]*x171)
x182 = parms[54]*x9 - parms[56]*x162 + parms[57]*x175 - parms[66]*x174 - parms[67]*x163 - parms[69]*x176 - x178*x38 - x180*x57
dx182 = -dx162*parms[56] - dx163*parms[67] - dx174*parms[66] + dx175*parms[57] - dx176*parms[69] - dx178*x38 - dx180*x57 - dx38*x178 - dx57*x180 + dx9*parms[54]
x183 = parms[74]*x167 + parms[76]*x166 + parms[77]*x174 + parms[78]*x179 + x177*x79
dx183 = dx166*parms[76] + dx167*parms[74] + dx174*parms[77] + dx177*x79 + dx179*parms[78] + dx79*x177
x184 = parms[61]*x163 + parms[63]*x173 + parms[64]*x165 + parms[68]*x172 + x176*x78 - x183
dx184 = dx163*parms[61] + dx165*parms[64] + dx172*parms[68] + dx173*parms[63] + dx176*x78 - dx183 + dx78*x176
x185 = parms[73]*x167 + parms[75]*x166 + parms[76]*x174 + parms[80]*x177 + x171*x68
dx185 = dx166*parms[75] + dx167*parms[73] + dx171*x68 + dx174*parms[76] + dx177*parms[80] + dx68*x171
x186 = parms[72]*x167 + parms[73]*x166 + parms[74]*x174 + parms[79]*x171 + x179*x70
dx186 = dx166*parms[73] + dx167*parms[72] + dx171*parms[79] + dx174*parms[74] + dx179*x70 + dx70*x179
x187 = parms[60]*x163 + parms[61]*x173 + parms[62]*x165 + parms[67]*x176 + parms[68]*x171 + x185*x76 + x186*x57
dx187 = dx163*parms[60] + dx165*parms[62] + dx171*parms[68] + dx173*parms[61] + dx176*parms[67] + dx185*x76 + dx186*x57 + dx57*x186 + dx76*x185
x188 = parms[50]*x162 + parms[52]*x164 + parms[53]*x9 + parms[54]*x175 + x156*x169 + x184*x59 + x187*x30
dx188 = dx156*x169 + dx162*parms[50] + dx164*parms[52] + dx169*x156 + dx175*parms[54] + dx184*x59 + dx187*x30 + dx30*x187 + dx59*x184 + dx9*parms[53]
x189 = parms[48]*x162 + parms[49]*x164 + parms[50]*x9 + parms[56]*x176 + x184*x82 + x187*x59
dx189 = dx162*parms[48] + dx164*parms[49] + dx176*parms[56] + dx184*x82 + dx187*x59 + dx59*x187 + dx82*x184 + dx9*parms[50]
x190 = parms[62]*x163 + parms[64]*x173 + parms[65]*x165 + parms[66]*x170 + x148*x172 + x185*x57 + x186*x38
dx190 = dx148*x172 + dx163*parms[62] + dx165*parms[65] + dx170*parms[66] + dx172*x148 + dx173*parms[64] + dx185*x57 + dx186*x38 + dx38*x186 + dx57*x185
x191 = parms[49]*x162 + parms[51]*x164 + parms[52]*x9 + parms[56]*x169 - x190
dx191 = dx162*parms[49] + dx164*parms[51] + dx169*parms[56] - dx190 + dx9*parms[52]
x192 = parms[38]*x13 + parms[40]*x9 - 0.27747*x181*x20 + x182*x91 + x189*x40 + x191*x90
dx192 = dx13*parms[38] - 0.27747*dx181*x20 + dx182*x91 + dx189*x40 + dx191*x90 - 0.27747*dx20*x181 + dx40*x189 + dx9*parms[40] + dx90*x191 + dx91*x182
x193 = x154*x82
dx193 = dx154*x82 + dx82*x154
x194 = -x193
dx194 = -dx193
x195 = x40*x82
dx195 = dx40*x82 + dx82*x40
x196 = -x195
dx196 = -dx195
x197 = x40*x59
dx197 = dx40*x59 + dx59*x40
x198 = -x90
dx198 = -dx90
x199 = x197*x57 + x198*x38
dx199 = dx197*x57 + dx198*x38 + dx38*x198 + dx57*x197
x200 = x154*x59
dx200 = dx154*x59 + dx59*x154
x201 = -x91
dx201 = -dx91
x202 = -x200*x38 - x201*x37
dx202 = -dx200*x38 - dx201*x37 - dx37*x201 - dx38*x200
x203 = -x197*x38 - x198*x37
dx203 = -dx197*x38 - dx198*x37 - dx37*x198 - dx38*x197
x204 = parms[72]*x199 + parms[73]*x203 + parms[74]*x196 + parms[79]*x194 + x202*x70
dx204 = dx194*parms[79] + dx196*parms[74] + dx199*parms[72] + dx202*x70 + dx203*parms[73] + dx70*x202
x205 = x200*x57 + x201*x38
dx205 = dx200*x57 + dx201*x38 + dx38*x201 + dx57*x200
x206 = parms[73]*x199 + parms[75]*x203 + parms[76]*x196 + parms[80]*x205 + x194*x68
dx206 = dx194*x68 + dx196*parms[76] + dx199*parms[73] + dx203*parms[75] + dx205*parms[80] + dx68*x194
x207 = parms[62]*x197 + parms[64]*x195 + parms[65]*x198 + parms[66]*x193 + x148*x200 + x204*x38 + x206*x57
dx207 = dx148*x200 + dx193*parms[66] + dx195*parms[64] + dx197*parms[62] + dx198*parms[65] + dx200*x148 + dx204*x38 + dx206*x57 + dx38*x204 + dx57*x206
x208 = parms[78]*x196 - parms[80]*x199 + parms[81]*x202
dx208 = dx196*parms[78] - dx199*parms[80] + dx202*parms[81]
x209 = -parms[79]*x196 + parms[80]*x203 + parms[81]*x205
dx209 = -dx196*parms[79] + dx203*parms[80] + dx205*parms[81]
x210 = parms[60]*x197 + parms[61]*x195 + parms[62]*x198 + parms[67]*x201 + parms[68]*x194 + x204*x57 + x206*x76
dx210 = dx194*parms[68] + dx195*parms[61] + dx197*parms[60] + dx198*parms[62] + dx201*parms[67] + dx204*x57 + dx206*x76 + dx57*x204 + dx76*x206
x211 = parms[74]*x199 + parms[76]*x203 + parms[77]*x196 + parms[78]*x202 + x205*x79
dx211 = dx196*parms[77] + dx199*parms[74] + dx202*parms[78] + dx203*parms[76] + dx205*x79 + dx79*x205
x212 = parms[61]*x197 + parms[63]*x195 + parms[64]*x198 + parms[68]*x200 + x201*x78 - x211
dx212 = dx195*parms[63] + dx197*parms[61] + dx198*parms[64] + dx200*parms[68] + dx201*x78 - dx211 + dx78*x201
x213 = parms[50]*x40 + parms[52]*x90 + parms[54]*x91 + x154*x156 + x210*x30 + x212*x59
dx213 = dx154*x156 + dx156*x154 + dx210*x30 + dx212*x59 + dx30*x210 + dx40*parms[50] + dx59*x212 + dx90*parms[52] + dx91*parms[54]
x214 = -x59
dx214 = -dx59
x215 = x30*x76
dx215 = dx30*x76 + dx76*x30
x216 = x30*x57
dx216 = dx30*x57 + dx57*x30
x217 = parms[72]*x216 + parms[73]*x215 + parms[74]*x214
dx217 = dx214*parms[74] + dx215*parms[73] + dx216*parms[72]
x218 = parms[73]*x216 + parms[75]*x215 + parms[76]*x214
dx218 = dx214*parms[76] + dx215*parms[75] + dx216*parms[73]
x219 = parms[74]*x216 + parms[76]*x215 + parms[77]*x214
dx219 = dx214*parms[77] + dx215*parms[76] + dx216*parms[74]
x220 = parms[62]*x30 + parms[64]*x59 + x217*x38 + x218*x57
dx220 = dx217*x38 + dx218*x57 + dx30*parms[62] + dx38*x217 + dx57*x218 + dx59*parms[64]
x221 = parms[74]*x38 + parms[76]*x57
dx221 = dx38*parms[74] + dx57*parms[76]
#
dMdq6_out[0] = dx0*(2*parms[12]*x0 + 2*parms[13]*x4 - 0.27857*x66 + x7*x96 + x88*x89 + 0.27857*x99) - dx101*x4 + dx4*(2*parms[13]*x0 + 2*parms[15]*x4 - x101 - 0.03175*x66 + 0.03175*x99) + dx66*(-0.27857*x0 - 0.03175*x4) + dx7*x0*x96 + dx88*x0*x89 + dx89*x0*x88 + dx96*x0*x7 + dx99*(0.27857*x0 + 0.03175*x4)
dMdq6_out[1] = dx104
dMdq6_out[2] = dx101
dMdq6_out[3] = dx93
dMdq6_out[4] = dx85
dMdq6_out[5] = dx72
dMdq6_out[6] = dx80
dMdq6_out[7] = dx104
dMdq6_out[8] = dx102*(parms[32]*x7 + 2*parms[33]*x102 - x159) + dx103*(2*parms[32]*x89 + 2*parms[33]*x103 + x135*x64 + x152*x9) - 0.03175*dx105*parms[30] + dx115*parms[32]*x2 + dx135*x103*x64 + dx136*(0.00502*x89 - 0.03175) - dx151*x89 + dx152*x103*x9 + dx153*(0.00502*x89 - 0.03175) + dx155*x2*x9 + dx158*x2*x64 - dx159*x102 + dx2*(2*parms[24]*x2 + 2*parms[25]*x89 - 0.0635*parms[31] + parms[32]*x115 + x155*x9 + x158*x64) + dx64*(x103*x135 + x158*x2) + dx7*parms[32]*x102 + dx89*(2*parms[25]*x2 + 2*parms[27]*x89 + 0.03175*parms[30] + 2*parms[32]*x103 + 0.00502*x136 - x151 + 0.00502*x153) + dx9*(x103*x152 + x155*x2)
dMdq6_out[9] = dx160
dMdq6_out[10] = dx151
dMdq6_out[11] = dx157
dMdq6_out[12] = dx149
dMdq6_out[13] = dx141
dMdq6_out[14] = dx101
dMdq6_out[15] = dx160
dMdq6_out[16] = dx13*(2*parms[36]*x13 + 2*parms[37]*x9 + 0.01004*parms[43] + x154*x182 + x181*x75 + x189*x20 + x191*x40) + dx154*x13*x182 + 0.00502*dx161*parms[42] + dx181*(x13*x75 + 0.00502*x40) + dx182*(x13*x154 + 0.00502*x90) + dx188*x9 + dx189*x13*x20 + dx191*x13*x40 + dx20*x13*x189 + dx40*(x13*x191 + 0.00502*x181) + dx75*x13*x181 + dx9*(2*parms[37]*x13 + 2*parms[39]*x9 - 0.00502*parms[42] + x188) + 0.00502*dx90*x182
dMdq6_out[17] = dx192
dMdq6_out[18] = dx188
dMdq6_out[19] = dx190
dMdq6_out[20] = dx183
dMdq6_out[21] = dx93
dMdq6_out[22] = dx151
dMdq6_out[23] = dx192
dMdq6_out[24] = dx154*(2*parms[56]*x90 + 2*parms[57]*x154 + x59*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x82*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194)) + dx193*parms[69]*x154*x82 - dx194*parms[81]*x154*x82 + dx195*parms[68]*x154*x59 - dx196*parms[66]*x91 + dx197*(-parms[67]*x91 - parms[68]*x154*x82) + dx198*x154*(parms[66]*x82 - parms[67]*x59) - dx199*parms[79]*x154*x82 + dx200*parms[69]*x154*x59 + dx201*(parms[56]*x40 - parms[69]*x91) + dx203*parms[78]*x154*x82 - dx207*x90 + dx208*(x154*x59*x76 - x57*x91) + dx209*(x154*x57*x59 - x38*x91) + dx210*x40*x59 + dx212*x40*x82 - dx38*x209*x91 + dx40*(2*parms[48]*x40 + 2*parms[49]*x90 + parms[56]*x201 - parms[56]*x91 + x210*x59 + x212*x82) + dx57*(x154*x209*x59 - x208*x91) + dx59*(x154*(-parms[67]*x198 + parms[68]*x195 + parms[69]*x200 + x208*x76 + x209*x57) + x210*x40) + dx76*x154*x208*x59 + dx82*(x154*(parms[66]*x198 - parms[68]*x197 + parms[69]*x193 + parms[78]*x203 - parms[79]*x199 - parms[81]*x194) + x212*x40) + dx90*(2*parms[49]*x40 + 2*parms[51]*x90 + 2*parms[56]*x154 - x207) + dx91*(-parms[56]*x40 + 2*parms[57]*x91 - parms[66]*x196 - parms[67]*x197 - parms[69]*x201 - x208*x57 - x209*x38)
dMdq6_out[25] = dx213
dMdq6_out[26] = dx207
dMdq6_out[27] = dx211
dMdq6_out[28] = dx85
dMdq6_out[29] = dx157
dMdq6_out[30] = dx188
dMdq6_out[31] = dx213
dMdq6_out[32] = dx217*x30*x57 + dx218*x30*x76 - dx219*x59 + dx30*(2*parms[60]*x30 + 2*parms[61]*x59 + x217*x57 + x218*x76) + dx57*x217*x30 + dx59*(2*parms[61]*x30 + 2*parms[63]*x59 - x219) + dx76*x218*x30
dMdq6_out[33] = dx220
dMdq6_out[34] = dx219
dMdq6_out[35] = dx72
dMdq6_out[36] = dx149
dMdq6_out[37] = dx190
dMdq6_out[38] = dx207
dMdq6_out[39] = dx220
dMdq6_out[40] = dx38*(2*parms[72]*x38 + 2*parms[73]*x57) + dx57*(2*parms[73]*x38 + 2*parms[75]*x57)
dMdq6_out[41] = dx221
dMdq6_out[42] = dx80
dMdq6_out[43] = dx141
dMdq6_out[44] = dx183
dMdq6_out[45] = dx211
dMdq6_out[46] = dx219
dMdq6_out[47] = dx221
dMdq6_out[48] = 0
#
return dMdq6_out
| 60.999671 | 1,258 | 0.568268 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1,789 | 0.009657 |
7d4d986638e6a4a9e2c7f55747f9a7a304afe8fb | 2,401 | py | Python | back/color.py | PoCInnovation/AI4UX | 78a60e35f755e2ab58d469748c363daa4a4222c9 | [
"MIT"
] | null | null | null | back/color.py | PoCInnovation/AI4UX | 78a60e35f755e2ab58d469748c363daa4a4222c9 | [
"MIT"
] | null | null | null | back/color.py | PoCInnovation/AI4UX | 78a60e35f755e2ab58d469748c363daa4a4222c9 | [
"MIT"
] | null | null | null | import extcolors
import PIL
def new_image(image, x1, y1, x2, y2):
    """Return the sub-image of *image* delimited by the box (x1, y1, x2, y2).

    Thin wrapper over ``Image.crop``; the coordinates follow PIL's
    (left, upper, right, lower) convention.
    """
    return image.crop((x1, y1, x2, y2))
def nbColor_daltonisme(image, total):
    """Analyse the colour palette of *image*.

    Returns ``(dominant, confusion_ratio, palette)``:
    - ``dominant``: number of extracted colours covering more than 15% of the
      ``total`` pixels;
    - ``confusion_ratio``: fraction of extracted colours that are strongly
      saturated in exactly one RGB channel (red/green/blue — the channels a
      protanope/deuteranope/tritanope respectively cannot see) while covering
      more than 5% of the image;
    - ``palette``: list of ``(r, g, b, percentage)`` for every extracted colour.
    """
    colors, pixel_count = extcolors.extract_from_image(image)
    dominant = 0
    confusable = 0  # colours problematic for some kind of dichromacy
    palette = []
    for rgb, count in colors:
        share = count / total * 100
        if share > 15:
            dominant += 1
        if share > 5:
            r, g, b = rgb[0], rgb[1], rgb[2]
            if b > 200 and r < 200 and g < 200:
                confusable += 1  # strong blue -> tritanopia
            if g > 200 and r < 200 and b < 200:
                confusable += 1  # strong green -> deuteranopia
            if r > 200 and g < 200 and b < 200:
                confusable += 1  # strong red -> protanopia
        palette.append((rgb[0], rgb[1], rgb[2], share))
    return dominant, confusable / len(colors), palette
def padding_ratio(image, total):
    """Return the cumulative coverage, in percent of *total* pixels, of the
    three most frequent colours extracted from *image*."""
    colors, _pixel_count = extcolors.extract_from_image(image)
    # extcolors returns colours ordered by pixel count; keep the top three.
    return sum(count / total * 100 for _rgb, count in colors[:3])
def dataColor(image):
    """Compute colour-related UX metrics for a screenshot.

    Returns ``(score_color, score_dalto, left_ratio, right_ratio, palette)``:
    - ``score_color``: 1.0, reduced by 0.15 for every dominant colour beyond
      three (floored at 0.0);
    - ``score_dalto``: 1 minus the colour-blind-confusion ratio;
    - ``left_ratio`` / ``right_ratio``: top-3 colour coverage of the left and
      right quarter strips of the image, as a 0-1 fraction (rounded percent);
    - ``palette``: the extracted colour list from ``nbColor_daltonisme``.
    """
    width, height = image.size
    pixels = width * height
    dominant, dalto_ratio, palette = nbColor_daltonisme(image, pixels)

    def _strip_ratio(x_left, x_right):
        # Top-3 colour coverage of a vertical strip of the image.
        strip = new_image(image, x_left, 0, x_right, height)
        strip_w, strip_h = strip.size
        return padding_ratio(strip, strip_w * strip_h)

    percent_first = _strip_ratio(0, width // 4)
    percent_forth = _strip_ratio((width // 4) * 3, width)

    score_color = 1.0
    if dominant > 3:
        score_color = max(0.0, 1.0 - 0.15 * (dominant - 3))

    return (score_color, 1 - dalto_ratio,
            round(percent_first) / 100, round(percent_forth) / 100, palette)
if __name__ == "__main__":
    ## IMAGE ##
    # Manual smoke test: score a sample screenshot and print the metrics.
    image = PIL.Image.open("web_screenshot.png")
    print(dataColor(image))
| 33.347222 | 106 | 0.63307 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 105 | 0.043677 |
7d4f4e96803718430d878ca088bcaed92b3079cc | 3,822 | py | Python | base_pool/mysql_pool/mysql_views.py | zhanzhangwei/kafka-study | 6be4167319b855c9560e92932aae628f87a5e680 | [
"Apache-2.0"
] | null | null | null | base_pool/mysql_pool/mysql_views.py | zhanzhangwei/kafka-study | 6be4167319b855c9560e92932aae628f87a5e680 | [
"Apache-2.0"
] | null | null | null | base_pool/mysql_pool/mysql_views.py | zhanzhangwei/kafka-study | 6be4167319b855c9560e92932aae628f87a5e680 | [
"Apache-2.0"
] | null | null | null | import json
import pymysql
import datetime
from dbutils.pooled_db import PooledDB
import pymysql
from conf.common import *
class MysqlClient(object):
    """MySQL helper backed by a shared DBUtils connection pool.

    The pool is created once per process (class attribute) from the settings
    imported out of ``conf.common``; every ``MysqlClient`` instance checks out
    its own pooled connection plus a ``DictCursor``.
    """

    __pool = None  # process-wide PooledDB, created lazily by the first __init__

    def __init__(self):
        """Build the shared pool on first use, then grab a connection/cursor.

        Pool parameters:
            mincached: initial number of idle connections in the pool
            maxcached: maximum number of idle connections kept in the pool
            maxshared: maximum number of shared connections
            maxconnections: maximum number of connections the pool may create
            blocking: when the limit is reached, True waits for a free
                connection instead of raising
            maxusage: maximum number of times a single connection is reused
            setsession: optional list of SQL commands that may serve to prepare
                the session, e.g. ["set datestyle to ...", "set time zone ..."]
            reset: how connections should be reset when returned to the pool
                (False or None to rollback transactions started with begin(),
                True to always issue a rollback for safety's sake)
            host/port/db/user/passwd: connection settings from conf.common
            charset: character encoding of the connection
        """
        mincached = 10
        maxcached = 20
        maxshared = 10
        maxconnections = 200
        blocking = True
        maxusage = 100
        setsession = None
        reset = True
        host = MYSQL_HOST
        port = MYSQL_PORT
        db = DATABASE
        user = USER
        passwd = PASSWORD
        charset = 'utf8mb4'
        if not self.__pool:
            self.__class__.__pool = PooledDB(pymysql,
                                             mincached, maxcached,
                                             maxshared, maxconnections, blocking,
                                             maxusage, setsession, reset,
                                             host=host, port=port, db=db,
                                             user=user, passwd=passwd,
                                             charset=charset,
                                             cursorclass=pymysql.cursors.DictCursor
                                             )
        self._conn = None
        self._cursor = None
        self.__get_conn()

    def __get_conn(self):
        # Check out a connection from the pool and open a cursor on it.
        self._conn = self.__pool.connection()
        self._cursor = self._conn.cursor()

    def close(self):
        """Release the cursor and return the connection to the pool."""
        try:
            self._cursor.close()
            self._conn.close()
        except Exception as e:
            # Best-effort cleanup: closing twice or closing a dropped
            # connection should not propagate.
            print(e)

    def __execute(self, sql, param=()):
        # Run the statement and return the number of affected/matched rows.
        # (A leftover debug print of the row count was removed here.)
        count = self._cursor.execute(sql, param)
        return count

    @staticmethod
    def __dict_datetime_obj_to_str(result_dict):
        """Convert datetime values in a row dict to strings so json
        serialization does not fail. Mutates and returns the same dict."""
        if result_dict:
            result_replace = {k: v.__str__() for k, v in result_dict.items()
                              if isinstance(v, datetime.datetime)}
            result_dict.update(result_replace)
        return result_dict

    def select_one(self, sql, param=()):
        """Run a query and return (row_count, first_row_dict_or_None)."""
        count = self.__execute(sql, param)
        result = self._cursor.fetchone()  # dict row (DictCursor) or None
        result = self.__dict_datetime_obj_to_str(result)
        return count, result

    def select_many(self, sql, param=()):
        """Run a query and return (row_count, list_of_row_dicts).

        :param sql: SQL statement
        :param param: statement parameters
        :return: number of rows and the full result set
        """
        count = self.__execute(sql, param)
        result = self._cursor.fetchall()  # list of dict rows (DictCursor)
        for row_dict in result:
            self.__dict_datetime_obj_to_str(row_dict)
        return count, result

    def execute(self, sql, param=()):
        """Run a DML statement and return the number of affected rows."""
        count = self.__execute(sql, param)
        return count

    def begin(self):
        """Start a transaction (turn autocommit off)."""
        self._conn.autocommit(0)

    def end(self, option='commit'):
        """Finish a transaction started with begin().

        Bug fix: the original called ``self._conn.autocommit()`` with no
        argument, which is a TypeError in pymysql (``autocommit(value)``
        requires a value); commit the transaction explicitly instead.
        """
        if option == 'commit':
            self._conn.commit()
        else:
            self._conn.rollback()
# Module-level singleton; note this checks out a DB connection at import time.
mysql_client = MysqlClient()
| 30.576 | 113 | 0.545526 | 3,992 | 0.961928 | 0 | 0 | 361 | 0.086988 | 0 | 0 | 1,379 | 0.332289 |
7d507f34d285e67fb744c8b50084ce59c5e7e8eb | 2,065 | py | Python | script/sklearn_like_toolkit/warpper/wrapperGridSearchCV.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/sklearn_like_toolkit/warpper/wrapperGridSearchCV.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | script/sklearn_like_toolkit/warpper/wrapperGridSearchCV.py | demetoir/MLtools | 8c42fcd4cc71728333d9c116ade639fe57d50d37 | [
"MIT"
] | null | null | null | from sklearn import model_selection
from sklearn.externals.joblib import Parallel
from tqdm import tqdm
from script.sklearn_like_toolkit.warpper.base.MixIn import ClfWrapperMixIn, MetaBaseWrapperClfWithABC
import multiprocessing
CPU_COUNT = multiprocessing.cpu_count()
# TODO using packtools.grid_search GridSearchCVProgressBar make warning ...
# but copied code just work fine, wtf??
# from pactools.grid_search import GridSearchCVProgressBar as _GridSearchCVProgressBar
class GridSearchCVProgressBar(model_selection.GridSearchCV):
    """GridSearchCV that monkey-patches joblib's ``Parallel`` so the search
    shows a tqdm progress bar while candidate/fold jobs run."""

    def _get_param_iterator(self):
        """Return the candidate parameter settings, patching Parallel as a side effect.

        Bug fix: the original called ``tqdm(max_value=..., title=...)``, which
        is the ``progressbar2`` API — tqdm raises ``TqdmKeyError`` on unknown
        keyword arguments; tqdm expects ``total=`` and ``desc=``.  The bar is
        now also wrapped around the job iterable so it actually advances.
        """
        iterator = super(GridSearchCVProgressBar, self)._get_param_iterator()
        iterator = list(iterator)
        n_candidates = len(iterator)

        # One unit of progress per (candidate, CV split) pair.
        # NOTE(review): uses sklearn's private ``model_selection._split`` /
        # ``_search`` modules, as the original did — fragile across versions.
        cv = model_selection._split.check_cv(self.cv, None)
        n_splits = getattr(cv, 'n_splits', 3)
        max_value = n_candidates * n_splits

        class ParallelProgressBar(Parallel):
            def __call__(self, iterable):
                bar = tqdm(iterable, total=max_value, desc='GridSearchCV')
                return super(ParallelProgressBar, self).__call__(bar)

        # Monkey patch: the next Parallel built by model_selection is ours.
        model_selection._search.Parallel = ParallelProgressBar

        return iterator
class wrapperGridSearchCV(GridSearchCVProgressBar, ClfWrapperMixIn, metaclass=MetaBaseWrapperClfWithABC):
    """Grid search with a progress bar, adapted to the project's classifier
    wrapper interface via ``ClfWrapperMixIn``."""

    def __init__(self, estimator, param_grid, scoring=None, fit_params=None, n_jobs=CPU_COUNT, iid=True, refit=True,
                 cv=None, verbose=0, pre_dispatch='2*n_jobs', error_score='raise', return_train_score="warn"):
        # Initialise both bases explicitly; the originals are not cooperative
        # super() classes, so chaining is done by hand.
        search_args = (estimator, param_grid, scoring, fit_params, n_jobs,
                       iid, refit, cv, verbose, pre_dispatch, error_score,
                       return_train_score)
        GridSearchCVProgressBar.__init__(self, *search_args)
        ClfWrapperMixIn.__init__(self)
| 42.142857 | 117 | 0.708475 | 1,568 | 0.759322 | 0 | 0 | 0 | 0 | 0 | 0 | 421 | 0.203874 |
7d51198f9982dd8e78ac7d042e281aeb60c728be | 3,331 | py | Python | cliente/templates/forms.py | ricardosmbr/smartcon | 6f6090b586e717b38066c20a7d620c4abae0a915 | [
"Apache-2.0"
] | null | null | null | cliente/templates/forms.py | ricardosmbr/smartcon | 6f6090b586e717b38066c20a7d620c4abae0a915 | [
"Apache-2.0"
] | 1 | 2021-06-02T00:21:09.000Z | 2021-06-02T00:21:09.000Z | cliente/templates/forms.py | ricardosmbr/smartcon | 6f6090b586e717b38066c20a7d620c4abae0a915 | [
"Apache-2.0"
] | null | null | null | from django import forms
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.shortcuts import redirect, render
from eth_account import Account

from sistema.mail import send_mail_template
from carteira.models import Carteira
from usuario.models import Usuario

from .models import Cliente
class MostrarCarteira(forms.ModelForm):
    """Read-only form displaying a wallet's name, balance and key pair."""

    name = forms.CharField(widget=forms.TextInput(attrs={'readonly': 'True'}))
    saldo = forms.FloatField(widget=forms.TextInput(attrs={'readonly': 'True'}))
    public_key = forms.CharField(
        label='Chave Pública',
        widget=forms.TextInput(attrs={'readonly': 'True', 'size': '40'}),
    )
    private_key = forms.CharField(
        label='Chave Privada',
        widget=forms.TextInput(attrs={'readonly': 'True', 'size': '50'}),
    )

    class Meta:
        model = Carteira
        fields = ['name', 'saldo', 'public_key', 'private_key']
class CarteiraNovaForm(forms.ModelForm):
    """Form used to register a new wallet for one of the user's clients."""

    name = forms.CharField(label='Nome', widget=forms.TextInput(attrs={'size': '20'}))
    public_key = forms.CharField(
        label='Chave Pública',
        widget=forms.TextInput(attrs={'readonly': 'True', 'size': '40'}),
    )
    private_key = forms.CharField(
        label='Chave Privada',
        widget=forms.TextInput(attrs={'readonly': 'True', 'size': '50'}),
    )

    class Meta:
        model = Carteira
        fields = ['name', 'id_cliente', 'public_key', 'private_key']

    def __init__(self, *args, **kwargs):
        # Restrict the client choices to the clients owned by the given user.
        user = kwargs.pop('user', '')
        super().__init__(*args, **kwargs)
        self.fields['id_cliente'] = forms.ModelChoiceField(
            label='Cliente',
            queryset=Cliente.objects.filter(id_usuario=user),
        )
class gerar(forms.ModelForm):
    """Form pre-filled with a freshly generated Ethereum key pair for a wallet.

    NOTE(review): ``Account.create`` runs once when the class body is
    executed, so every instance shows the same key pair until the module is
    reloaded — confirm whether a per-request key pair was intended.
    """

    public_key = forms.CharField(label='Chave Pública', widget=forms.TextInput(attrs={'readonly': 'True'}))
    # One-off key pair generated at class-definition time.
    conta = Account.create('KEYSMASHMAX FJAFJKLDSKF7JKFDJ 1530')
    public_key.widget.attrs.update({'value': conta.address})
    public_key.widget.attrs.update({'size': '40'})
    private_key = forms.CharField(label='Chave Privada', widget=forms.TextInput(attrs={'readonly': 'True'}))
    private_key.widget.attrs.update({'value': conta.privateKey})
    private_key.widget.attrs.update({'size': '50'})

    def __init__(self, *args, **kwargs):
        # Pre-filled wallet name and fixed client, both read-only for the user.
        argu = kwargs.pop('name', '')
        cli = kwargs.pop('id_cliente', '')
        # Bug fix: the original used ``super(CarteiraNovaForm.gerar, self)``,
        # which raises AttributeError because ``gerar`` is not an attribute of
        # ``CarteiraNovaForm``; the zero-argument form works regardless.
        super().__init__(*args, **kwargs)
        self.fields['name'] = forms.CharField(
            label='Nome',
            widget=forms.TextInput(attrs={'value': argu, 'readonly': 'True'}),
        )
        self.fields['id_cliente'] = forms.ModelChoiceField(
            label='Cliente',
            queryset=Cliente.objects.filter(id=cli),
            initial=0,
        )

    class Meta:
        model = Carteira
        fields = ['name', 'id_cliente', 'public_key', 'private_key']
@login_required
def carteira_gerar(request):
    """Generate and persist a new wallet for one of the user's clients.

    GET renders the creation form; POST validates it, saves the wallet and
    redirects back to the client page with a status message.  Invalid POSTs
    fall through and re-render the form with its errors.
    """
    template_name = 'carteira_gerar.html'
    context = {}
    form = CarteiraNovaForm(user=request.user.id)
    if request.method == 'POST':
        # NOTE(review): relies on ``gerar`` being reachable as an attribute of
        # the form instance — confirm the intended class nesting.
        form = CarteiraNovaForm(user=request.user.id).gerar(
            name=request.POST["name"], id_cliente=request.POST["id_cliente"])
        if form.is_valid():
            form.save()
            messages.success(request, "Carteira gerada com sucesso", extra_tags='text-success')
            # Bug fix: the original built the redirect but never returned it,
            # so the user always stayed on the form page.
            return redirect('cli:cliente')
        # Bug fix: failure was reported via messages.success with the typo
        # "Errro"; flag it as an error and re-render the form with its errors.
        messages.error(request, "Erro", extra_tags='text-danger')
    context['form'] = form
    return render(request, template_name, context)
| 36.604396 | 122 | 0.724107 | 2,426 | 0.727654 | 0 | 0 | 695 | 0.208458 | 0 | 0 | 749 | 0.224655 |
ada38af6048efec02fc99b80ce0ab842cf2993cb | 1,584 | py | Python | weibospider/settings.py | czyczyyzc/WeiboSpider | 41b9c97cb01d41cb4a62efdd452451b5ef25bdbc | [
"MIT"
] | 2 | 2021-03-26T03:02:52.000Z | 2021-04-01T11:08:46.000Z | weibospider/settings.py | czyczyyzc/WeiboSpider | 41b9c97cb01d41cb4a62efdd452451b5ef25bdbc | [
"MIT"
] | null | null | null | weibospider/settings.py | czyczyyzc/WeiboSpider | 41b9c97cb01d41cb4a62efdd452451b5ef25bdbc | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import random
# Scrapy project identity.
BOT_NAME = 'spider'
SPIDER_MODULES = ['spiders']
NEWSPIDER_MODULE = 'spiders'
# Deliberately ignore robots.txt for this crawler.
ROBOTSTXT_OBEY = False
# Load one cookie per line from cookies.txt located next to this module.
cookies_file = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'cookies.txt')
with open(cookies_file, 'r', encoding='utf-8-sig', newline='') as f:
    cookies = f.readlines()
cookies = [cookie.strip() for cookie in cookies]
# Index -> cookie string, so a crawler copy can pick a cookie by number.
COOKIES = dict(zip(range(len(cookies)), cookies))
# change cookie to yours
DEFAULT_REQUEST_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36',
    # Select a cookie from the numeric suffix of this module's directory name
    # (presumably one directory copy per worker — TODO confirm).
    'Cookie': COOKIES[int(os.path.basename(os.path.split(os.path.realpath(__file__))[0]).split('_')[-1]) % len(COOKIES.keys())],
    # Spoofed client IP, randomised once at import time.
    'X-Forwarded-For': '%s.%s.%s.%s' % (random.randrange(1, 200, 20), random.randrange(1, 200, 20), random.randrange(1, 200, 20), random.randrange(1, 200, 20)),
}
CONCURRENT_REQUESTS = 50
DOWNLOAD_DELAY = 3
AUTOTHROTTLE_ENABLED = True
LOG_LEVEL = "INFO" # log verbosity
LOG_STDOUT = True # mirror log output to stdout
DOWNLOADER_MIDDLEWARES = {
    # Built-in cookie/redirect middlewares disabled: cookies come from the
    # static header above and redirects are handled by the site logic.
    'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
    'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': None,
    'middlewares.IPProxyMiddleware': 100,
    'scrapy.downloadermiddlewares.httpproxy.HttpProxyMiddleware': 101,
}
# Alternative pipeline, kept for reference (MongoDB output):
# ITEM_PIPELINES = {
# 'pipelines.MongoDBPipeline': 300,
# }
ITEM_PIPELINES = {
    'pipelines.CSVPipeline': 300,
}
# Output directory for the CSV pipeline, next to this module.
SAVE_ROOT = os.path.join(os.path.split(os.path.realpath(__file__))[0], 'temp')
MONGO_HOST = '127.0.0.1'
MONGO_PORT = 27017
| 29.886792 | 160 | 0.704545 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 618 | 0.385287 |